code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""
Compute and plot statistics such as the mean in a rolling window of data.
Copyright 2016 Deepak Subburam
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
"""
import sys
import numpy
from matplotlib import pyplot
from . import stats, arrays
def _get_size_offset(data, size, overlap, fit_edges=True):
"""
Utility function for determining rolling window parameters, for given data size.
size: nominal size of the rolling window. If a fraction, is multiplied by
data size.
overlap: How much consecutive windows overlap, expressed as an integer number
of windows a data point appears in.
fit_edges: If True (default), tweak size parameter so that last datapoint
forms the last datapoint of the last window.
Returns size and offset parameters of rolling window.
"""
if size <= 1:
if isinstance(data, numpy.ma.masked_array):
size = int(round(size * data.count()))
else: size = int(round(size * len(data)))
offset = max(int(round(size / overlap)), 1)
if fit_edges:
while ((len(data) - size) % offset): size += 1
return size, offset
def rolling_stats(data, size=0.05, overlap=5, weights=None, cls='Full',
                  fit_edges=True, offset=None):
    """
    Compute statistics over a moving window, returned as a stats.Datab
    object. Each element holds statistics on the subsample selected by the
    window with the given size and offset. When offset is not supplied it
    is derived from overlap -- see _get_size_offset(). 2D data produces
    multivariate statistics.
    """
    if offset is None:
        size, offset = _get_size_offset(data, size, overlap, fit_edges=fit_edges)
    if weights is not None:
        weights = arrays.rolling_window(weights, size, offset)
    ndim = numpy.ndim(data)
    if ndim == 1:
        windows = arrays.rolling_window(data, size, offset)
        stats_obj = getattr(stats, cls)
        return stats_obj.stats(windows, weights=weights, axis=1, label_all=None)
    if ndim != 2:
        sys.exit('Data must be one or two dimensional.')
    # arrays.rolling_window only operates on the last axis, so transpose
    # first, then swap axes back into (sample, window, variable) order.
    windows = arrays.rolling_window(data.T, size, offset)
    windows = windows.swapaxes(0, 1).swapaxes(1, 2)
    return stats.Multivariate.stats(windows, weights=weights, axis=0, label_all=None)
def nearest_stats(x, y, x_weights=None, y_weights=None, size=0.05, cls='Full',
                  sliced=None, select=None, overlay=None, overlap=5, fit_edges=True):
    """
    Sort x and y by the values of x and return rolling stats on the result,
    as an (x_stats, y_stats) pair.

    The rolling stats can be weighted, with separate weights for x and y;
    when only x_weights are given, y_weights default to x_weights. y may
    carry one more dimension than x, in which case its rolling stats are
    Multivariate.
    size, overlap, fit_edges: rolling window parameters; see rolling_stats.
    sliced, select: optional array element selection parameters.
    """
    x, y, x_weights, y_weights = arrays.select((x, y, x_weights, y_weights),
                                               sliced=sliced, select=select,
                                               overlay=overlay)
    order = arrays.argsort(x)
    x = x[order]
    y = y[order]
    if y_weights is not None:
        if x_weights is None:
            sys.stderr.write('Warn: unusual options to nearest stats() -- y_weights present but x_weights absent')
        y_weights = y_weights[order]
    if x_weights is not None:
        x_weights = x_weights[order]
        if y_weights is None:
            y_weights = x_weights
    size, offset = _get_size_offset(x, size, overlap, fit_edges=fit_edges)
    x_stats = rolling_stats(x, size=size, offset=offset, weights=x_weights, cls=cls)
    y_stats = rolling_stats(y, size=size, offset=offset, weights=y_weights, cls=cls)
    return x_stats, y_stats
def plot(x, y, x_weights=None, y_weights=None, size=0.05,
         sliced=None, select=None, overlay=None,
         x_statistic='mean', y_statistic=None,
         error_band=None, error_statistic='std_err',
         overlap=5, fit_edges=True, line_args=None, error_args=None):
    """
    Produce a moving average plot of y against x.
    By default, +/-1.0 standard error bands are additionally plotted. Specify
    some other value to error_band option as desired.
    The moving average can be weighted, with separate weights for x and for y. If only
    x_weights are given, y_weights are set == x_weights.
    Instead of a moving average, some other moving statistic can be plotted by
    setting [x|y]_statistic option (e.g. to 'median'). If x_statistic option is
    set to None, a rank transform of x is used for the x-axis.
    y can have one extra dimension than x, in which case the y rolling stats are
    Multivariate stats, and the plotted statistic defaults to sum(y0*y1)/sum(y1*y1).
    size, overlap, fit_edges: rolling window parameters; see rolling_stats.
    sliced, select: optional array element selection parameters.
    line_args, error_args:
    args to pass to plot() when plotting the main line and error bands
    respectively. None (the default) is equivalent to [] and ['g'].
    """
    # None stands in for the previous mutable defaults [] and ['g'],
    # avoiding shared mutable default arguments.
    if line_args is None:
        line_args = []
    if error_args is None:
        error_args = ['g']
    if numpy.ndim(y) > numpy.ndim(x):
        # multivariate response; plot sum(y0*y1)/sum(y1*y1) by default
        if y_statistic is None:
            y_statistic = 'coeff_0_1'
    else:
        if error_band is None:
            error_band = 1.0
        if y_statistic is None:
            y_statistic = 'mean'
    # Sparse stats suffice unless a rank-based statistic is requested
    # (median/mad, or a percentile whose name ends in 'le').
    cls = 'Sparse'
    for statistic in (x_statistic, y_statistic):
        if not statistic:
            continue
        if statistic in ('median', 'mad') or statistic[-2:] == 'le':
            cls = 'Full'
    x_stats, y_stats = nearest_stats(x, y, x_weights=x_weights, y_weights=y_weights,
                                     size=size, sliced=sliced, select=select,
                                     overlay=overlay, overlap=overlap,
                                     fit_edges=fit_edges, cls=cls)
    if x_statistic:
        x_values = x_stats[x_statistic]
    else:
        # rank transform of x: evenly spaced values in [0, 1)
        x_values = numpy.arange(len(x_stats)) / len(x_stats)
    if line_args and numpy.isscalar(line_args):
        line_args = [line_args]
    pyplot.plot(x_values, y_stats[y_statistic], *line_args)
    if not error_band:
        return
    if error_args and numpy.isscalar(error_args):
        error_args = [error_args]
    band = y_stats[error_statistic] * error_band
    pyplot.plot(x_values, y_stats[y_statistic] + band, *error_args)
    pyplot.plot(x_values, y_stats[y_statistic] - band, *error_args)
| Fenugreek/tamarind | moving.py | Python | gpl-3.0 | 6,793 |
import threading
import mysql.connector.pooling
import re
if __name__ == "__main__":
import catdb
else:
from lib import catdb
# Allows multithreading when creating a connection to database and executing an SQL statement.
# threadID: a number representing this unique thread.
# catalogconn: a mysql connection for the catalog.
# nodeconn: a mysql connection for the node to be executed on.
# sqlstatement: the exact SQL statement that will be executed on the node.
# sqlstatement_name: used for error message.
# dtablerow: A dictionary with keys matching the columns in DTABLES.
# dtablerow must contain values for the following keys depending on the sql statement:
# CREATE TABLE: tname, nodedriver, nodeurl, nodeuser, nodepasswd, nodeid
# DROP TABLE: tname
# PARTITION UPDATE (using csvLoader): tname, partmtd, partcol, partparam1, partparam2, nodeid
# OTHER: dtablerow is not needed.
class ConnectionThread (threading.Thread):
    """Executes one SQL statement on a node connection inside a transaction,
    keeping the catalog (DTABLES) in sync for CREATE/DROP TABLE statements.

    threadID: number identifying this thread (used in fallback messages).
    catalogconn: mysql connection for the catalog database.
    nodeconn: mysql connection for the node the statement is executed on.
    sqlstatement: exact SQL text executed on the node.
    sqlstatement_name: label used in success/failure messages.
    dtablerow: dict with keys matching the DTABLES columns; which keys are
        required depends on the statement (see module-level comment).
    """

    def __init__(self, threadID, catalogconn, nodeconn, sqlstatement, sqlstatement_name, dtablerow=None):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.catalogconn = catalogconn
        self.nodeconn = nodeconn
        self.sqlstatement = sqlstatement
        self.sqlstatement_name = sqlstatement_name
        self.dtablerow = dtablerow

    def run(self):
        success = False
        prevent_success_msg = False
        # Initialized up-front so the finally block cannot hit a NameError
        # when nodeconn.cursor() itself raises.
        cursor = None
        try:
            cursor = self.nodeconn.cursor()
            self.nodeconn.start_transaction()
            # CREATE TABLE: execute on the node, then record it in the catalog.
            if re.search("CREATE TABLE", self.sqlstatement, flags=re.IGNORECASE | re.MULTILINE):
                cursor.execute(self.sqlstatement)
                result = catdb.insertTable(self.catalogconn, self.dtablerow)  # Updates dtables
                if result:
                    self.nodeconn.commit()
                    success = True
                else:
                    print("Rolling back transaction for thread {}".format(self.threadID))
                    self.nodeconn.rollback()
            # DROP TABLE: execute on the node, then remove it from the catalog.
            elif re.search("DROP TABLE", self.sqlstatement, flags=re.IGNORECASE | re.MULTILINE):
                cursor.execute(self.sqlstatement)
                result = catdb.removeByTable(self.catalogconn, self.dtablerow)  # Updates dtables
                if result:
                    self.nodeconn.commit()
                    success = True
                else:
                    print("Rolling back transaction for thread {}".format(self.threadID))
                    self.nodeconn.rollback()
            # SELECT: print each result row; the rows are the visible output,
            # so the generic success message is suppressed.
            elif re.search("SELECT ", self.sqlstatement, flags=re.IGNORECASE | re.MULTILINE):
                cursor.execute(self.sqlstatement)
                results = cursor.fetchall()
                for row in results:
                    print(*row, sep=' | ')
                success = True
                prevent_success_msg = True
        except mysql.connector.Error as err:
            print("SQL ERROR: {}".format(err.msg))
        finally:
            if success and not prevent_success_msg:
                self.__printSuccess()
            elif not success:
                self.__printFailure()
            if cursor is not None:
                cursor.close()
            self.nodeconn.close()

    def __printStatus(self, status):
        """Print '[host:port/db]: <name> <status>', degrading to the node URL
        from dtablerow, and finally to the thread id, when details are missing."""
        if not self.sqlstatement_name:
            self.sqlstatement_name = 'sql'
        try:
            print("[{}:{}/{}]: {} {}".format(self.nodeconn.server_host, self.nodeconn.server_port,
                                             self.nodeconn.database, self.sqlstatement_name, status))
        except (AttributeError, TypeError):
            # nodeconn may be closed/None or lack connection attributes.
            try:
                print("[{}]: {} {}".format(self.dtablerow['nodeurl'], self.sqlstatement_name, status))
            except (AttributeError, TypeError, KeyError):
                # dtablerow may be None or lack 'nodeurl'.
                print("THREAD {}: {}".format(self.threadID, status))

    def __printSuccess(self):
        self.__printStatus("success")

    def __printFailure(self):
        self.__printStatus("failure")
if __name__ == "__main__":
    # Manual smoke test: requires a reachable MySQL server (credentials
    # below) plus the catdb module; not suitable for automated CI.
    clustercfg = {
        'hostname': "jdbc:db2://127.0.0.1:3306/testdb",
        'username': "dbuser",
        'passwd': "mypasswd"
    }
    sqlstatement1 = "CREATE TABLE BOOKS(isbn char(14), title char(80), price decimal);"
    sqlstatement2 = "DROP TABLE BOOKS;"
    # Row describing BOOKS for the DTABLES catalog (no partitioning).
    dtablerow = {
        'tname': "BOOKS",
        'nodedriver': "test",
        'nodeurl': "jdbc:db2://127.0.0.1:3306/testdb",
        'nodeuser': "dbuser",
        'nodepasswd': "mypasswd",
        'partmtd': None,
        'nodeid': 1,
        'partcol': None,
        'partparam1': None,
        'partparam2': None
    }
    cparams = catdb.getCatalogParams(clustercfg)
    # Start from a clean slate: drop leftovers from any previous run.
    connection = mysql.connector.connect(**cparams)
    cursor = connection.cursor()
    cursor.execute("DROP TABLE IF EXISTS DTABLES;")
    cursor.execute("DROP TABLE IF EXISTS BOOKS;")
    cursor.close()
    connection.close()
    # Pool of 6 connections; each ConnectionThread takes two (catalog + node).
    cnxpool = mysql.connector.pooling.MySQLConnectionPool(pool_name = "cnxpool", pool_size = 6, **cparams)
    catdb.makeDtables(cnxpool.get_connection())
    input("Press Enter to create table")
    # NOTE: run() is called directly (not start()), so each statement
    # executes synchronously on the main thread.
    thread = ConnectionThread(dtablerow['nodeid'], cnxpool.get_connection(), cnxpool.get_connection(), sqlstatement1, "sqlstatement1", dtablerow)
    thread.run()
    input("Press Enter to drop table")
    thread = ConnectionThread(dtablerow['nodeid'], cnxpool.get_connection(), cnxpool.get_connection(), sqlstatement2, "sqlstatement2", dtablerow)
    thread.run()
| RoryAndrews/Python-Parallel-DB | lib/ConnectionThread.py | Python | mit | 5,759 |
import os
import re
import tempfile
import synth_test
import verilator_test
# Set from external.
# The driver script is expected to overwrite these module globals before
# any test runs.
iroha_binary = ""  # path to the iroha binary; "" omits --iroha_binary
with_synth_test = False  # additionally run synth_test.Process on generated Verilog
with_verilator_test = False  # additionally run verilator_test.Process on generated Verilog
karuta_binary = "../karuta-bin"  # karuta executable invoked by GetKarutaCommand
tmp_prefix = "/tmp"  # directory for generated Verilog output (karuta --root)
default_tb = "test_tb.v"  # default Verilog testbench when none is declared
verilog_compiler = "iverilog"  # command used to compile generated Verilog
def FileBase(fn):
    """Return the basename of *fn* with the trailing '.karuta' suffix removed."""
    # assumes the file name ends in '.karuta' (7 characters including the dot)
    return os.path.basename(fn)[:-7]
def CheckLog(fn, exp):
    """
    Scan a test log for failures.

    fn: path of the log file.
    exp: optional regex; when given, at least one line must match it,
        otherwise one extra failure is counted.

    Returns {"num_fails": <failure count>,
             "done_stat": 1 if a 'KARUTA DONE' line was seen, else 0}.
    """
    num_fails = 0
    done_stat = 0
    has_exp = False
    # 'with' guarantees the log file is closed (the original leaked the handle).
    with open(fn, "r") as ifh:
        for line in ifh:
            if re.search("ASSERTION FAILURE", line):
                num_fails = num_fails + 1
            if re.search("ASSERTION UNDEFINED", line):
                num_fails = num_fails + 1
            if re.search("KARUTA DONE", line):
                done_stat = 1
            if re.search("error", line):
                num_fails = num_fails + 1
            if exp and re.search(exp, line):
                has_exp = True
    if exp and not has_exp:
        num_fails = num_fails + 1
    return {"num_fails": num_fails,
            "done_stat": done_stat}
def ReadTestInfo(fn):
    """Scan a .karuta source file for 'KEY: value' directives embedded in it
    and return them as a dict.

    Recognized directives: KARUTA_EXPECTED_ERRORS, VERILOG_EXPECTED_ERRORS,
    VERILOG_EXPECTED_OUTPUT, VERILOG_OUTPUT, VERILOG_TB,
    KARUTA_IGNORE_ERRORS, KARUTA_TIMEOUT, KARUTA_EXPECT_ABORT,
    KARUTA_SPLIT_TEST, SELF_SHELL.
    """
    # Defaults for the counters/flags that callers always read.
    test_info = {"exp_fails": 0,
                 "vl_exp_fails": 0,
                 "karuta_ignore_errors": 0,
                 "exp_abort": 0}
    dn = os.path.dirname(fn)
    if dn != "":
        dn = dn + "/"
    test_info["dirname"] = dn
    # NOTE(review): ifh is never closed; the handle is leaked.
    ifh = open(fn, "r")
    for line in ifh:
        m = re.search("KARUTA_EXPECTED_ERRORS: (\d+)", line)
        if m:
            test_info["exp_fails"] = int(m.group(1))
        m = re.search("VERILOG_EXPECTED_ERRORS: (\d+)", line)
        if m:
            test_info["vl_exp_fails"] = int(m.group(1))
        m = re.search("VERILOG_EXPECTED_OUTPUT: (\S+)", line)
        if m:
            test_info["vl_exp_output"] = m.group(1)
        m = re.search("VERILOG_OUTPUT: (\S+)", line)
        if m:
            test_info["verilog"] = m.group(1)
        m = re.search("VERILOG_TB: (\S+)", line)
        if m:
            test_info["verilog_tb"] = m.group(1)
        m = re.search("KARUTA_IGNORE_ERRORS:", line)
        if m:
            test_info["karuta_ignore_errors"] = 1
        m = re.search("KARUTA_TIMEOUT: (\d+)", line)
        if m:
            test_info["karuta_timeout"] = int(m.group(1))
        m = re.search("KARUTA_EXPECT_ABORT:", line)
        if m:
            test_info["exp_abort"] = 1
        m = re.search("KARUTA_SPLIT_TEST: (\S+)", line)
        if m:
            test_info["split_info"] = m.group(1)
        m = re.search("SELF_SHELL:", line)
        if m:
            test_info["self_shell"] = 1
            # Self-shell tests derive the Verilog output name from the
            # source file name.
            # NOTE(review): nesting reconstructed from mangled whitespace --
            # confirm this assignment belongs inside the SELF_SHELL branch
            # (an unconditional assignment would make VERILOG_OUTPUT and the
            # '"verilog" in test_info' checks in the callers meaningless).
            test_info["verilog"] = FileBase(fn) + ".v"
    return test_info
def GetVerilogCompileCommand(dut_fn, tb_fn, test_bin_fn):
    """Build the command that compiles the DUT and testbench into test_bin_fn
    with the configured Verilog compiler, searching ../lib for includes."""
    parts = [verilog_compiler, "-o", test_bin_fn, dut_fn, tb_fn, "-I../lib"]
    return " ".join(parts)
def CheckVerilog(dut_fn, source_fn, summary, test_info):
    """Compile and run the Verilog generated for one test, recording the
    outcome in *summary*.

    dut_fn: path of the generated Verilog (device under test).
    source_fn: original .karuta source file (for reporting only).
    summary: collector with AddVerilogCompileFailure/AddVerilogResult.
    test_info: directive dict produced by ReadTestInfo().
    """
    test_bin_fn = tempfile.mktemp()
    test_log_fn = tempfile.mktemp()
    # Choose the testbench: self-shell tests carry their own shell (no tb),
    # otherwise a per-test tb if declared, else the default one.
    if "self_shell" in test_info:
        tb_fn = ""
    elif "verilog_tb" in test_info:
        tb_fn = test_info["dirname"] + test_info["verilog_tb"]
    else:
        tb_fn = default_tb
    cmd = GetVerilogCompileCommand(dut_fn, tb_fn, test_bin_fn)
    print(" compiling " + dut_fn + "(" + cmd + ")")
    os.system(cmd)
    # Compilation failure is detected by the absence of the output binary.
    if not os.path.isfile(test_bin_fn):
        summary.AddVerilogCompileFailure(source_fn)
        return
    test_cmd = test_bin_fn + ">" + test_log_fn
    print(" running verilog executable " + test_bin_fn +
          "(" + test_cmd + ")")
    os.system(test_cmd)
    exp = None
    if "vl_exp_output" in test_info:
        exp = test_info["vl_exp_output"]
    res = CheckLog(test_log_fn, exp)
    if with_synth_test:
        print(" testing synthesizability " + dut_fn)
        sr = synth_test.Process(dut_fn)
        if not sr:
            print("Failed!")
            # Count a synthesizability failure as one extra log failure.
            # (CheckLog always sets "num_fails", so the guard is defensive.)
            e = 0
            if "num_fails" in res:
                e = res["num_fails"]
            res["num_fails"] = e + 1
    if with_verilator_test:
        print(" testing with Verilator " + dut_fn)
        vr = verilator_test.Process(dut_fn)
        if not vr:
            print("Failed!")
            # Same accounting as the synthesizability check above.
            e = 0
            if "num_fails" in res:
                e = res["num_fails"]
            res["num_fails"] = e + 1
    num_fails = res["num_fails"]
    exp_fails = test_info["vl_exp_fails"]
    summary.AddVerilogResult(source_fn, num_fails,
                             test_info["karuta_ignore_errors"],
                             exp_fails)
    # Best-effort cleanup of the temporary binary and log.
    try:
        os.unlink(test_bin_fn)
        os.unlink(test_log_fn)
    except:
        pass
def GetKarutaCommand(source_fn, tf, test_info):
    """Assemble the shell command that runs karuta on *source_fn*, with
    stdout redirected into the log file *tf*."""
    vanilla = "--vanilla"
    if "verilog" in test_info:
        # verilog tests requires imported modules.
        vanilla = ""
    # Timeout in karuta's units; overridable per test via KARUTA_TIMEOUT.
    timeout = "1000"
    if "karuta_timeout" in test_info:
        timeout = str(test_info["karuta_timeout"])
    cmd = "KARUTA_DIR=../lib "
    cmd += karuta_binary + " " + source_fn + " " + vanilla
    if iroha_binary != "":
        cmd += " --iroha_binary " + iroha_binary
    cmd += " --root " + tmp_prefix
    cmd += " --timeout " + timeout + " "
    cmd += " --print_exit_status "
    if "self_shell" in test_info:
        # Self-shell tests are compiled together with their own shell.
        cmd += " --compile --with_shell "
    else:
        cmd += " --module_prefix=mod "
    cmd += " > " + tf
    return cmd
class KarutaTest():
    """Runs one .karuta source file as a test and reports the outcome to a
    summary object (via AddAbort/AddResult and the CheckVerilog helpers)."""

    def __init__(self, source_fn):
        self.source_fn = source_fn

    def RunTest(self, summary):
        """Execute karuta on self.source_fn, optionally check the emitted
        Verilog, and record the pass/fail counts in *summary*."""
        test_info = ReadTestInfo(self.source_fn)
        tf = tempfile.mktemp()
        cmd = GetKarutaCommand(self.source_fn, tf, test_info)
        if "verilog" in test_info:
            # Remove stale Verilog output from a previous run; a missing
            # file is fine. (Narrowed from a bare 'except:' which would
            # also have swallowed KeyboardInterrupt and the like.)
            try:
                os.unlink(tmp_prefix + "/" + test_info["verilog"])
            except OSError:
                pass
        print("executing test " + self.source_fn)
        if "split_info" in test_info:
            print(" split: " + str(test_info["split_info"]))
        print(" command line=" + cmd)
        rv = os.system(cmd)
        if rv:
            summary.AddAbort(rv)
        # Only check the generated Verilog when karuta itself succeeded.
        if rv == 0 and "verilog" in test_info:
            CheckVerilog(tmp_prefix + "/" + test_info["verilog"],
                         self.source_fn,
                         summary, test_info)
        res = CheckLog(tf, None)
        num_fails = res["num_fails"]
        done_stat = res["done_stat"]
        exp_fails = test_info["exp_fails"]
        exp_abort = test_info["exp_abort"]
        summary.AddResult(self.source_fn,
                          num_fails,
                          test_info["karuta_ignore_errors"],
                          done_stat, exp_abort, exp_fails)
        os.unlink(tf)
| nlsynth/nli | tests/karuta_test.py | Python | gpl-3.0 | 6,556 |
#!/usr/bin/env python
"""Codechef INTEST: count how many of n input integers are divisible by k.

Fixed to run on both Python 2 and 3: the original mixed the Python-2-only
raw_input() with Python-3 print(); reading from the stream directly works
under either interpreter, and the logic is now testable via solve().
"""
import sys


def solve(stream):
    """Read 'n k' on the first line, then n integers (one per line) from
    *stream*; return how many of them are divisible by k."""
    first = stream.readline().split(' ')
    k = int(first[1])
    n = int(first[0])
    ans = 0
    for _ in range(n):
        if int(stream.readline()) % k == 0:
            ans += 1
    return ans


if __name__ == "__main__":
    print(solve(sys.stdin))
| jailuthra/misc | codechef/intest.py | Python | mit | 207 |
#!/usr/bin/env python
import copy
import json
import os
import os.path
import re
import sys
from collections import OrderedDict
from CTDopts.CTDopts import (
_Choices,
_FileFormat,
_InFile,
_Null,
_NumericRange,
_OutFile,
_OutPrefix,
ModelError,
ParameterGroup
)
from lxml import etree
from lxml.etree import (
CDATA,
Element,
ElementTree,
parse,
ParseError,
strip_elements,
SubElement
)
from ..common import (
logger,
utils
)
from ..common.exceptions import (
ApplicationException,
InvalidModelException
)
# mapping of CTD types to Galaxy types
TYPE_TO_GALAXY_TYPE = {int: 'integer', float: 'float', str: 'text', bool: 'boolean', _InFile: 'txt',
                       _OutFile: 'txt', _Choices: 'select', _OutPrefix: 'output-prefix'}
# Reverse lookup (Galaxy type name -> CTD type). Built with a comprehension;
# like the original loop, the last CTD type wins for duplicated Galaxy names
# (e.g. 'txt' maps back to _OutFile, not _InFile).
GALAXY_TYPE_TO_TYPE = {galaxy_type: ctd_type
                       for ctd_type, galaxy_type in TYPE_TO_GALAXY_TYPE.items()}
STDIO_MACRO_NAME = "stdio"
REQUIREMENTS_MACRO_NAME = "requirements"
ADVANCED_OPTIONS_NAME = "adv_opts_"
# Macros that every macros file set must provide (see parse_macros_files).
REQUIRED_MACROS = [REQUIREMENTS_MACRO_NAME, STDIO_MACRO_NAME, ADVANCED_OPTIONS_NAME + "macro"]
class ExitCode:
    """Record of one tool exit-code range: the range itself, its severity
    level, and an optional human-readable description."""

    def __init__(self, code_range="", level="", description=None):
        self.range, self.level, self.description = code_range, level, description
class DataType:
    """Associates a CTD/OpenMS file extension with its Galaxy counterpart,
    plus an optional list of composite (name, type) components."""

    def __init__(self, extension, galaxy_extension, composite=None):
        self.extension, self.galaxy_extension, self.composite = (
            extension, galaxy_extension, composite)
def add_specific_args(parser):
    """
    add command line arguments specific for galaxy tool generation
    @param parser an instance of ArgumentParser
    """
    # NOTE(review): several list arguments below use default=[] together with
    # action="append" -- argparse appends into that shared default list
    # object; confirm this is intended (the lists are later flattened in
    # validate_and_prepare_args).
    # --- input description files ---
    parser.add_argument("-f", "--formats-file", dest="formats_file",
                        help="File containing the supported file formats. Run with '-h' or '--help' to see a "
                             "brief example on the layout of this file.", default=None, required=False)
    parser.add_argument("-a", "--add-to-command-line", dest="add_to_command_line",
                        help="Adds content to the command line", default="", required=False)
    # --- Galaxy configuration outputs ---
    parser.add_argument("-d", "--datatypes-destination", dest="data_types_destination",
                        help="Specify the location of a datatypes_conf.xml to modify and add the registered "
                             "data types. If the provided destination does not exist, a new file will be created.",
                        default=None, required=False)
    parser.add_argument("-c", "--default-category", dest="default_category", default="DEFAULT", required=False,
                        help="Default category to use for tools lacking a category when generating tool_conf.xml")
    parser.add_argument("-t", "--tool-conf-destination", dest="tool_conf_destination", default=None, required=False,
                        help="Specify the location of an existing tool_conf.xml that will be modified to include "
                             "the converted tools. If the provided destination does not exist, a new file will"
                             "be created.")
    parser.add_argument("-g", "--galaxy-tool-path", dest="galaxy_tool_path", default=None, required=False,
                        help="The path that will be prepended to the file names when generating tool_conf.xml")
    # --- tool selection (mutually exclusive, enforced later) ---
    parser.add_argument("-r", "--required-tools", dest="required_tools_file", default=None, required=False,
                        help="Each line of the file will be interpreted as a tool name that needs translation. "
                             "Run with '-h' or '--help' to see a brief example on the format of this file.")
    parser.add_argument("-s", "--skip-tools", dest="skip_tools_file", default=None, required=False,
                        help="File containing a list of tools for which a Galaxy stub will not be generated. "
                             "Run with '-h' or '--help' to see a brief example on the format of this file.")
    # --- macros ---
    parser.add_argument("-m", "--macros", dest="macros_files", default=[], nargs="*",
                        action="append", required=None, help="Import the additional given file(s) as macros. "
                                                            "The macros stdio, requirements and advanced_options are "
                                                            "required. Please see galaxy/macros.xml for an example of a "
                                                            "valid macros file. All defined macros will be imported.")
    parser.add_argument("--test-macros", dest="test_macros_files", default=[], nargs="*",
                        action="append", required=None,
                        help="Import tests from the files given file(s) as macros. "
                             "The macro names must end with the id of the tools")
    parser.add_argument("--test-macros-prefix", dest="test_macros_prefix", default=[], nargs="*",
                        action="append", required=None, help="The prefix of the macro name in the corresponding trest macros file")
    # --- test generation options ---
    parser.add_argument("--test-test", dest="test_test", action='store_true', default=False, required=False,
                        help="Generate a simple test for the internal unit tests.")
    parser.add_argument("--test-only", dest="test_only", action='store_true', default=False, required=False,
                        help="Generate only the test section.")
    parser.add_argument("--test-unsniffable", dest="test_unsniffable", nargs="+", default=[], required=False,
                        help="File extensions that can't be sniffed in Galaxy."
                             "Needs to be the OpenMS extensions (1st column in --formats-file)."
                             "For testdata with such extensions ftype will be set in the tes according to the file extension")
    # --- versioning ---
    parser.add_argument("--tool-version", dest="tool_version", required=False, default=None,
                        help="Tool version to use (if not given its extracted from the CTD)")
    parser.add_argument("--tool-profile", dest="tool_profile", required=False, default=None,
                        help="Tool profile version to use (if not given its not set)")
    parser.add_argument("--bump-file", dest="bump_file", required=False,
                        default=None, help="json file defining tool versions."
                                           "tools not listed in the file default to 0."
                                           "if not given @GALAXY_VERSION@ is used")
def modify_param_for_galaxy(param):
    """
    Apply Galaxy-specific adjustments to a CTD parameter and return it.

    Input-file parameters that carry a file default (external applications,
    or files whose default is taken from share/OpenMS) are made optional and
    their default is cleared: external applications are handled by hardcoded
    values, and the remaining cases are chosen automatically when the
    parameter is absent from the command line.
    """
    if param.type is _InFile:
        has_file_default = not (param.default is None or type(param.default) is _Null)
        if param.required and has_file_default:
            logger.warning(f"Data parameter {param.name} with default ({param.default})", 1)
            param.required = False
            param.default = _Null()
    return param
def convert_models(args, parsed_ctds):
    """
    Drive the CTD -> Galaxy conversion: validate the arguments, load the
    auxiliary inputs (file-format table, macros, version bumps, skip and
    required tool lists), then hand everything to _convert_internal().

    @param args command line arguments
    @param parsed_ctds the ctds
    """
    # validate and prepare the passed arguments
    validate_and_prepare_args(args, parsed_ctds[0].ctd_model)
    # parse the given supported file-formats file
    file_formats = parse_file_formats(args.formats_file)
    # extract macro names, verifying that the required ones are present
    expandable_macros = parse_macros_files(args.macros_files,
                                           tool_version=args.tool_version,
                                           supported_file_types=file_formats,
                                           required_macros=REQUIRED_MACROS,
                                           dont_expand=[ADVANCED_OPTIONS_NAME + "macro", "references",
                                                        "list_string_val", "list_string_san",
                                                        "list_float_valsan", "list_integer_valsan"])
    bump = parse_bump_file(args.bump_file)
    check_test_macros(args.test_macros_files, args.test_macros_prefix, parsed_ctds)
    # parse the skip/required tools files
    skip_tools = parse_tools_list_file(args.skip_tools_file)
    required_tools = parse_tools_list_file(args.required_tools_file)
    _convert_internal(parsed_ctds,
                      supported_file_formats=file_formats,
                      default_executable_path=args.default_executable_path,
                      add_to_command_line=args.add_to_command_line,
                      required_tools=required_tools,
                      skip_tools=skip_tools,
                      macros_file_names=args.macros_files,
                      macros_to_expand=expandable_macros,
                      parameter_hardcoder=args.parameter_hardcoder,
                      test_test=args.test_test,
                      test_only=args.test_only,
                      test_unsniffable=args.test_unsniffable,
                      test_macros_file_names=args.test_macros_files,
                      test_macros_prefix=args.test_macros_prefix,
                      tool_version=args.tool_version,
                      tool_profile=args.tool_profile,
                      bump=bump)
def parse_bump_file(bump_file):
    """Load tool-version bump definitions from a JSON file.
    Returns None when no file is given."""
    if bump_file is None:
        return None
    with open(bump_file) as handle:
        return json.load(handle)
def parse_tools_list_file(tools_list_file):
    """
    Read a tool-name-per-line file, skipping blank lines and '#' comments.
    Returns the list of stripped names, or None when no file is given.
    """
    if tools_list_file is None:
        return None
    with open(tools_list_file) as handle:
        stripped_lines = (line.strip() for line in handle)
        return [name for name in stripped_lines
                if name and not name.startswith("#")]
def parse_macros_files(macros_file_names, tool_version, supported_file_types, required_macros=[], dont_expand=[]):
    """Collect the macro names defined across the given macros files and
    rewrite each file in place with updated version/extension tokens.

    For every file, the @TOOL_VERSION@, @GALAXY_VERSION@ and @EXT_FOO@
    tokens are created/refreshed (the latter holds Cheetah helpers that map
    between OpenMS and Galaxy file extensions), and the file is re-written.

    macros_file_names: paths of the macros XML files.
    tool_version: version string to store in @TOOL_VERSION@.
    supported_file_types: DataType list used to build the extension maps.
    required_macros: names that must be defined somewhere, else
        ApplicationException is raised.
    dont_expand: names removed from the returned list.

    Returns the list of macro names to expand in generated tools.
    NOTE(review): required_macros/dont_expand are mutable defaults; they are
    only read here, but confirm callers never rely on mutating them.
    """
    macros_to_expand = []
    for macros_file_name in macros_file_names:
        try:
            macros_file = open(macros_file_name)
            logger.info("Loading macros from %s" % macros_file_name, 0)
            root = parse(macros_file).getroot()
            # Register every <xml name="..."> macro, skipping duplicates.
            for xml_element in root.findall("xml"):
                name = xml_element.attrib["name"]
                if name in macros_to_expand:
                    logger.warning("Macro %s has already been found. Duplicate found in file %s." %
                                   (name, macros_file_name), 0)
                    continue
                logger.info("Macro %s found" % name, 1)
                macros_to_expand.append(name)
        except ParseError as e:
            raise ApplicationException("The macros file " + macros_file_name + " could not be parsed. Cause: " + str(e))
        except OSError as e:
            raise ApplicationException("The macros file " + macros_file_name + " could not be opened. Cause: " + str(e))
        else:
            macros_file.close()
            # Ensure a @TOOL_VERSION@ token exists and carries tool_version.
            tool_ver_tk = root.find("token[@name='@TOOL_VERSION@']")
            galaxy_ver_tk = root.find("token[@name='@GALAXY_VERSION@']")
            if tool_ver_tk is None:
                tool_ver_tk = add_child_node(root, "token", OrderedDict([("name", "@TOOL_VERSION@")]))
                tool_ver_tk.text = tool_version
            # Keep @GALAXY_VERSION@ when the tool version is unchanged;
            # reset it to "0" when the tool version moved.
            if galaxy_ver_tk is not None:
                if tool_version == tool_ver_tk.text:
                    galaxy_ver_tk.text = str(int(galaxy_ver_tk.text))
                else:
                    tool_ver_tk.text = tool_version
                    galaxy_ver_tk.text = "0"
            # (Re)generate the @EXT_FOO@ token: Cheetah helper functions that
            # translate OpenMS <-> Galaxy file extensions.
            ext_foo = root.find("token[@name='@EXT_FOO@']")
            if ext_foo is None:
                ext_foo = add_child_node(root, "token", OrderedDict([("name", "@EXT_FOO@")]))
            g2o, o2g = get_fileformat_maps(supported_file_types)
            # make sure that the backup data type is in the map
            if 'txt' not in g2o:
                g2o['txt'] = 'txt'
            ext_foo.text = CDATA("""#def oms2gxyext(o)
#set m={}
#return m[o]
#end def
#def gxy2omsext(g)
#set m={}
#return m[g]
#end def
""".format(str(o2g), str(g2o)))
            # Write the updated macros file back in place.
            tree = ElementTree(root)
            tree.write(macros_file_name, encoding="UTF-8", xml_declaration=True, pretty_print=True)
            # with open(macros_file_name, "w") as macros_file:
            #     tree = ElementTree(root)
            #     tree.write(macros_file, encoding="UTF-8", xml_declaration=True, pretty_print=True)
    # we depend on "stdio", "requirements" and "advanced_options" to exist on all the given macros files
    missing_needed_macros = []
    for required_macro in required_macros:
        if required_macro not in macros_to_expand:
            missing_needed_macros.append(required_macro)
    if missing_needed_macros:
        raise ApplicationException(
            "The following required macro(s) were not found in any of the given macros files: %s, "
            "see galaxy/macros.xml for an example of a valid macros file."
            % ", ".join(missing_needed_macros))
    # remove macros that should not be expanded
    for m in dont_expand:
        try:
            idx = macros_to_expand.index(m)
            del macros_to_expand[idx]
        except ValueError:
            pass
    return macros_to_expand
def check_test_macros(test_macros_files, test_macros_prefix, parsed_ctds):
    """Ensure every tool has a corresponding test macro in each test-macros
    file, adding empty placeholder macros (and rewriting the file) where one
    is missing; extra macros are reported as warnings.

    test_macros_files: paths of test-macro XML files.
    test_macros_prefix: per-file prefix that each macro name must carry;
        zipped pairwise with test_macros_files.
    parsed_ctds: parsed CTDs whose model names (spaces -> underscores)
        define the expected macro ids.
    """
    # Expected ids: tool names with spaces replaced by underscores.
    tool_ids = set()
    for parsed_ctd in parsed_ctds:
        model = parsed_ctd.ctd_model
        tool_ids.add(model.name.replace(" ", "_"))
    for mf, mp in zip(test_macros_files, test_macros_prefix):
        macro_ids = set()
        try:
            with open(mf) as macros_file:
                root = parse(macros_file).getroot()
                for xml_element in root.findall("xml"):
                    name = xml_element.attrib["name"]
                    if not name.startswith(mp):
                        logger.warning("Testmacro with invalid prefix %s." % (mp), 0)
                        continue
                    # Strip the prefix to recover the tool id.
                    name = name[len(mp):]
                    macro_ids.add(name)
        except ParseError as e:
            raise ApplicationException("The macros file " + mf + " could not be parsed. Cause: " + str(e))
        except OSError as e:
            raise ApplicationException("The macros file " + mf + " could not be opened. Cause: " + str(e))
        # Add an empty macro for every tool that lacks one in this file.
        for t in tool_ids - macro_ids:
            logger.error("missing %s" % t)
            add_child_node(root, "xml", OrderedDict([("name", mp + t)]))
        if len(macro_ids - tool_ids):
            logger.warning("Unnecessary macros in {}: {}".format(mf, macro_ids - tool_ids))
        # Persist the (possibly extended) macros file in place.
        tree = ElementTree(root)
        tree.write(mf, encoding="UTF-8", xml_declaration=True, pretty_print=True)
def parse_file_formats(formats_file):
    """
    Parse the supported-file-formats file into a list of DataType objects.

    Each non-blank, non-comment line holds one to three whitespace-separated
    columns: extension [galaxy_extension [composite_spec]], where
    composite_spec is a comma-separated list of name:type pairs. Lines with
    any other column count are reported and skipped. Returns [] when no
    file is given.
    """
    supported_formats = []
    if formats_file is None:
        return supported_formats
    with open(formats_file) as handle:
        for line_number, line in enumerate(handle, start=1):
            if line is None or not line.strip() or line.strip().startswith("#"):
                # blank line or comment
                continue
            columns = line.strip().split()
            if len(columns) == 1:
                # same extension on both sides
                supported_formats.append(DataType(columns[0], columns[0]))
            elif len(columns) == 2:
                supported_formats.append(DataType(columns[0], columns[1]))
            elif len(columns) == 3:
                composite = [tuple(spec.split(":")) for spec in columns[2].split(",")]
                supported_formats.append(DataType(columns[0], columns[1], composite))
            else:
                logger.warning("Invalid line at line number %d of the given formats file. Line will be ignored:\n%s" % (line_number, line), 0)
    return supported_formats
def get_fileformat_maps(supported_formats):
    """
    Build the two lookup dictionaries between Galaxy data types and
    CTD/OpenMS extensions. In either direction, the first occurrence of an
    extension wins. Returns (galaxy_to_openms, openms_to_galaxy).
    """
    openms_to_galaxy = {}
    galaxy_to_openms = {}
    for fmt in supported_formats:
        # setdefault keeps the first mapping seen, matching the original
        # "only insert if absent" behaviour.
        openms_to_galaxy.setdefault(fmt.extension, fmt.galaxy_extension)
        galaxy_to_openms.setdefault(fmt.galaxy_extension, fmt.extension)
    return galaxy_to_openms, openms_to_galaxy
def validate_and_prepare_args(args, model):
    """
    Sanity-check the command line arguments and fill in defaults.

    @param args command line arguments (mutated in place)
    @param model first parsed CTD model; supplies the default tool version
    @return None
    """
    # skip-tools and required-tools are mutually exclusive
    if args.skip_tools_file is not None and args.required_tools_file is not None:
        raise ApplicationException(
            "You have provided both a file with tools to ignore and a file with required tools.\n"
            "Only one of -s/--skip-tools, -r/--required-tools can be provided.")
    # flatten the list-of-lists produced by nargs="*" + action="append"
    for list_argument in ("macros_files", "test_macros_files", "test_macros_prefix"):
        utils.flatten_list_of_lists(args, list_argument)
    # all given input paths must exist
    for variable_name in ("skip_tools_file", "required_tools_file", "macros_files", "formats_file"):
        utils.validate_argument_is_valid_path(args, variable_name)
    # output destinations, when given, must be file paths (not directories)
    for variable_name in ("data_types_destination", "tool_conf_destination"):
        destination = getattr(args, variable_name)
        if destination is not None and os.path.isdir(destination):
            raise ApplicationException("The provided output file name (%s) points to a directory." % destination)
    if not args.macros_files:
        # list is empty: fall back to the bundled default macros
        logger.warning("Using default macros from galaxy/macros.xml", 0)
        args.macros_files = [os.path.dirname(os.path.abspath(__file__)) + "/macros.xml"]
    if args.tool_version is None:
        args.tool_version = model.version
def get_preferred_file_extension():
    """Return the file extension used for generated Galaxy tool files ("xml")."""
    return "xml"
def _convert_internal(parsed_ctds, **kwargs):
    """
    convert each parsed CTD model into a Galaxy tool XML file
    @param parsed_ctds the parsed CTDs (objects exposing .ctd_model,
        .input_file and .suggested_output_file)
    @param kwargs skip_tools, required_tools, and additional parameters for
        expand_macros, create_command, create_inputs, create_outputs
    @return None (the generated XML is written to the suggested output files)
    """
    parameter_hardcoder = kwargs["parameter_hardcoder"]
    for parsed_ctd in parsed_ctds:
        model = parsed_ctd.ctd_model
        # honor the skip-list / required-list (CLI validation guarantees at
        # most one of the two is set)
        if kwargs["skip_tools"] is not None and model.name in kwargs["skip_tools"]:
            logger.info("Skipping tool %s" % model.name, 0)
            continue
        elif kwargs["required_tools"] is not None and model.name not in kwargs["required_tools"]:
            logger.info("Tool %s is not required, skipping it" % model.name, 0)
            continue
        origin_file = parsed_ctd.input_file
        output_file = parsed_ctd.suggested_output_file
        # overwrite attributes of the parsed ctd parameters as specified in hardcoded parameterd json
        for param in utils.extract_and_flatten_parameters(model):
            hardcoded_attributes = parameter_hardcoder.get_hardcoded_attributes(utils.extract_param_name(param), model.name, 'CTD')
            if hardcoded_attributes is not None:
                for a in hardcoded_attributes:
                    if not hasattr(param, a):
                        continue
                    if a == "type":
                        # map a Galaxy type name back to the CTDopts type;
                        # unknown names are fatal
                        try:
                            t = GALAXY_TYPE_TO_TYPE[hardcoded_attributes[a]]
                        except KeyError:
                            logger.error("Could not set hardcoded attribute {}={} for {}".format(a, hardcoded_attributes[a], param.name))
                            sys.exit(1)
                        setattr(param, a, t)
                    elif type(getattr(param, a)) is _FileFormat or (param.type in [_InFile, _OutFile, _OutPrefix] and a == "restrictions"):
                        setattr(param, a, _FileFormat(str(hardcoded_attributes[a])))
                    elif type(getattr(param, a)) is _Choices:
                        setattr(param, a, _Choices(str(hardcoded_attributes[a])))
                    elif type(getattr(param, a)) is _NumericRange:
                        raise Exception("Overwriting of Numeric Range not implemented")
                    else:
                        setattr(param, a, hardcoded_attributes[a])
        # test-only mode: write just the generated <test> element and move on
        if "test_only" in kwargs and kwargs["test_only"]:
            test = create_test_only(parsed_ctd.ctd_model, **kwargs)
            tree = ElementTree(test)
            output_file = parsed_ctd.suggested_output_file
            logger.info("Writing to %s" % utils.get_filename(output_file), 1)
            tree.write(output_file, encoding="UTF-8", xml_declaration=False, pretty_print=True)
            continue
        logger.info("Converting {} (source {})".format(model.name, utils.get_filename(origin_file)), 0)
        tool = create_tool(model,
                           kwargs.get("tool_profile", None),
                           kwargs.get("bump", None))
        write_header(tool, model)
        create_description(tool, model)
        import_macros(tool, model, **kwargs)
        expand_macros(tool, kwargs["macros_to_expand"])
        # command, inputs, outputs = create_cio(tool, model, **kwargs)
        create_command(tool, model, **kwargs)
        create_configfiles(tool, model, **kwargs)
        inputs = create_inputs(tool, model, **kwargs)
        outputs = create_outputs(tool, model, **kwargs)
        # NOTE(review): inputs/outputs are deep-copied, presumably because
        # create_tests mutates the trees it receives -- confirm in create_tests
        if kwargs["test_test"]:
            create_tests(tool, inputs=copy.deepcopy(inputs), outputs=copy.deepcopy(outputs))
        if kwargs["test_macros_prefix"]:
            create_tests(tool, test_macros_prefix=kwargs['test_macros_prefix'], name=model.name)
        create_help(tool, model)
        # citations are required to be at the end
        expand_macro(tool, "references")
        # wrap our tool element into a tree to be able to serialize it
        tree = ElementTree(tool)
        logger.info("Writing to %s" % utils.get_filename(output_file), 1)
        tree.write(output_file, encoding="UTF-8", xml_declaration=True, pretty_print=True)
def write_header(tool, model):
    """
    add comments to the tool header
    @param tool the tool xml
    @param model the ctd model
    """
    # fixed grammar in the emitted comment: "a tools" -> "a tool"
    tool.addprevious(etree.Comment(
        "This is a configuration file for the integration of a tool into Galaxy (https://galaxyproject.org/). "
        "This file was automatically generated using CTDConverter."))
    # the proposed section is taken from the CTD "category" attribute
    tool.addprevious(etree.Comment('Proposed Tool Section: [%s]' % model.opt_attribs.get("category", "")))
def create_tool(model, profile, bump):
    """
    Build the root <tool> element of the Galaxy wrapper.

    @param model the ctd model
    @param profile Galaxy profile version to set on the tool (or None)
    @param bump mapping from tool name/id to a wrapper version bump (or None)
    @return the <tool> Element carrying id, name and version attributes
    """
    tool_id = model.name.replace(" ", "_")
    # default wrapper version token, overridden by a matching bump entry
    gxy_version = "@GALAXY_VERSION@"
    if bump is not None:
        if model.name in bump:
            gxy_version = str(bump[model.name])
        elif tool_id in bump:
            gxy_version = str(bump[tool_id])
    attrib = OrderedDict([("id", tool_id),
                          ("name", model.name),
                          ("version", "@TOOL_VERSION@+galaxy" + gxy_version)])
    if profile is not None:
        attrib["profile"] = profile
    return Element("tool", attrib)
def create_description(tool, model):
    """
    Append a <description> child to the tool if the model provides one.

    @param tool the Galaxy tool element
    @param model the ctd model
    """
    text = model.opt_attribs.get("description")
    if text is not None:
        SubElement(tool, "description").text = text
def create_configfiles(tool, model, **kwargs):
    """
    create
    - <configfiles><inputs>
    - <configfiles><configfile>
    The former will create a json file containing the tool parameter values
    that can be accessed in cheetah with $args_json. Note that
    data_style="paths" (i.e. input data sets are included in the json) is set
    even if input files are given on the CLI. Reason is that in this way
    default values in the CTD can be restored for optional input files.
    The latter will contain hardcoded parameters.
    @param tool the Galaxy tool element
    @param model the ctd model
    @param kwargs needs parameter_hardcoder
    """
    configfiles_node = add_child_node(tool, "configfiles")
    add_child_node(configfiles_node, "inputs",
                   OrderedDict([("name", "args_json"), ("data_style", "paths")]))
    parameter_hardcoder = kwargs.get("parameter_hardcoder")
    # nested dict mirroring the CTD parameter hierarchy, filled only with
    # hardcoded values
    hc_dict = dict()
    for param in utils.extract_and_flatten_parameters(model):
        hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
        if hardcoded_value is None:
            continue
        path = utils.extract_param_path(param)
        # make sure every intermediate level of the path exists before
        # setting the leaf value
        for i, v in enumerate(path[:-1]):
            try:
                utils.getFromDict(hc_dict, path[:i + 1])
            except KeyError:
                utils.setInDict(hc_dict, path[:i + 1], {})
        utils.setInDict(hc_dict, path, hardcoded_value)
    hc_node = add_child_node(configfiles_node, "configfile",
                             OrderedDict([("name", "hardcoded_json")]))
    # escape $ so cheetah does not try to interpolate inside the json
    hc_node.text = CDATA(json.dumps(hc_dict).replace('$', r'\$'))
    # print(json.dumps(hc_dict))
def create_command(tool, model, **kwargs):
    """
    add the <command> section to the tool: assembles the cheetah template
    that (1) prepares input links, (2) calls the executable with a filled
    ctd file and (3) moves/renames outputs afterwards
    @param tool the Galaxy tool
    @param model the ctd model
    @param kwargs needs parameter_hardcoder, supported_file_formats,
        add_to_command_line; passed through from _convert_internal
    """
    # main command
    # the command is assembled in three stages which are concatenated at the
    # end; advanced parameters are collected separately and wrapped in an #if
    final_cmd = OrderedDict([('preprocessing', []), ('command', []), ('postprocessing', [])])
    advanced_cmd = {'preprocessing': [], 'command': [], 'postprocessing': []}
    final_cmd['preprocessing'].extend(["@QUOTE_FOO@", "@EXT_FOO@", "#import re", "", "## Preprocessing"])
    # - call the executable with -write_ctd to write the ctd file (with defaults)
    # - use fill_ctd.py to overwrite the defaults in the ctd file with the
    #   Galaxy parameters in the JSON file (from inputs config file)
    # - feed the ctd file to the executable (with -ini)
    # note: input and output file parameters are still given on the command line
    # - output file parameters are not included in the JSON file
    # - input and output files are accessed through links / files that have the correct extension
    final_cmd['command'].extend(["", "## Main program call"])
    final_cmd['command'].append("""
set -o pipefail &&
@EXECUTABLE@ -write_ctd ./ &&
python3 '$__tool_directory__/fill_ctd.py' '@EXECUTABLE@.ctd' '$args_json' '$hardcoded_json' &&
@EXECUTABLE@ -ini @EXECUTABLE@.ctd""")
    final_cmd['command'].extend(kwargs["add_to_command_line"])
    final_cmd['postprocessing'].extend(["", "## Postprocessing"])
    advanced_command_start = "#if ${aon}cond.{aon}selector=='advanced':".format(aon=ADVANCED_OPTIONS_NAME)
    advanced_command_end = "#end if"
    parameter_hardcoder = kwargs["parameter_hardcoder"]
    supported_file_formats = kwargs["supported_file_formats"]
    g2o, o2g = get_fileformat_maps(supported_file_formats)
    for param in utils.extract_and_flatten_parameters(model):
        param = modify_param_for_galaxy(param)
        # per-parameter command fragments, merged into final/advanced below
        param_cmd = {'preprocessing': [], 'command': [], 'postprocessing': []}
        command_line_prefix = utils.extract_command_line_prefix(param, model)
        # TODO use utils.extract_param_name(param).replace(":", "_")? Then hardcoding ctd variables (with :) and tool variables (with _) can be distinguished
        if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name):
            continue
        hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
        if hardcoded_value is not None:
            pass  # TODO hardcoded values should go to <inputs>
            # param_cmd['command'].append("%s %s" % (command_line_prefix, hardcoded_value))
        else:
            # in the else branch the parameter is neither blacklisted nor hardcoded...
            _actual_parameter = get_galaxy_parameter_path(param)
            actual_parameter = get_galaxy_parameter_path(param, fix_underscore=True)
            # all but bool params need the command line argument (bools have it already in the true/false value)
            if param.type is _OutFile or param.type is _OutPrefix or param.type is _InFile:
                param_cmd['command'].append(command_line_prefix)
            # preprocessing for file inputs:
            # - create a dir with name param.name
            # - create a link to id.ext in this directory
            # rationale: in the autogenerated tests the same file was used as input to multiple parameters
            # this leads to conflicts while linking... might also be better in general
            if param.type is _InFile:
                param_cmd['preprocessing'].append("mkdir %s &&" % actual_parameter)
                if param.is_list:
                    param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + _actual_parameter + ") if f])} && ")
                    param_cmd['preprocessing'].append("${' '.join([\"ln -s '%s' '" + actual_parameter + "/%s/%s.%s' && \" % (f, i, re.sub('[^\\w\\-_]', '_', f.element_identifier), $gxy2omsext(f.ext)) for i, f in enumerate($" + _actual_parameter + ") if f])}")
                    param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\\w\\-_]', '_', f.element_identifier), $gxy2omsext(f.ext)) for i, f in enumerate($" + _actual_parameter + ") if f])}")
                else:
                    param_cmd['preprocessing'].append("ln -s '$" + _actual_parameter + "' '" + actual_parameter + "/${re.sub(\"[^\\w\\-_]\", \"_\", $" + _actual_parameter + ".element_identifier)}.$gxy2omsext($" + _actual_parameter + ".ext)' &&")
                    param_cmd['command'].append("'" + actual_parameter + "/${re.sub(\"[^\\w\\-_]\", \"_\", $" + _actual_parameter + ".element_identifier)}.$gxy2omsext($" + _actual_parameter + ".ext)'")
            elif param.type is _OutPrefix:
                param_cmd['preprocessing'].append("mkdir %s &&" % actual_parameter)
                param_cmd['command'].append(actual_parameter + "/")
            elif param.type is _OutFile:
                _actual_parameter = get_galaxy_parameter_path(param, separator="_")
                actual_parameter = get_galaxy_parameter_path(param, separator="_", fix_underscore=True)
                # check if there is a parameter that sets the format
                # if so we add an extension to the generated files which will be used to
                # determine the format in the output tag
                # in all other cases (corresponding input / there is only one allowed format)
                # the format will be set in the output tag
                formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[param.type])
                type_param = get_out_type_param(param, model, parameter_hardcoder)
                corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
                # print("ci %s ffc %s" % (corresponding_input.name, fmt_from_corresponding))
                # print("formats %s" % (formats))
                if corresponding_input is not None:
                    actual_input_parameter = get_galaxy_parameter_path(corresponding_input)
                else:
                    actual_input_parameter = None
                # print(len(formats) > 1, (corresponding_input is None or not
                #       fmt_from_corresponding))
                if type_param is not None:
                    type_param_name = get_galaxy_parameter_path(type_param)
                elif len(formats) > 1 and (corresponding_input is None or not
                                           fmt_from_corresponding):  # and not param.is_list:
                    type_param_name = get_galaxy_parameter_path(param, suffix="type")
                else:
                    type_param_name = None
                # print("tp %s" % type_param_name)
                param_cmd['preprocessing'].append("mkdir " + actual_parameter + " &&")
                # if there is only one format (the outoput node sets format using the format attribute of the data/discover node)
                # - single file: write to temp file with oms extension and move this to the actual result file
                # - lists: write to files with the oms extension and remove the extension afterwards (discovery with __name__)
                if len(formats) == 1:
                    fmt = formats.pop()
                    if param.is_list:
                        logger.info(f"1 fmt + list {param.name} -> {actual_input_parameter}", 1)
                        param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + actual_input_parameter + ") if f])} && ")
                        param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\\w\\-_]', '_', f.element_identifier), $gxy2omsext(\"" + fmt + "\")) for i, f in enumerate($" + actual_input_parameter + ") if f])}")
                        param_cmd['postprocessing'].append("${' '.join([\"&& mv -n '" + actual_parameter + "/%(bn)s/%(id)s.%(gext)s' '" + _actual_parameter + "/%(bn)s/%(id)s'\"%{\"bn\": i, \"id\": re.sub('[^\\w\\-_]', '_', f.element_identifier), \"gext\": $gxy2omsext(\"" + fmt + "\")} for i, f in enumerate($" + actual_input_parameter + ") if f])}")
                    else:
                        logger.info("1 fmt + dataset %s" % param.name, 1)
                        param_cmd['command'].append("'" + actual_parameter + "/output.${gxy2omsext(\"" + fmt + "\")}'")
                        param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${gxy2omsext(\"" + fmt + "\")}' '$" + _actual_parameter + "'")
                # if there is a type parameter then we use the type selected by the user
                # - single: write to temp file with the oms extension and mv it to the actual file output which is treated via change_format
                # - list: let the command create output files with the oms extensions, postprocessing renames them to the galaxy extensions, output is then discover + __name_and_ext__
                elif type_param_name is not None:
                    if param.is_list:
                        logger.info("type + list %s" % param.name, 1)
                        param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + actual_input_parameter + ") if f])} && ")
                        param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\\w\\-_]', '_', f.element_identifier), $" + type_param_name + ") for i, f in enumerate($" + actual_input_parameter + ") if f])}")
                        param_cmd['postprocessing'].append("${' '.join([\"&& mv -n '" + actual_parameter + "/%(bn)s/%(id)s.%(omsext)s' '" + actual_parameter + "/%(bn)s/%(id)s.%(gext)s'\"%{\"bn\": i, \"id\": re.sub('[^\\w\\-_]', '_', f.element_identifier), \"omsext\":$" + type_param_name + ", \"gext\": $oms2gxyext(str($" + type_param_name + "))} for i, f in enumerate($" + actual_input_parameter + ") if f])}")
                    else:
                        logger.info("type + dataset %s" % param.name, 1)
                        # 1st create file with openms extension (often required by openms)
                        # then move it to the actual place specified by the parameter
                        # the format is then set by the <data> tag using <change_format>
                        param_cmd['command'].append("'" + actual_parameter + "/output.${" + type_param_name + "}'")
                        # NOTE(review): this mv target uses actual_parameter while the
                        # sibling branches use _actual_parameter -- looks inconsistent,
                        # confirm which galaxy variable is intended here
                        param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${" + type_param_name + "}' '$" + actual_parameter + "'")
                elif actual_input_parameter is not None:
                    if param.is_list:
                        logger.info("actual + list %s" % param.name, 1)
                        param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + actual_input_parameter + ") if f])} && ")
                        param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\\w\\-_]', '_', f.element_identifier), f.ext) for i, f in enumerate($" + actual_input_parameter + ") if f])}")
                    else:
                        logger.info(f"actual + dataset {param.name} {actual_input_parameter} {corresponding_input.is_list}", 1)
                        if corresponding_input.is_list:
                            param_cmd['command'].append("'" + actual_parameter + "/output.${" + actual_input_parameter + "[0].ext}'")
                            param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${" + actual_input_parameter + "[0].ext}' '$" + _actual_parameter + "'")
                        else:
                            param_cmd['command'].append("'" + actual_parameter + "/output.${" + actual_input_parameter + ".ext}'")
                            param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${" + actual_input_parameter + ".ext}' '$" + _actual_parameter + "'")
                else:
                    if param.is_list:
                        # NOTE(review): the "%s" placeholder is never filled in, the
                        # exception message will contain a literal %s
                        raise Exception("output parameter itemlist %s without corresponding input")
                    else:
                        logger.info("else + dataset %s" % param.name, 1)
                        param_cmd['command'].append("'$" + _actual_parameter + "'")
            # # select with multiple = true
            # elif is_selection_parameter(param) and param.is_list:
            #     param_cmd['command'].append("${' '.join(['\"%s\"'%str(_) for _ in str($" + actual_parameter + ").split(',')])}")
            # elif param.is_list:
            #     param_cmd['command'].append("$quote($%s" % actual_parameter + ")")
            #     #command += "${' '.join([\"'%s'\"%str(_) for _ in $" + actual_parameter + "])}\n"
            # elif is_boolean_parameter(param):
            #     param_cmd['command'].append("$%s" % actual_parameter + "")
            # else:
            #     param_cmd['command'].append('"$' + actual_parameter + '"')
            # add if statement for optional parameters and preprocessing
            # - for optional outputs (param_out_x) the presence of the parameter
            #   depends on the additional input (param_x) -> need no if
            # - real string parameters (i.e. ctd type string wo restrictions) also
            #   need no if (otherwise the empty string could not be provided)
            if not (param.required or is_boolean_parameter(param) or (param.type is str and param.restrictions is None)):
                # and not(param.type is _InFile and param.is_list):
                actual_parameter = get_galaxy_parameter_path(param, suffix="FLAG", fix_underscore=True)
                _actual_parameter = get_galaxy_parameter_path(param, suffix="FLAG")
                for stage in param_cmd:
                    if len(param_cmd[stage]) == 0:
                        continue
                    # special case for optional itemlists: for those if no option is selected only the parameter must be specified
                    if is_selection_parameter(param) and param.is_list and param.required is False:
                        param_cmd[stage] = [param_cmd[stage][0]] + ["#if $" + _actual_parameter + ":"] + utils.indent(param_cmd[stage][1:]) + ["#end if"]
                    elif is_selection_parameter(param) or param.type is _InFile:
                        param_cmd[stage] = ["#if $" + _actual_parameter + ":"] + utils.indent(param_cmd[stage]) + ["#end if"]
                    elif param.type is _OutFile or param.type is _OutPrefix:
                        param_cmd[stage] = ["#if \"" + param.name + "_FLAG\" in str($OPTIONAL_OUTPUTS).split(',')"] + utils.indent(param_cmd[stage]) + ["#end if"]
                    else:
                        param_cmd[stage] = ["#if str($" + _actual_parameter + "):"] + utils.indent(param_cmd[stage]) + ["#end if"]
        # merge the per-parameter fragments into the advanced section or the
        # final command (outputs always go to the final command)
        for stage in param_cmd:
            if len(param_cmd[stage]) == 0:
                continue
            if param.advanced and hardcoded_value is None and not (param.type is _OutFile or param.type is _OutPrefix):
                advanced_cmd[stage].extend(param_cmd[stage])
            else:
                final_cmd[stage].extend(param_cmd[stage])
    # wrap all advanced fragments in the advanced-options #if
    for stage in advanced_cmd:
        if len(advanced_cmd[stage]) == 0:
            continue
        advanced_cmd[stage] = [advanced_command_start] + utils.indent(advanced_cmd[stage]) + [advanced_command_end]
        final_cmd[stage].extend(advanced_cmd[stage])
    # capture stdout if there are optional outputs (or no outputs at all)
    out, optout = all_outputs(model, parameter_hardcoder)
    if len(optout) > 0 or len(out) + len(optout) == 0:
        stdout = ["| tee '$stdout'"]
        if len(optout) > 0:
            stdout = ["#if len(str($OPTIONAL_OUTPUTS).split(',')) == 0"] + utils.indent(stdout) + ["#end if"]
        final_cmd['command'].extend(stdout)
    # optionally keep the filled ctd (ini) file as an extra output
    ctd_out = ["#if \"ctd_out_FLAG\" in $OPTIONAL_OUTPUTS"] + utils.indent(["&& mv '@EXECUTABLE@.ctd' '$ctd_out'"]) + ["#end if"]
    final_cmd['postprocessing'].extend(ctd_out)
    command_node = add_child_node(tool, "command")
    command_node.attrib["detect_errors"] = "exit_code"
    command_node.text = CDATA("\n".join(sum(final_cmd.values(), [])))
def import_macros(tool, model, **kwargs):
    """
    creates the xml elements needed to import the needed macros files
    @param tool the Galaxy tool
    @param model the ctd model
    @param kwargs needs default_executable_path, macros_file_names and
        test_macros_file_names
    """
    macros_node = add_child_node(tool, "macros")
    token_node = add_child_node(macros_node, "token")
    token_node.attrib["name"] = "@EXECUTABLE@"
    token_node.text = utils.extract_tool_executable_path(model, kwargs["default_executable_path"])
    # add <import> nodes
    for macro_file_name in kwargs["macros_file_names"] + kwargs["test_macros_file_names"]:
        import_node = add_child_node(macros_node, "import")
        # do not add the path of the file, rather, just its basename.
        # fix: the previous implementation opened the file (leaking the
        # handle) only to read back the name it already had
        import_node.text = os.path.basename(macro_file_name)
def expand_macro(node, macro, attribs=None):
    """
    Append an <expand macro="..."/> element to the given node.

    @param node parent element
    @param macro name of the macro to expand
    @param attribs optional mapping of extra attributes for the <expand> node
    @return the created <expand> element
    """
    exp = add_child_node(node, "expand")
    exp.attrib["macro"] = macro
    if attribs:
        for key in attribs:
            exp.attrib[key] = attribs[key]
    return exp
# and to "expand" the macros in a node
def expand_macros(node, macros_to_expand):
    """
    Add one <expand macro="..."/> child to node per requested macro.

    @param node parent element
    @param macros_to_expand iterable of macro names
    """
    # reuse expand_macro instead of duplicating its logic; the original loop
    # variable also shadowed the sibling function expand_macro
    for macro_name in macros_to_expand:
        expand_macro(node, macro_name)
def get_galaxy_parameter_path(param, separator=".", suffix=None, fix_underscore=False):
    """
    Get the complete path for a parameter as a string where the path
    components are joined by the given separator. A given suffix can
    be appended.
    """
    name = get_galaxy_parameter_name(param, suffix, fix_underscore)
    components = utils.extract_param_path(param, fix_underscore)
    # nested parameter: join enclosing sections and the parameter name
    if len(components) > 1:
        joined = separator.join(components[:-1]) + separator + name
        return joined.replace("-", "_")
    # top-level advanced parameters live inside the advanced conditional
    # (output files only when a suffix is requested)
    if param.advanced and (param.type is not _OutFile or suffix):
        return ADVANCED_OPTIONS_NAME + "cond." + name
    return name
def get_galaxy_parameter_name(param, suffix=None, fix_underscore=False):
    """
    get the name of the parameter used in the galaxy tool
    - replace : and - by _
    - add suffix for output parameters if not None
    the idea of suffix is to be used for optional outputs (out_x) for
    which an additional boolean input (out_x_FLAG) exists
    @param param the parameter
    @param suffix suffix to append
    @return the name used for the parameter in the tool form
    """
    name = param.name.replace("-", "_")
    # drop a single leading underscore if requested
    if fix_underscore and name.startswith("_"):
        name = name[1:]
    if suffix is not None and param.type is _OutFile:
        return f"{name}_{suffix}"
    return name
def get_out_type_param(out_param, model, parameter_hardcoder):
    """
    check if there is a parameter that has the same name with appended _type
    and return it if present, otherwise return None
    """
    type_name = out_param.name + "_type"
    # a blacklisted type parameter is treated as absent
    if parameter_hardcoder.get_blacklist(type_name, model.name):
        return None
    return next((p for p in utils.extract_and_flatten_parameters(model)
                 if p.name == type_name), None)
def is_in_type_param(param, model):
    """
    check if the parameter is the *_type companion of an input file
    parameter (see is_type_param)
    """
    return is_type_param(param, model, [_InFile])
def is_out_type_param(param, model):
    """
    check if the parameter is an output _type parameter, i.e.
    - the name ends with _type and there is an output parameter without this suffix
    and return True iff this is the case
    """
    return is_type_param(param, model, [_OutFile, _OutPrefix])
def is_type_param(param, model, tpe):
    """
    check if the parameter is the _type parameter of an in/output, i.e.
    its name ends with _type and there is an in/output parameter (of one
    of the given types) whose name equals the part before the suffix
    """
    if not param.name.endswith("_type"):
        return False
    base_name = param.name[:-len("_type")]
    return any(other.type in tpe and other.name == base_name
               for other in utils.extract_and_flatten_parameters(model))
def get_corresponding_input(out_param, model):
    """
    get the input parameter corresponding to the given output
    1st try to get the input with the type (single file/list) and same format restrictions
    if this fails get the input that has the same type
    in both cases there must be only one such input
    return the found input parameter and True iff the 1st case applied
    """
    strict_match = get_input_with_same_restrictions(out_param, model, True)
    if strict_match is not None:
        return (strict_match, True)
    return (get_input_with_same_restrictions(out_param, model, False), False)
def get_input_with_same_restrictions(out_param, model, check_formats):
    """
    get the input parameter that has the same restrictions (ctd file_formats)
    - in a first pass input and output must both be lists or both be simple
      parameters; if nothing matches, the list/simple constraint is dropped
    - the match is returned only if it is unique, otherwise None
    """

    def _restrictions_match(in_param):
        # format check requested: both unrestricted, or identical format lists
        if not check_formats:
            return True
        if in_param.restrictions is None and out_param.restrictions is None:
            return True
        return (in_param.restrictions is not None
                and out_param.restrictions is not None
                and in_param.restrictions.formats == out_param.restrictions.formats)

    matching = []
    for allow_different_type in (False, True):
        matching = [p for p in utils.extract_and_flatten_parameters(model)
                    if p.type is _InFile
                    and (allow_different_type or p.is_list == out_param.is_list)
                    and _restrictions_match(p)]
        if matching:
            break
    return matching[0] if len(matching) == 1 else None
def create_inputs(tool, model, **kwargs):
    """
    create input section of the Galaxy tool
    @param tool the Galaxy tool
    @param model the ctd model
    @param kwargs needs parameter_hardcoder and supported_file_formats
    @return inputs node
    """
    inputs_node = SubElement(tool, "inputs")
    # ParameterGroups become <section> nodes; collected first, nested and
    # appended at the end so empty sections can be dropped
    section_nodes = dict()
    section_params = dict()
    # some suites (such as OpenMS) need some advanced options when handling inputs
    advanced_node = Element("expand", OrderedDict([("macro", ADVANCED_OPTIONS_NAME + "macro")]))
    parameter_hardcoder = kwargs["parameter_hardcoder"]
    supported_file_formats = kwargs["supported_file_formats"]
    g2o, o2g = get_fileformat_maps(supported_file_formats)
    # treat all non output-file/advanced/blacklisted/hardcoded parameters as inputs
    for param in utils.extract_and_flatten_parameters(model, True):
        if type(param) is ParameterGroup:
            title, help_text = generate_label_and_help(param.description)
            section_params[utils.extract_param_name(param)] = param
            section_nodes[utils.extract_param_name(param)] = Element("section", OrderedDict([("name", param.name), ("title", title), ("help", help_text), ("expanded", "false")]))
            continue
        param = modify_param_for_galaxy(param)
        # no need to show hardcoded parameters
        hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
        if hardcoded_value is not None:
            continue
        if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name):
            continue
        # do not output file type parameters for inputs since file types are
        # known by Galaxy and set automatically by extension (which comes from
        # the Galaxy data type which is translated to OpenMS datatype as defined
        # in filetypes.txt )
        if is_in_type_param(param, model):
            continue
        # choose where the param node is attached: its section, the advanced
        # block, or the top-level inputs
        if utils.extract_param_name(param.parent) in section_nodes:
            parent_node = section_nodes[utils.extract_param_name(param.parent)]
        elif param.advanced:
            parent_node = advanced_node
        else:
            parent_node = inputs_node
        # sometimes special inputs are needed for outfiles:
        if param.type is _OutFile or param.type is _OutPrefix:
            # if there are multiple possible output formats, but no parameter to choose the type or a
            # corresponding input then add a selection parameter
            formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_OutFile])
            type_param = get_out_type_param(param, model, parameter_hardcoder)
            corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
            if len(formats) > 1 and type_param is None and (corresponding_input is None or not
                                                            fmt_from_corresponding):  # and not param.is_list:
                fmt_select = add_child_node(parent_node, "param", OrderedDict([("name", param.name + "_type"), ("type", "select"), ("optional", "false"), ("label", f"File type of output {param.name} ({param.description})")]))
                g2o, o2g = get_fileformat_maps(kwargs["supported_file_formats"])
                # for f in formats:
                #     option_node = add_child_node(fmt_select, "option", OrderedDict([("value", g2o[f])]), f)
                for choice in param.restrictions.formats:
                    option_node = add_child_node(fmt_select, "option", OrderedDict([("value", str(choice))]))
                    option_node.text = o2g[str(choice)]
                    # show the OpenMS extension next to the Galaxy type when
                    # they differ
                    if choice.lower() != o2g[str(choice)]:
                        option_node.text += " (%s)" % choice
            # output params get no regular input node
            continue
        # create the actual param node and fill the attributes
        param_node = add_child_node(parent_node, "param")
        create_param_attribute_list(param_node, param, model, kwargs["supported_file_formats"])
        hardcoded_attributes = parameter_hardcoder.get_hardcoded_attributes(param.name, model.name, 'XML')
        if hardcoded_attributes is not None:
            for a in hardcoded_attributes:
                param_node.attrib[a] = str(hardcoded_attributes[a])
    # nest sections into their parent section (or the inputs node), skipping
    # empty sections that are not parents of other sections
    section_parents = [utils.extract_param_name(section_params[sn].parent) for sn in section_nodes]
    for sn in section_nodes:
        if len(section_nodes[sn]) == 0 and sn not in section_parents:
            continue
        if utils.extract_param_name(section_params[sn].parent) in section_nodes:
            section_nodes[utils.extract_param_name(section_params[sn].parent)].append(section_nodes[sn])
        else:
            inputs_node.append(section_nodes[sn])
    # if there is an advanced section then append it at the end of the inputs
    inputs_node.append(advanced_node)
    # Add select for optional outputs
    out, optout = all_outputs(model, parameter_hardcoder)
    attrib = OrderedDict([("name", "OPTIONAL_OUTPUTS"),
                          ("type", "select"),
                          ("optional", "true"),
                          ("multiple", "true"),
                          ("label", "Optional outputs")])
    # if len(out) == 0 and len(out) + len(optout) > 0:
    #     attrib["optional"] = "false"
    # else:
    #     attrib["optional"] = "true"
    param_node = add_child_node(inputs_node, "param", attrib)
    for o in optout:
        title, help_text = generate_label_and_help(o.description)
        option_node = add_child_node(param_node, "option",
                                     OrderedDict([("value", o.name + "_FLAG")]),
                                     text=f"{o.name} ({title})")
    option_node = add_child_node(param_node, "option",
                                 OrderedDict([("value", "ctd_out_FLAG")]),
                                 text="Output used ctd (ini) configuration file")
    return inputs_node
def is_default(value, param):
    """
    check if the value is the default of the param or if the value is
    contained in a list-valued default
    """
    if param.default == value:
        return True
    return type(param.default) is list and value in param.default
def get_formats(param, model, o2g):
    """
    determine format attribute from the CTD restictions (i.e. the OpenMS extensions)
    - also check if all listed possible formats are supported in Galaxy and warn if necessary
    """
    restrictions = param.restrictions
    if restrictions is None:
        return []
    if type(restrictions) is _FileFormat:
        choices = restrictions.formats
    elif is_out_type_param(param, model):
        choices = restrictions.choices
    else:
        raise InvalidModelException("Unrecognized restriction type [%(type)s] "
                                    "for [%(name)s]" % {"type": type(restrictions),
                                                        "name": param.name})
    # keep only formats that have a registered Galaxy counterpart
    known = set()
    for format_name in choices:
        if format_name in o2g:
            known.add(format_name)
        else:
            logger.warning(f"Ignoring unknown format {format_name} for parameter {param.name}", 1)
    return sorted(known)
def get_galaxy_formats(param, model, o2g, default=None):
    """
    determine galaxy formats for a parm (i.e. list of allowed Galaxy extensions)
    from the CTD restictions (i.e. the OpenMS extensions)
    - if there is a single one, then take this
    - if there is none than use given default
    """
    oms_formats = get_formats(param, model, o2g)
    gxy_formats = {o2g[fmt] for fmt in oms_formats if fmt in o2g}
    if not gxy_formats:
        if default is None:
            raise InvalidModelException("No supported formats [%(type)s] "
                                        "for [%(name)s]" % {"type": type(param.restrictions),
                                                            "name": param.name})
        gxy_formats.add(default)
    return sorted(gxy_formats)
def create_param_attribute_list(param_node, param, model, supported_file_formats):
    """
    Fill in the attributes (type, format, optional, value, label, help, ...)
    of a Galaxy input <param> node from a CTD parameter.

    @param param_node the galaxy tool param node
    @param param the ctd parameter
    @param model the ctd model
    @param supported_file_formats mapping used to translate OpenMS<->Galaxy formats
    """
    g2o, o2g = get_fileformat_maps(supported_file_formats)
    # set the name, argument and a first guess for the type (which will be overwritten
    # in some cases .. see below)
    # even if the conversion relies on the fact that the param names are identical
    # to the ctd ITEM names we replace dashes by underscores because input and output
    # parameters need to be treated in cheetah. variable names are currently fixed back
    # to dashes in fill_ctd.py. currently there seems to be only a single tool
    # requiring this https://github.com/OpenMS/OpenMS/pull/4529
    param_node.attrib["name"] = get_galaxy_parameter_name(param)
    param_node.attrib["argument"] = "-%s" % utils.extract_param_name(param)
    param_type = TYPE_TO_GALAXY_TYPE[param.type]
    if param_type is None:
        raise ModelError("Unrecognized parameter type %(type)s for parameter %(name)s"
                         % {"type": param.type, "name": param.name})
    # ITEMLIST is rendered as text field (even if its integers or floats), an
    # exception is files which are treated a bit below
    if param.is_list:
        param_type = "text"
    if is_selection_parameter(param):
        param_type = "select"
        if len(param.restrictions.choices) < 5:
            param_node.attrib["display"] = "checkboxes"
        if param.is_list:
            param_node.attrib["multiple"] = "true"
    if is_boolean_parameter(param):
        param_type = "boolean"
    if param.type is _InFile:
        # assume it's just text unless restrictions are provided
        param_node.attrib["type"] = "data"
        param_node.attrib["format"] = ",".join(get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_InFile]))
        # in the case of multiple input set multiple flag
        if param.is_list:
            param_node.attrib["multiple"] = "true"
    else:
        param_node.attrib["type"] = param_type
    # set the optional attribute of parameters
    #
    # OpenMS sets text, int, select, bool parameters that have a default
    # as optional (required=False), the default value is set implicitly if no
    # value is given.
    # This is reasonable for the CLI because one certainly does not want the
    # user to specify the default manually for all parameters.
    # For Galaxy tools setting these parameters as required leads to the
    # equivalent behavior. Assuming required is better because it makes
    # the implicit setting of parameters more transparent to the user
    # (in Galaxy the default would be prefilled in the form and at least
    # one option needs to be selected).
    if not (param.default is None or type(param.default) is _Null) and param_node.attrib["type"] in ["integer", "float", "text", "boolean", "select"]:
        # FIXME(review): error-level log for every defaulted parameter looks like
        # leftover debugging output -- confirm and remove/downgrade
        logger.error("%s %s %s %s %s" % (param.name, param.default is None, type(param.default) is _Null, param_type, param.type))
        param_node.attrib["optional"] = "false"
    else:
        param_node.attrib["optional"] = str(not param.required).lower()
    # check for parameters with restricted values (which will correspond to a "select" in galaxy)
    if param.restrictions is not None or param_type == "boolean":
        # it could be either _Choices or _NumericRange, with special case for boolean types
        if param_type == "boolean":
            create_boolean_parameter(param_node, param)
        elif type(param.restrictions) is _Choices:
            # TODO if the parameter is used to select the output file type the
            # options need to be replaced with the Galaxy data types
            # if is_out_type_param(param, model):
            #     param.restrictions.choices = get_supported_file_types(param.restrictions.choices, supported_file_formats)
            # create as many <option> elements as restriction values
            if is_out_type_param(param, model):
                logger.warning(f"{param.name} {param.type}")
                formats = get_formats(param, model, o2g)
                for fmt in formats:
                    option_node = add_child_node(param_node, "option",
                                                 OrderedDict([("value", str(fmt))]))
                    option_node.text = o2g[str(fmt)]
                    if fmt.lower() != o2g[str(fmt)]:
                        option_node.text += " (%s)" % fmt
                    if is_default(fmt, param):
                        option_node.attrib["selected"] = "true"
            else:
                for choice in param.restrictions.choices:
                    option_node = add_child_node(param_node, "option",
                                                 OrderedDict([("value", str(choice))]),
                                                 text=str(choice))
                    if is_default(choice, param):
                        option_node.attrib["selected"] = "true"
            # add validator to check that "nothing selected" is not selected for mandatory options w/o default
            # fixed: the comparison used "False", but only lower-case "true"/"false" are
            # ever stored in the attribute above, so the validator was never added
            if param_node.attrib["optional"] == "false" and (param.default is None or type(param.default) is _Null):
                validator_node = add_child_node(param_node, "validator", OrderedDict([("type", "expression"), ("message", "A value needs to be selected")]))
                validator_node.text = 'value != "select a value"'
        # numeric ranges (which appear for int and float ITEMS and ITEMLISTS)
        # these are reflected by min and max attributes
        # since item lists become text parameters + validator these don't need these attributes
        elif type(param.restrictions) is _NumericRange and param_type == "text":
            pass
        elif type(param.restrictions) is _NumericRange and param_type != "text":
            if param.type is not int and param.type is not float:
                raise InvalidModelException("Expected either 'int' or 'float' in the numeric range restriction for "
                                            "parameter [%(name)s], but instead got [%(type)s]" %
                                            {"name": param.name, "type": type(param.restrictions)})
            # extract the min and max values and add them as attributes
            # validate the provided min and max values
            if param.restrictions.n_min is not None:
                param_node.attrib["min"] = str(param.restrictions.n_min)
            if param.restrictions.n_max is not None:
                param_node.attrib["max"] = str(param.restrictions.n_max)
        elif type(param.restrictions) is _FileFormat:
            # has already been handled
            pass
        else:
            raise InvalidModelException("Unrecognized restriction type [%(type)s] for parameter [%(name)s]"
                                        % {"type": type(param.restrictions), "name": param.name})
    if param_type == "text":
        # for repeats (which are rendered as text field in the tool form) that are actually
        # integer/floats special validation is necessary (try to convert them and check if
        # in the min max range if a range is given)
        if TYPE_TO_GALAXY_TYPE[param.type] in ["integer", "float"]:
            valsan = expand_macro(param_node,
                                  "list_%s_valsan" % TYPE_TO_GALAXY_TYPE[param.type],
                                  dict([("name", get_galaxy_parameter_name(param))]))
        if type(param.restrictions) is _NumericRange and not (param.restrictions.n_min is None and param.restrictions.n_max is None):
            expression = "len(value.split(' ')) == len([_ for _ in value.split(' ') if "
            message = "a space separated list of %s values " % TYPE_TO_GALAXY_TYPE[param.type]
            if param.restrictions.n_min is not None and param.restrictions.n_max is not None:
                expression += f" {param.restrictions.n_min} <= {param.type.__name__}(_) <= {param.restrictions.n_max}"
                message += f"in the range {param.restrictions.n_min}:{param.restrictions.n_max} "
            elif param.restrictions.n_min is not None:
                expression += f" {param.restrictions.n_min} <= {param.type.__name__}(_)"
                message += "in the range %s: " % (param.restrictions.n_min)
            elif param.restrictions.n_max is not None:
                expression += f" {param.type.__name__}(_) <= {param.restrictions.n_max}"
                # fixed: the message referenced n_min, which is None in this branch
                message += "in the range :%s " % (param.restrictions.n_max)
            expression += "])\n"
            message += "is required"
            validator_node = SubElement(valsan, "validator", OrderedDict([("type", "expression"), ("message", message)]))
            validator_node.text = CDATA(expression)
    else:
        # add quotes to the default values (only if they include spaces .. then the UI looks nicer)
        if not (param.default is None or type(param.default) is _Null) and param.type is not _InFile:
            if type(param.default) is list:
                for i, d in enumerate(param.default):
                    if " " in d:
                        param.default[i] = '"%s"' % d
            # elif " " in param.default:
            #     param.default = '"%s"' % param.default
    # add sanitizer nodes to
    # - text (only those that are not actually integer selects which are treated above) and
    # - select params,
    # this is needed for special character like "[" which are used for example by FeatureFinderMultiplex
    if ((param_type == "text" and not TYPE_TO_GALAXY_TYPE[param.type] in ["integer", "float"]) or is_selection_parameter(param)) and param.type is not _InFile:
        if param.is_list and not is_selection_parameter(param):
            valsan = expand_macro(param_node, "list_string_val",
                                  dict([("name", get_galaxy_parameter_name(param))]))
        valsan = expand_macro(param_node, "list_string_san",
                              dict([("name", get_galaxy_parameter_name(param))]))
    # check for default value
    if not (param.default is None or type(param.default) is _Null):
        # defaults of selects are set via the selected attribute of the options (happens above)
        if param_type == "select":
            pass
        elif type(param.default) is list:
            # we ASSUME that a list of parameters looks like:
            # $ tool -ignore He Ar Xe
            # meaning, that, for example, Helium, Argon and Xenon will be ignored
            param_node.attrib["value"] = ' '.join(map(str, param.default))
        elif param_type != "boolean":
            param_node.attrib["value"] = str(param.default)
        else:
            # simple boolean with a default
            if param.default is True:
                param_node.attrib["checked"] = "true"
    elif param.type is int or param.type is float or param.type is str:
        if param_type == "select":
            pass
        else:
            param_node.attrib["value"] = ""
    # add label, help, and argument
    label = "%s parameter" % param.name
    help_text = ""
    if param.description is not None:
        label, help_text = generate_label_and_help(param.description)
    if param.is_list and not is_selection_parameter(param) and param.type is not _InFile:
        help_text += " (space separated list, in order to allow for spaces in list items surround them by single quotes)"
    if param.type is _InFile:
        help_text += " select %s data sets(s)" % (",".join(get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_InFile])))
    param_node.attrib["label"] = label
    param_node.attrib["help"] = help_text
def generate_label_and_help(desc):
    """
    Derive a Galaxy parameter label (and optional help text) from a CTD
    description: normalize line-break tags, strip trailing dots, capitalize
    the first word, and split long descriptions at sentence/example markers.

    @param desc the CTD description (converted to str if necessary)
    @return (label, help_text) tuple of strings
    """
    help_text = ""
    if not isinstance(desc, str):
        desc = str(desc)
    # "#br#" is a line-break tag found in some descriptions
    desc = desc.replace("#br#", ". ")
    # Get rid of dots in the end
    if desc.endswith("."):
        desc = desc.rstrip(".")
    # Check if first word is a normal word and make it uppercase
    if desc.find(" ") > -1:
        first_word, rest = desc.split(" ", 1)
        if first_word.islower():
            # check if label has a quotient of the form a/b
            # NOTE(review): "!= 1" only catches a slash at index 1 (e.g. "a/b");
            # possibly "== -1" was intended -- confirm
            if first_word.find("/") != 1:
                # fixed: the capitalize() result was previously discarded
                first_word = first_word.capitalize()
            desc = first_word + " " + rest
    label = desc
    # for long descriptions, split at delimiters ".,?!;(" style markers:
    # everything before becomes the label, the remainder the help text
    if len(desc) > 50:
        m = re.search(r"([.?!] |e\.g\.|\(e\.g\.|i\.e\.|\(i\.e\.)", desc)
        if m is not None:
            label = desc[:m.start()].rstrip(".?!, ")
            help_text = desc[m.start():].lstrip(".?!, ")
    # remove trailing whitespace and literal "<br>" tags
    # fixed: rstrip('<br>') stripped any of the characters '<', 'b', 'r', '>'
    # from the end (e.g. "Number" -> "Numbe"), not the "<br>" substring
    label = label.rstrip()
    while label.endswith("<br>"):
        label = label[:-len("<br>")].rstrip()
    return label, help_text
def is_boolean_parameter(param):
    """
    Decide whether a CTD parameter is boolean.

    OpenMS encodes flags as string parameters restricted to true/false, so
    both genuine bool parameters and such restricted strings count.
    @param param the ctd parameter
    @return True iff a boolean parameter
    """
    restrictions = param.restrictions
    if type(restrictions) is _Choices:
        return {"true", "false"} == set(restrictions.choices)
    return param.type is bool
def is_selection_parameter(param):
    """
    Decide whether a CTD parameter is a select, i.e. it has value choices
    that are not the boolean true/false pair.
    @param param the ctd parameter
    @return True iff a selection parameter
    """
    restrictions = param.restrictions
    if type(restrictions) is not _Choices:
        return False
    return set(restrictions.choices) != {"true", "false"}
def get_lowercase_list(some_list):
    """Return the elements of some_list as stripped, lower-cased strings."""
    lowered = []
    for item in some_list:
        lowered.append(str(item).lower().strip())
    return lowered
def create_boolean_parameter(param_node, param):
    """
    Fill in the attributes of a Galaxy boolean parameter node.

    Assumes that the parameter carries restrictions with exactly two values
    (true/false or yes/no style).
    TODO: true and false values can be way more than 'true' and 'false'
    but for that we need CTD support
    """
    # In CTD (1.6.2) bools are strings with restriction true,false:
    # - if the default is false they act as flags
    # - otherwise the true/false value needs to be passed (true being unnecessary)
    # Restrictions given as "false,true" are not treated as flags.
    if param.type == str:
        choices = get_lowercase_list(param.restrictions.choices)
        if {"true", "false"} == set(choices):
            param_node.attrib["truevalue"] = "true"
            param_node.attrib["falsevalue"] = "false"
        else:
            # non-standard pair: first choice is the "true" value, second the "false" one
            param_node.attrib["truevalue"] = choices[0]
            param_node.attrib["falsevalue"] = choices[1]
        # derive the checked state from the (lower-cased) default
        if param.default is not None:
            normalized = param.default.lower().strip()
            param_node.attrib["checked"] = "true" if normalized in ("yes", "true") else "false"
    else:
        param_node.attrib["truevalue"] = "true"
        param_node.attrib["falsevalue"] = "false"
        param_node.attrib["checked"] = str(param.default).lower()
    # booleans carry no "optional" attribute in Galaxy
    param_node.attrib.pop("optional", None)
def all_outputs(model, parameter_hardcoder):
    """
    Collect the model's output parameters.
    @return tuple (required_outputs, optional_outputs)
    """
    required = []
    optional = []
    for param in utils.extract_and_flatten_parameters(model):
        param_name = utils.extract_param_name(param)
        hardcoded_value = parameter_hardcoder.get_hardcoded_value(param_name, model.name)
        # blacklisted or hardcoded parameters are not exposed at all
        if parameter_hardcoder.get_blacklist(param_name, model.name) or hardcoded_value:
            continue
        if param.type is _OutFile or param.type is _OutPrefix:
            (required if param.required else optional).append(param)
    return required, optional
def output_filter_text(param):
    """Return the <filter> expression that enables an optional output."""
    return f'"{param.name}_FLAG" in OPTIONAL_OUTPUTS'
def create_outputs(parent, model, **kwargs):
    """
    create outputs section of the Galaxy tool
    @param parent the parent node (the Galaxy tool)
    @param model the ctd model
    @param kwargs
        - parameter_hardcoder and
        - supported_file_formats
    @return the created <outputs> node
    """
    outputs_node = add_child_node(parent, "outputs")
    parameter_hardcoder = kwargs["parameter_hardcoder"]
    for param in utils.extract_and_flatten_parameters(model):
        param = modify_param_for_galaxy(param)
        # no need to show hardcoded parameters
        hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
        if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name) or hardcoded_value:
            # let's not use an extra level of indentation and use NOP
            continue
        if param.type is not _OutFile and param.type is not _OutPrefix:
            continue
        create_output_node(outputs_node, param, model, kwargs["supported_file_formats"], parameter_hardcoder)
    # If there are no outputs defined in the ctd the node will have no children
    # and the stdout will be used as output
    out, optout = all_outputs(model, parameter_hardcoder)
    if len(out) == 0:
        # fixed: ("format", "txt") was listed twice in the attribute dict
        stdout = add_child_node(outputs_node, "data",
                                OrderedDict([("name", "stdout"), ("format", "txt"),
                                             ("label", "${tool.name} on ${on_string}: stdout")]))
        add_child_node(stdout, "filter", text="OPTIONAL_OUTPUTS is None")
    # manually add output for the ctd file
    ctd_out = add_child_node(outputs_node, "data", OrderedDict([("name", "ctd_out"), ("format", "xml"), ("label", "${tool.name} on ${on_string}: ctd")]))
    add_child_node(ctd_out, "filter", text='OPTIONAL_OUTPUTS is not None and "ctd_out_FLAG" in OPTIONAL_OUTPUTS')
    return outputs_node
def create_output_node(parent, param, model, supported_file_formats, parameter_hardcoder):
    """
    Add a <data> (single file) or <collection> (+ discover_datasets) node for
    an output parameter and determine how its datatype is set (fixed format,
    change_format driven by a type-select, format_source from an input, or
    auto_format).

    @param parent the <outputs> node
    @param param the ctd output parameter
    @param model the ctd model
    @param supported_file_formats format mapping
    @param parameter_hardcoder the parameter hardcoder
    @return the created data/collection node
    """
    g2o, o2g = get_fileformat_maps(supported_file_formats)
    # add a data node / collection + discover_datasets
    # in the former case we just set the discover_node equal to the data node
    # then we can just use this to set the common format attribute
    if not param.is_list and param.type is not _OutPrefix:
        data_node = add_child_node(parent, "data")
        discover_node = data_node
    else:
        data_node = add_child_node(parent, "collection")
        data_node.attrib["type"] = "list"
        discover_node = add_child_node(data_node, "discover_datasets",
                                       OrderedDict([("directory", get_galaxy_parameter_path(param, separator="_")),
                                                    ("recurse", "true")]))
    data_node.attrib["name"] = get_galaxy_parameter_path(param, separator="_")
    data_node.attrib["label"] = "${tool.name} on ${on_string}: %s" % utils.extract_param_name(param)
    formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_OutFile])
    type_param = get_out_type_param(param, model, parameter_hardcoder)
    corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
    if type_param is not None:
        type_param_name = get_galaxy_parameter_path(type_param)
        type_param_choices = get_formats(param, model, o2g)  # [_ for _ in type_param.restrictions.choices]
    elif len(formats) > 1 and (corresponding_input is None or not
                               fmt_from_corresponding):  # and not param.is_list:
        type_param_name = get_galaxy_parameter_path(param, suffix="type")
        type_param_choices = get_formats(param, model, o2g)
    else:
        type_param_name = None
    # if there is only a single possible output format we set this
    # logger.error("%s %s %s %s %s" %(param.name, formats, type_param, fmt_from_corresponding, corresponding_input))
    if len(formats) == 1:
        logger.info(f"OUTPUT {param.name} 1 fmt {formats}", 1)
        discover_node.attrib["format"] = formats.pop()
        if param.is_list:
            discover_node.attrib["pattern"] = "__name__"
        elif param.type is _OutPrefix:
            discover_node.attrib["pattern"] = r"_?(?P<designation>.*)\.[^.]*"
    # if there is another parameter where the user selects the format
    # then this format was added as file extension on the CLI, now we can discover this
    elif type_param_name is not None:
        logger.info("OUTPUT %s type" % param.name, 1)
        if not param.is_list:
            if len(type_param_choices) > 1:
                change_node = add_child_node(data_node, "change_format")
            for i, r in enumerate(type_param_choices):
                f = o2g.get(r, None)
                # TODO this should not happen for fully specified fileformats file
                if f is None:
                    f = r
                if i == 0:
                    data_node.attrib["format"] = f
                else:
                    add_child_node(change_node, "when", OrderedDict([("input", type_param_name), ("value", r), ("format", f)]))
        else:
            discover_node.attrib["pattern"] = "__name_and_ext__"
    elif corresponding_input is not None:
        logger.info(f"OUTPUT {param.name} input {corresponding_input.name}", 1)
        if param.is_list:
            discover_node.attrib["pattern"] = "__name_and_ext__"
            # data_node.attrib["structured_like"] = get_galaxy_parameter_name(corresponding_input)
            # data_node.attrib["inherit_format"] = "true"
        else:
            data_node.attrib["format_source"] = get_galaxy_parameter_path(corresponding_input)
            data_node.attrib["metadata_source"] = get_galaxy_parameter_path(corresponding_input)
    else:
        logger.info("OUTPUT %s else" % (param.name), 1)
        if not param.is_list:
            data_node.attrib["auto_format"] = "true"
        else:
            # fixed: the message previously concatenated to "... format forfor output ..."
            raise InvalidModelException("No way to know the format "
                                        "for output [%(name)s]" % {"name": param.name})
    # # data output has fomat (except if fromat_source has been added already)
    # # note .. collection output has no format
    # if not param.is_list and not "format_source" in data_node.attrib:
    #     data_node.attrib["format"] = data_format
    # add filter for optional parameters
    if not param.required:
        filter_node = add_child_node(data_node, "filter")
        filter_node.text = "OPTIONAL_OUTPUTS is not None and " + output_filter_text(param)
    return data_node
def get_supported_file_types(formats, supported_file_formats):
    """
    Map the given OpenMS format names to their Galaxy extensions,
    silently skipping formats that are not in the mapping.
    @return a set of Galaxy extensions
    """
    return {supported_file_formats[fmt].galaxy_extension
            for fmt in formats if fmt in supported_file_formats}
def create_change_format_node(parent, data_formats, input_ref):
    """
    Add a <change_format> node that switches the output datatype based on the
    value of input_ref, e.g.:
        <change_format>
            <when input="secondary_structure" value="true" format="txt"/>
        </change_format>
    """
    change_format_node = add_child_node(parent, "change_format")
    for fmt in data_formats:
        add_child_node(change_format_node, "when",
                       OrderedDict([("input", input_ref), ("value", fmt), ("format", fmt)]))
def create_tests(parent, inputs=None, outputs=None, test_macros_prefix=None, name=None):
    """
    create tests section of the Galaxy tool

    Either derives a single <test> from (mutated) copies of the tool's inputs
    and outputs, filling in dummy values, or expands the given test macros.
    @param parent the parent node (the Galaxy tool)
    @param inputs a copy of the inputs
    @param outputs a copy of the outputs
    @param test_macros_prefix prefixes of test macros to expand (alternative to inputs/outputs)
    @param name suffix appended to each test macro prefix
    """
    tests_node = add_child_node(parent, "tests")
    if not (inputs is None or outputs is None):
        fidx = 0
        test_node = add_child_node(tests_node, "test")
        strip_elements(inputs, "validator", "sanitizer")
        for node in inputs.iter():
            # turn the advanced-options macro expansion into the conditional it generates
            if node.tag == "expand" and node.attrib["macro"] == ADVANCED_OPTIONS_NAME + "macro":
                node.tag = "conditional"
                node.attrib["name"] = ADVANCED_OPTIONS_NAME + "cond"
                add_child_node(node, "param", OrderedDict([("name", ADVANCED_OPTIONS_NAME + "selector"), ("value", "advanced")]))
            if "type" not in node.attrib:
                continue
            # selects that already carry a selected option or a value need no test value
            if (node.attrib["type"] == "select" and "true" in {_.attrib.get("selected", "false") for _ in node}) or\
               (node.attrib["type"] == "select" and node.attrib.get("value", "") != ""):
                node.tag = "delete_node"
                continue
            # TODO make this optional (ie add aparameter)
            if node.attrib.get("optional", None) == "true" and node.attrib["type"] != "boolean":
                node.tag = "delete_node"
                continue
            # fill in a dummy test value depending on the parameter type
            if node.attrib["type"] == "boolean":
                if node.attrib["checked"] == "true":
                    node.attrib["value"] = "true"  # node.attrib["truevalue"]
                else:
                    node.attrib["value"] = "false"  # node.attrib["falsevalue"]
            elif node.attrib["type"] == "text" and node.attrib["value"] == "":
                node.attrib["value"] = "1 2"  # use a space separated list here to cover the repeat (int/float) case
            elif node.attrib["type"] == "integer" and node.attrib["value"] == "":
                node.attrib["value"] = "1"
            elif node.attrib["type"] == "float" and node.attrib["value"] == "":
                node.attrib["value"] = "1.0"
            elif node.attrib["type"] == "select":
                if node.attrib.get("display", None) == "radio" or node.attrib.get("multiple", "false") == "false":
                    node.attrib["value"] = node[0].attrib["value"]
                elif node.attrib.get("multiple", None) == "true":
                    node.attrib["value"] = ",".join([_.attrib["value"] for _ in node if "value" in _.attrib])
            elif node.attrib["type"] == "data":
                # data inputs get numbered dummy file names and the first allowed ftype
                node.attrib["ftype"] = node.attrib["format"].split(',')[0]
                if node.attrib.get("multiple", "false") == "true":
                    node.attrib["value"] = "{fidx}test.ext,{fidx}test2.ext".format(fidx=fidx)
                else:
                    node.attrib["value"] = f"{fidx}test.ext"
                fidx += 1
        # keep only the attributes that are meaningful on test params
        for node in inputs.iter():
            for a in set(node.attrib) - {"name", "value", "ftype"}:
                del node.attrib[a]
        strip_elements(inputs, "delete_node", "option", "expand")
        for node in inputs:
            test_node.append(node)
        outputs_cnt = 0
        for node in outputs.iter():
            if node.tag == "data" or node.tag == "collection":
                # assuming that all filters evaluate to false
                has_filter = False
                for c in node:
                    if c.tag == "filter":
                        has_filter = True
                        break
                if not has_filter:
                    outputs_cnt += 1
                else:
                    node.tag = "delete_node"
            if node.tag == "data":
                node.tag = "output"
                try:
                    node.attrib["ftype"] = node.attrib["format"]
                except KeyError:
                    pass
                node.attrib["value"] = "outfile.txt"
            if node.tag == "collection":
                node.tag = "output_collection"
            if node.attrib.get("name", None) == "stdout":
                node.attrib["lines_diff"] = "2"
            for a in set(node.attrib) - {"name", "value", "ftype", "lines_diff"}:
                del node.attrib[a]
        strip_elements(outputs, "delete_node", "discover_datasets", "filter", "change_format")
        for node in outputs:
            test_node.append(node)
        # if no optional output is selected the stdout is added as output
        if outputs_cnt == 0:
            outputs_cnt = 1
        test_node.attrib["expect_num_outputs"] = str(outputs_cnt)
    elif not (test_macros_prefix is None or name is None):
        expand_macros(tests_node, [p + name for p in test_macros_prefix])
def create_test_only(model, **kwargs):
parameter_hardcoder = kwargs["parameter_hardcoder"]
unsniffable = kwargs["test_unsniffable"]
supported_file_formats = kwargs["supported_file_formats"]
g2o, o2g = get_fileformat_maps(supported_file_formats)
section_nodes = dict()
section_params = dict()
test = Element("test")
advanced = add_child_node(test, "conditional", OrderedDict([("name", "adv_opts_cond")]))
add_child_node(advanced, "param", OrderedDict([("name", "adv_opts_selector"), ("value", "advanced")]))
optout = ["ctd_out_FLAG"]
outcnt = 1
for param in utils.extract_and_flatten_parameters(model, True):
ext = None
# no need to show hardcoded parameters
# except for the test parameter
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name) or hardcoded_value is not None:
if param.name != "test":
continue
if utils.extract_param_name(param.parent) in section_nodes:
parent = section_nodes[utils.extract_param_name(param.parent)]
elif type(param) is not ParameterGroup and param.advanced:
parent = advanced
else:
parent = test
if type(param) is ParameterGroup:
section_params[utils.extract_param_name(param)] = param
section_nodes[utils.extract_param_name(param)] = add_child_node(parent, "section", OrderedDict([("name", param.name)]))
continue
if param.type is _OutFile:
given = type(param.default) is _OutFile or (type(param.default) is list) and len(param.default) > 0
if not param.required and given:
optout.append("%s_FLAG" % param.name)
if given:
formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_OutFile])
type_param = get_out_type_param(param, model, parameter_hardcoder)
corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
if type(param.default) is _OutFile:
f = param.default
elif type(param.default) is list:
f = param.default[0]
else:
raise Exception("Outfile with non str or list default {}[{}]".format(param, type(param.default)))
# get the file type from the longest possible extension that
# matches the known extensions
# longest: because e.g. pep.xml should be prefered over xml
if f.endswith(".tmp"):
f = f[:-4]
splitted = f.split(".")
ext = None
for i in range(len(splitted)):
check_ext = ".".join(splitted[i:])
if check_ext in o2g:
ext = o2g[check_ext]
break
if ext not in formats:
if ext == "txt" and "csv" in formats:
ext = "csv"
elif ext == "txt" and "tsv" in formats:
ext = "tsv"
elif len(formats) == 1:
ext = formats[0]
if len(formats) > 1 and (corresponding_input is None or not
fmt_from_corresponding): # and not param.is_list:
if type_param is None:
try:
print("{} -> {}".format(ext, g2o[ext]))
attrib = OrderedDict([("name", param.name + "_type"), ("value", g2o[ext])])
add_child_node(parent, "param", attrib)
except KeyError:
raise Exception(f"parent {parent} name {param.name} ext {ext}")
if type_param is not None and type(type_param.default) is _Null:
if ext is not None:
type_param.default = ext
if param.required or given:
outcnt += 1
# don't output empty values for bool, and data parameters
if type(param.default) is _Null and not param.required:
if is_boolean_parameter(param):
continue
elif param.type is _OutFile:
continue
elif param.type is _InFile:
continue
elif type(param.restrictions) is _Choices and (param.default is None or type(param.default) is _Null):
continue
# lists need to be joined appropriately
# - special care for outfile lists (ie collections): since we do not know (easily) the names of the collection elements we just use the count
# exception of list parameters that are hardcoded to non-lists (the the default is still a list)
if not param.is_list and type(param.default) is list:
logger.info("Found non-list parameter %s with list default (hardcoded?). Using only first value/" % param.name, 0)
try:
param.default = param.default[0]
except KeyError:
param.default = _Null()
if param.is_list and type(param.default) is not _Null:
if param.type is _InFile:
value = ','.join(map(str, param.default))
elif param.type is _OutFile:
value = str(len(param.default))
elif param.type is str:
if type(param.restrictions) is _Choices:
value = ','.join(map(str, param.default))
else:
value = '"' + '" "'.join(map(str, param.default)) + '"'
else:
value = ' '.join(map(str, param.default))
else:
if type(param.default) is bool:
value = str(param.default).lower()
else:
value = str(param.default)
# use name where dashes are replaced by underscores
# see also create inputs
if param.type is _OutFile:
name = get_galaxy_parameter_path(param, separator="_")
if param.is_list:
nd = add_child_node(test, "output_collection", OrderedDict([("name", name), ("count", value)]))
else:
# TODO use delta_frac https://github.com/galaxyproject/galaxy/pull/9425
nd = add_child_node(test, "output", OrderedDict([("name", name), ("file", value), ("compare", "sim_size"), ("delta", "5700")]))
if ext:
nd.attrib["ftype"] = ext
elif param.type is _OutPrefix:
# #for outprefix elements / count need to be added manually
name = get_galaxy_parameter_path(param, separator="_")
nd = add_child_node(test, "output_collection", OrderedDict([("name", name), ("count", "")]))
else:
name = get_galaxy_parameter_name(param)
nd = add_child_node(parent, "param", OrderedDict([("name", name), ("value", value)]))
# add format attribute for unsniffable extensions
if param.type is _InFile:
ext = os.path.splitext(value)[1][1:]
if ext in unsniffable and ext in o2g:
nd.attrib["ftype"] = o2g[ext]
add_child_node(test, "param", OrderedDict([("name", "OPTIONAL_OUTPUTS"),
("value", ",".join(optout))]))
ctd_out = add_child_node(test, "output", OrderedDict([("name", "ctd_out"), ("ftype", "xml")]))
ctd_assert = add_child_node(ctd_out, "assert_contents")
add_child_node(ctd_assert, "is_valid_xml")
if outcnt == 0:
outcnt += 1
nd = add_child_node(test, "output", OrderedDict([("name", "stdout"),
("value", "stdout.txt"),
("compare", "sim_size")]))
test.attrib["expect_num_outputs"] = str(outcnt)
# if all_optional_outputs(model, parameter_hardcoder):
return test
def create_help(tool, model):
    """
    create help section of the Galaxy tool from the CTD tool help text
    @param tool the Galaxy tool
    @param model the ctd model
    """
    help_node = add_child_node(tool, "help")
    help_text = utils.extract_tool_help_text(model)
    help_node.text = CDATA(help_text)
def add_child_node(parent_node, child_node_name, attributes=None, text=None):
    """
    helper function to add a child node using the given name to the given parent node
    @param parent_node the parent
    @param child_node_name the desired name of the child
    @param attributes desired attributes of the child (a fresh empty dict if omitted)
    @param text desired text content of the child
    @return the created child node
    """
    # fixed: the default was a shared mutable OrderedDict([]) -- use None and
    # create a fresh dict per call to avoid the mutable-default pitfall
    if attributes is None:
        attributes = OrderedDict()
    child_node = SubElement(parent_node, child_node_name, attributes)
    if text is not None:
        child_node.text = text
    return child_node
| WorkflowConversion/CTDConverter | ctdconverter/galaxy/converter.py | Python | gpl-3.0 | 95,121 |
#-----------------------------------------------------------------------------
# Copyright (c) 2016-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
# Hook for the xsge_gui module: https://pypi.python.org/pypi/xsge_gui
from PyInstaller.utils.hooks import collect_data_files
# Bundle xsge_gui's non-Python data files with the frozen application so the
# module can locate them at runtime.
datas = collect_data_files('xsge_gui')
| etherkit/OpenBeacon2 | client/linux-arm/venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-xsge_gui.py | Python | gpl-3.0 | 674 |
from mazeexp.engine.mazeexp import MazeExplorer
| mryellow/maze_explorer | mazeexp/__init__.py | Python | mit | 48 |
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', '..'))
from chromite.cros.commands import cros_chrome_sdk_unittest
from chromite.lib import chrome_util
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import remote_access_unittest
from chromite.scripts import deploy_chrome
# TODO(build): Finish test wrapper (http://crosbug.com/37517).
# Until then, this has to be after the chromite imports.
import mock
# pylint: disable=W0212
_REGULAR_TO = ('--to', 'monkey')  # Minimal valid '--to' argument pair for tests.
_GS_PATH = 'gs://foon'  # Placeholder Google Storage path used by tests.
def _ParseCommandLine(argv):
  """Parses |argv| via deploy_chrome with debug logging prepended."""
  return deploy_chrome._ParseCommandLine(['--log-level', 'debug'] + argv)
class InterfaceTest(cros_test_lib.OutputTestCase):
  """Tests the commandline interface of the script."""
  BOARD = 'lumpy'
  def assertParseError(self, argv):
    """Asserts that parsing |argv| exits with a usage error (code 2)."""
    with self.OutputCapturer():
      self.assertRaises2(SystemExit, _ParseCommandLine, argv,
                         check_attrs={'code': 2})
  def testGsLocalPathUnSpecified(self):
    """Test no chrome path specified."""
    self.assertParseError(list(_REGULAR_TO))
  def testGsPathSpecified(self):
    """Test case of GS path specified."""
    _ParseCommandLine(list(_REGULAR_TO) + ['--gs-path', _GS_PATH])
  def testLocalPathSpecified(self):
    """Test case of local path specified."""
    _ParseCommandLine(list(_REGULAR_TO) + ['--local-pkg-path',
                                           '/path/to/chrome'])
  def testNoTarget(self):
    """Test no target specified."""
    self.assertParseError(['--gs-path', _GS_PATH])
  def testStagingFlagsNoStrict(self):
    """Errors out when --staging-flags is set without --strict."""
    self.assertParseError(['--staging-only', '--build-dir=/path/to/nowhere',
                           '--board=%s' % self.BOARD,
                           '--staging-flags=highdpi'])
  def testStrictNoBuildDir(self):
    """Errors out when --strict is set without --build-dir."""
    self.assertParseError(['--staging-only', '--strict', '--gs-path',
                           _GS_PATH])
  def testNoBoardBuildDir(self):
    """Errors out when --build-dir is given without a board."""
    self.assertParseError(['--staging-only', '--build-dir=/path/to/nowhere'])
class DeployChromeMock(partial_mock.PartialMock):
  """Partial mock of DeployChrome with the remote shell stubbed out."""
  TARGET = 'chromite.scripts.deploy_chrome.DeployChrome'
  ATTRS = ('_KillProcsIfNeeded', '_DisableRootfsVerification')
  def __init__(self):
    partial_mock.PartialMock.__init__(self)
    # Target starts off as having rootfs verification enabled.
    self.rsh_mock = remote_access_unittest.RemoteShMock()
    self.rsh_mock.SetDefaultCmdResult(0)
    self.MockMountCmd(1)
    # lsof reports that nothing is holding the Chrome directory open.
    self.rsh_mock.AddCmdResult(
        deploy_chrome.LSOF_COMMAND % (deploy_chrome._CHROME_DIR,), 1)
  def MockMountCmd(self, returnvalue):
    """Makes the remote mount-rw command return |returnvalue|."""
    self.rsh_mock.AddCmdResult(deploy_chrome.MOUNT_RW_COMMAND,
                               returnvalue)
  def _DisableRootfsVerification(self, inst):
    # Call through to the real implementation (saved in self.backup by
    # PartialMock), but patch out time.sleep so tests stay fast.
    with mock.patch.object(time, 'sleep'):
      self.backup['_DisableRootfsVerification'](inst)
  def PreStart(self):
    # Start the remote-shell mock before the partial mock goes live.
    self.rsh_mock.start()
  def PreStop(self):
    self.rsh_mock.stop()
  def _KillProcsIfNeeded(self, _inst):
    # Fully stub out for now.
    pass
class DeployTest(cros_test_lib.MockTempDirTestCase):
  """Base fixture: builds a DeployChrome instance with mocks installed."""
  def _GetDeployChrome(self, args):
    """Returns a DeployChrome constructed from command-line |args|."""
    options, _ = _ParseCommandLine(args)
    return deploy_chrome.DeployChrome(
        options, self.tempdir, os.path.join(self.tempdir, 'staging'))
  def setUp(self):
    # Install the partial mock, then build the object under test.
    self.deploy_mock = self.StartPatcher(DeployChromeMock())
    self.deploy = self._GetDeployChrome(
        list(_REGULAR_TO) + ['--gs-path', _GS_PATH, '--force'])
class TestDisableRootfsVerification(DeployTest):
  """Testing disabling of rootfs verification and RO mode."""
  def testDisableRootfsVerificationSuccess(self):
    """Test the working case, disabling rootfs verification."""
    self.deploy_mock.MockMountCmd(0)
    self.deploy._DisableRootfsVerification()
    self.assertFalse(self.deploy._rootfs_is_still_readonly.is_set())
  def testDisableRootfsVerificationFailure(self):
    """Test failure to disable rootfs verification."""
    # The mock's default mount result is failure (return code 1), so the
    # call is expected to raise.
    self.assertRaises(cros_build_lib.RunCommandError,
                      self.deploy._DisableRootfsVerification)
    self.assertFalse(self.deploy._rootfs_is_still_readonly.is_set())
class TestMount(DeployTest):
  """Testing mount success and failure."""
  def testSuccess(self):
    """Test case where we are able to mount as writable."""
    self.assertFalse(self.deploy._rootfs_is_still_readonly.is_set())
    self.deploy_mock.MockMountCmd(0)
    self.deploy._MountRootfsAsWritable()
    self.assertFalse(self.deploy._rootfs_is_still_readonly.is_set())
  def testMountError(self):
    """Test that mount failure doesn't raise an exception by default."""
    self.assertFalse(self.deploy._rootfs_is_still_readonly.is_set())
    # Default mock mount result is failure; the method should only record
    # the read-only state, not raise.
    self.deploy._MountRootfsAsWritable()
    self.assertTrue(self.deploy._rootfs_is_still_readonly.is_set())
  def testMountRwFailure(self):
    """Test that mount failure raises an exception if error_code_ok=False."""
    self.assertRaises(cros_build_lib.RunCommandError,
                      self.deploy._MountRootfsAsWritable, error_code_ok=False)
    self.assertFalse(self.deploy._rootfs_is_still_readonly.is_set())
class TestUiJobStarted(DeployTest):
  """Test detection of a running 'ui' job."""
  def MockStatusUiCmd(self, **kwargs):
    """Stubs the remote 'status ui' command with the given result."""
    self.deploy_mock.rsh_mock.AddCmdResult('status ui', **kwargs)
  def testUiJobStartedFalse(self):
    """Correct results with a stopped job."""
    self.MockStatusUiCmd(output='ui stop/waiting')
    self.assertFalse(self.deploy._CheckUiJobStarted())
  def testNoUiJob(self):
    """Correct results when the job doesn't exist."""
    self.MockStatusUiCmd(error='start: Unknown job: ui', returncode=1)
    self.assertFalse(self.deploy._CheckUiJobStarted())
  def testCheckRootfsWriteableTrue(self):
    """Correct results with a running job."""
    # NOTE(review): method name looks copy-pasted from elsewhere; it
    # actually verifies _CheckUiJobStarted() when the ui job is running.
    self.MockStatusUiCmd(output='ui start/running, process 297')
    self.assertTrue(self.deploy._CheckUiJobStarted())
class StagingTest(cros_test_lib.MockTempDirTestCase):
  """Test user-mode and ebuild-mode staging functionality."""
  def setUp(self):
    self.staging_dir = os.path.join(self.tempdir, 'staging')
    self.build_dir = os.path.join(self.tempdir, 'build_dir')
    self.common_flags = ['--build-dir', self.build_dir,
                         '--board=lumpy', '--staging-only', '--cache-dir',
                         self.tempdir]
    self.sdk_mock = self.StartPatcher(cros_chrome_sdk_unittest.SDKFetcherMock())
    # Stub the SDK environment so strip-binary lookup succeeds.
    self.PatchObject(
        osutils, 'SourceEnvironment', autospec=True,
        return_value={'STRIP': 'x86_64-cros-linux-gnu-strip'})
  def testSingleFileDeployFailure(self):
    """Default staging enforces that mandatory files are copied"""
    options, _ = _ParseCommandLine(self.common_flags)
    osutils.Touch(os.path.join(self.build_dir, 'chrome'), makedirs=True)
    self.assertRaises(
        chrome_util.MissingPathError, deploy_chrome._PrepareStagingDir,
        options, self.tempdir, self.staging_dir)
  def testSloppyDeployFailure(self):
    """Sloppy staging enforces that at least one file is copied."""
    options, _ = _ParseCommandLine(self.common_flags + ['--sloppy'])
    self.assertRaises(
        chrome_util.MissingPathError, deploy_chrome._PrepareStagingDir,
        options, self.tempdir, self.staging_dir)
  def testSloppyDeploySuccess(self):
    """Sloppy staging - stage one file."""
    options, _ = _ParseCommandLine(self.common_flags + ['--sloppy'])
    osutils.Touch(os.path.join(self.build_dir, 'chrome'), makedirs=True)
    deploy_chrome._PrepareStagingDir(options, self.tempdir, self.staging_dir)
  def testEmptyDeployStrict(self):
    """Strict staging fails when there are no files."""
    options, _ = _ParseCommandLine(
        self.common_flags + ['--gyp-defines', 'chromeos=1', '--strict'])
    # BUG FIX: the original additionally constructed a MissingPathError
    # instance here and discarded it — a no-op statement left over from an
    # edit. The assertRaises call below is the intended check.
    self.assertRaises(
        chrome_util.MissingPathError, deploy_chrome._PrepareStagingDir,
        options, self.tempdir, self.staging_dir)
# Allow running this test module directly.
if __name__ == '__main__':
  cros_test_lib.main()
| coreos/chromite | scripts/deploy_chrome_unittest.py | Python | bsd-3-clause | 8,629 |
"""
Script for building the example:
Usage:
python setup.py py2app
"""
from distutils.core import setup
import py2app
# Info.plist entries: register the app as an editor for '.ToDos' files
# (and any extension), handled by the ToDosDocument class.
plist = dict(
    CFBundleDocumentTypes = [
        dict(
            CFBundleTypeExtensions=[u'ToDos', u'*'],
            CFBundleTypeName=u'ToDos File',
            CFBundleTypeRole=u'Editor',
            NSDocumentClass=u'ToDosDocument',
        ),
    ],
)
# Build the .app bundle with py2app, including the English.lproj resources.
setup(
    name='ToDos',
    app=["ToDos.py"],
    data_files=["English.lproj"],
    options=dict(py2app=dict(
        plist=plist,
    )),
)
| albertz/music-player | mac/pyobjc-framework-Cocoa/Examples/AppKit/CocoaBindings/ToDos/setup.py | Python | bsd-2-clause | 528 |
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import tag
@tag(TestType = 'FVT', FeatureID = 'IOTOS-1546')
class Test_Intel_Graphics_lib(oeRuntimeTest):
    ''' Test Intel Graphics lib integrated '''
    # Library name -> list of file paths expected on the target image.
    lib_info = {
        "libDRM": ["/usr/lib/libdrm.so"],
        "xf86-video-intel": ["/usr/lib/xorg/modules/drivers/intel_drv.so"],
        "Mesa 3D": ["/usr/lib/libGL.so", "/usr/lib/libGLESv2.so"],
        "VAAPI": ["/usr/lib/libva.so", "/usr/lib/gstreamer-1.0/libgstvaapi.so"],
    }
    def _test_integration(self, lib_path_list):
        ''' Check library integration '''
        # Each expected file must be listable on the target device.
        for path in lib_path_list:
            status, output = self.target.run("ls %s" % path)
            self.assertTrue(status == 0, "%s is not in image: %s"
                            % (path, output))
    def test_libDRM_integration(self):
        ''' Check libDRM integration '''
        self._test_integration(self.lib_info["libDRM"])
    def test_xf86_video_intel_integration(self):
        ''' Check xf86-video-intel integration '''
        self._test_integration(self.lib_info["xf86-video-intel"])
    def test_Mesa_3D_integration(self):
        ''' Check Mesa 3D integration '''
        self._test_integration(self.lib_info["Mesa 3D"])
    def test_VAAPI_integration(self):
        ''' Check VAAPI integration '''
        self._test_integration(self.lib_info["VAAPI"])
| daweiwu/meta-iotqa-1 | lib/oeqa/runtime/graphics/test_enable_Intel_Linux_Graphics_lib.py | Python | mit | 1,634 |
# -*- coding: utf-8 -*-
"""Microsoft Internet Explorer (MSIE) zone information collector."""
from winregrc import interface
class MSIEZoneInformation(object):
  """MSIE zone information.
  Attributes:
    control (str): control.
    control_value (int|str): value to which the control is set.
    zone (str): identifier of the zone to which the control applies.
    zone_name (str): name of the zone to which the control applies.
  """
  def __init__(self, zone, zone_name, control, control_value):
    """Initializes MSIE zone information.
    Args:
      zone (str): identifier of the zone to which the control applies.
      zone_name (str): name of the zone to which the control applies.
      control (str): control.
      control_value (int|str): value to which the control is set.
    """
    super(MSIEZoneInformation, self).__init__()
    # Store the zone identity first, then the control setting it carries.
    self.zone = zone
    self.zone_name = zone_name
    self.control = control
    self.control_value = control_value
class MSIEZoneInformationCollector(interface.WindowsRegistryKeyCollector):
  """MSIE zone information collector."""
  _LOCKDOWN_KEY_PATHS = [
      # HKEY_CURRENT_USER
      ('HKEY_CURRENT_USER\\Software\\Policies\\Microsoft\\Internet Explorer\\'
       'Main\\FeatureControl\\FEATURE_LOCALMACHINE_LOCKDOWN'),
      ('HKEY_CURRENT_USER\\Software\\Microsoft\\Internet Explorer\\Main\\'
       'FeatureControl\\FEATURE_LOCALMACHINE_LOCKDOWN'),
      # HKEY_LOCAL_MACHINE
      ('HKEY_LOCAL_MACHINE\\Software\\Policies\\Microsoft\\'
       'Internet Explorer\\Main\\FeatureControl\\'
       'FEATURE_LOCALMACHINE_LOCKDOWN'),
      ('HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Internet Explorer\\Main\\'
       'FeatureControl\\FEATURE_LOCALMACHINE_LOCKDOWN'),
      # HKEY_LOCAL_MACHINE WoW64
      ('HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Policies\\Microsoft\\'
       'Internet Explorer\\Main\\FeatureControl\\'
       'FEATURE_LOCALMACHINE_LOCKDOWN'),
      ('HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Microsoft\\'
       'Internet Explorer\\Main\\FeatureControl\\'
       'FEATURE_LOCALMACHINE_LOCKDOWN')]
  _ZONES_KEY_PATHS = [
      # HKEY_CURRENT_USER
      ('HKEY_CURRENT_USER\\Software\\Policies\\Microsoft\\Windows\\'
       'CurrentVersion\\Internet Settings\\Zones'),
      ('HKEY_CURRENT_USER\\Software\\Policies\\Microsoft\\Windows\\'
       'CurrentVersion\\Internet Settings\\Lockdown_Zones'),
      ('HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
       'Internet Settings\\Zones'),
      ('HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
       'Internet Settings\\Lockdown_Zones'),
      # HKEY_LOCAL_MACHINE
      ('HKEY_LOCAL_MACHINE\\Software\\Policies\\Microsoft\\Windows\\'
       'CurrentVersion\\Internet Settings\\Zones'),
      ('HKEY_LOCAL_MACHINE\\Software\\Policies\\Microsoft\\Windows\\'
       'CurrentVersion\\Internet Settings\\Lockdown_Zones'),
      ('HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\'
       'Internet Settings\\Zones'),
      ('HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\'
       'Internet Settings\\Lockdown_Zones'),
      # HKEY_LOCAL_MACHINE WoW64
      ('HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Policies\\Microsoft\\'
       'Windows\\CurrentVersion\\Internet Settings\\Zones'),
      ('HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Policies\\Microsoft\\'
       'Windows\\CurrentVersion\\Internet Settings\\Lockdown_Zones'),
      ('HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Microsoft\\Windows\\'
       'CurrentVersion\\Internet Settings\\Zones'),
      ('HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\Microsoft\\Windows\\'
       'CurrentVersion\\Internet Settings\\Lockdown_Zones')]
  def _CollectZoneInformationFromLockdownKey(self, lockdown_key):
    """Collects MSIE zone information from a lockdown key.
    Args:
      lockdown_key (dfwinreg.WinRegistryKey): lockdown Windows Registry key.
    """
    program_name = 'iexplore.exe'
    program_value = lockdown_key.GetValueByName(program_name)
    if program_value:
      value = program_value.GetDataAsObject()
    else:
      value = 0
    if self._debug:
      if value == 1:
        print('Local Machine lockdown for {0:s}: True'.format(program_name))
      else:
        print('Local Machine lockdown for {0:s}: False'.format(program_name))
      print('')
    # TODO: implement.
  def _CollectZoneInformationFromZonesKey(self, zones_key):
    """Collects MSIE zone information from a zones key.
    Args:
      zones_key (dfwinreg.WinRegistryKey): zones Windows Registry key.
    Yields:
      MSIEZoneInformation: MSIE zone information.
    """
    for zone_key in zones_key.GetSubkeys():
      zone_name = self._GetValueFromKey(zone_key, 'DisplayName')
      for setting_value in zone_key.GetValues():
        # The 'Description' value contains a description of the zone.
        # The 'PMDisplayName' value contains the display name of the zone in
        # protected mode.
        if setting_value.name in (
            None, 'Description', 'DisplayName', 'PMDisplayName'):
          continue
        if len(setting_value.name) == 4 and setting_value.name != 'Icon':
          if len(setting_value.data) != 4:
            # BUG FIX: the original used setting_value.data.encode('hex'),
            # which is Python 2-only and raises AttributeError on Python 3
            # (this module targets Python 3, given its use of 'yield from').
            # bytes.hex() produces the same lowercase hexadecimal string.
            value_string = setting_value.data.hex()
          else:
            value_string = setting_value.GetDataAsObject()
        else:
          value_string = None
        yield MSIEZoneInformation(
            zone_key.name, zone_name, setting_value.name, value_string)
  def Collect(self, registry):
    """Collects the MSIE zone information.
    Args:
      registry (dfwinreg.WinRegistry): Windows Registry.
    Yields:
      MSIEZoneInformation: MSIE zone information.
    """
    for key_path in self._LOCKDOWN_KEY_PATHS:
      lockdown_key = registry.GetKeyByPath(key_path)
      if lockdown_key:
        # TODO: do something with information in lockdown key
        self._CollectZoneInformationFromLockdownKey(lockdown_key)
    # TODO: check for value Policies\\Microsoft\\Windows\\CurrentVersion\\
    # Internet Settings\\Security_HKEY_LOCAL_MACHINE_only and its data
    # if not exists or 0, not enabled if 1 only HKLM policy applies
    for key_path in self._ZONES_KEY_PATHS:
      zones_key = registry.GetKeyByPath(key_path)
      if zones_key:
        yield from self._CollectZoneInformationFromZonesKey(zones_key)
| libyal/winreg-kb | winregrc/msie_zone_info.py | Python | apache-2.0 | 6,351 |
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
from service.fit import Fit
from service.market import Market
import gui.mainFrame
from gui.viewColumn import ViewColumn
from gui.bitmap_loader import BitmapLoader
from gui.utils.numberFormatter import formatAmount
from gui.utils.listFormatter import formatList
from eos.utils.spoolSupport import SpoolType, SpoolOptions
import eos.config
class Miscellanea(ViewColumn):
name = "Miscellanea"
    def __init__(self, fittingView, params=None):
        """Sets up the column's icon and/or text header.
        Args:
            fittingView: the owning fitting view (provides the image list).
            params: optional dict with 'showIcon' and 'displayName' flags.
        """
        if params is None:
            params = {"showIcon": True, "displayName": False}
        ViewColumn.__init__(self, fittingView)
        if params["showIcon"]:
            self.imageId = fittingView.imageList.GetImageIndex("column_misc", "gui")
            self.bitmap = BitmapLoader.getBitmap("column_misc", "gui")
            self.mask = wx.LIST_MASK_IMAGE
        else:
            self.imageId = -1
        if params["displayName"] or self.imageId == -1:
            # Fall back to a text header whenever no icon is available.
            self.columnText = _("Misc data")
            self.mask |= wx.LIST_MASK_TEXT
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()
        self.fittingView = fittingView
    def getText(self, stuff):
        """Returns the cell text portion of the misc data for *stuff*."""
        return self.__getData(stuff)[0]
    def getToolTip(self, mod):
        """Returns the tooltip portion of the misc data for *mod*."""
        return self.__getData(mod)[1]
    def getImageId(self, mod):
        """This column shows no per-row image, so always returns -1."""
        return -1
    def getParameters(self):
        """Returns (name, type, default) tuples of the configurable params."""
        return ("displayName", bool, False), ("showIcon", bool, True)
def __getData(self, stuff):
item = stuff.item
if item is None:
return "", None
itemGroup = item.group.name
itemCategory = item.category.name
if itemGroup == "Ship Modifiers":
return "", None
elif itemGroup == "Booster":
stuff.getModifiedItemAttr("boosterDuration")
text = "{0} min".format(formatAmount(stuff.getModifiedItemAttr("boosterDuration") / 1000 / 60, 3, 0, 3))
return text, "Booster Duration"
elif itemGroup in ("Super Weapon", "Structure Doomsday Weapon"):
volleyParams = stuff.getVolleyParameters(ignoreState=True)
dmg = sum(dt.total for dt in volleyParams.values())
duration = (max(volleyParams) - min(volleyParams)) / 1000
if dmg <= 0:
text = ""
tooltip = ""
elif duration > 0:
text = "{} over {}s".format(
formatAmount(dmg, 3, 0, 6),
formatAmount((duration), 0, 0, 0))
tooltip = "Raw damage done over time"
else:
text = "{} dmg".format(formatAmount(dmg, 3, 0, 6))
tooltip = "Raw damage done"
return text, tooltip
pass
elif itemGroup in ("Energy Weapon", "Hybrid Weapon", "Projectile Weapon", "Combat Drone", "Fighter Drone"):
trackingSpeed = stuff.getModifiedItemAttr("trackingSpeed")
optimalSig = stuff.getModifiedItemAttr("optimalSigRadius")
if not trackingSpeed or not optimalSig:
return "", None
normalizedTracking = trackingSpeed * 40000 / optimalSig
text = "{0}".format(formatAmount(normalizedTracking, 3, 0, 3))
tooltip = "Tracking speed"
return text, tooltip
elif itemGroup == "Precursor Weapon":
info = []
trackingSpeed = stuff.getModifiedItemAttr("trackingSpeed")
if trackingSpeed:
text = "{0}".format(formatAmount(trackingSpeed, 3, 0, 3))
tooltip = "tracking speed"
info.append((text, tooltip))
defaultSpoolValue = eos.config.settings['globalDefaultSpoolupPercentage']
spoolTime = stuff.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SPOOL_SCALE, defaultSpoolValue, False))[1]
if spoolTime:
text = "{0}s".format(formatAmount(spoolTime, 3, 0, 3))
tooltip = "spool up time"
info.append((text, tooltip))
if not info:
return "", None
text = ' | '.join(i[0] for i in info)
tooltip = ' and '.join(i[1] for i in info).capitalize()
return text, tooltip
elif itemGroup == "Vorton Projector":
cloudSize = stuff.getModifiedItemAttr("aoeCloudSize")
aoeVelocity = stuff.getModifiedItemAttr("aoeVelocity")
if not cloudSize or not aoeVelocity:
return "", None
text = "{0}{1} | {2}{3}".format(formatAmount(cloudSize, 3, 0, 3), "m",
formatAmount(aoeVelocity, 3, 0, 3), "m/s")
tooltip = "Explosion radius and explosion velocity"
return text, tooltip
elif itemCategory == "Subsystem":
slots = ("hi", "med", "low")
info = []
for slot in slots:
n = int(stuff.getModifiedItemAttr("%sSlotModifier" % slot))
if n > 0:
info.append("{0}{1}".format(n, slot[0].upper()))
return "+ " + ", ".join(info), "Slot Modifiers"
elif (
itemGroup in ("Energy Neutralizer", "Structure Energy Neutralizer") or
(itemGroup in ("Structure Burst Projector", "Burst Projectors") and "doomsdayAOENeut" in item.effects)
):
neutAmount = stuff.getModifiedItemAttr("energyNeutralizerAmount")
cycleParams = stuff.getCycleParameters()
if cycleParams is None:
return "", None
cycleTime = cycleParams.averageTime
if not neutAmount or not cycleTime:
return "", None
capPerSec = float(-neutAmount) * 1000 / cycleTime
text = "{0}/s".format(formatAmount(capPerSec, 3, 0, 3))
tooltip = "Energy neutralization per second"
return text, tooltip
elif itemGroup == "Energy Nosferatu":
neutAmount = stuff.getModifiedItemAttr("powerTransferAmount")
cycleParams = stuff.getCycleParameters()
if cycleParams is None:
return "", None
cycleTime = cycleParams.averageTime
if not neutAmount or not cycleTime:
return "", None
capPerSec = float(-neutAmount) * 1000 / cycleTime
text = "{0}/s".format(formatAmount(capPerSec, 3, 0, 3))
tooltip = "Energy neutralization per second"
return text, tooltip
elif itemGroup == "Salvager":
chance = stuff.getModifiedItemAttr("accessDifficultyBonus")
if not chance:
return "", None
text = "{0}%".format(formatAmount(chance, 3, 0, 3))
tooltip = "Item retrieval chance"
return text, tooltip
elif itemGroup == "Data Miners":
strength = stuff.getModifiedItemAttr("virusStrength")
coherence = stuff.getModifiedItemAttr("virusCoherence")
if not strength or not coherence:
return "", None
text = "{0} | {1}".format(formatAmount(strength, 3, 0, 3), formatAmount(coherence, 3, 0, 3))
tooltip = "Virus strength and coherence"
return text, tooltip
elif itemGroup in ("Warp Scrambler", "Warp Core Stabilizer", "Structure Warp Scrambler"):
scramStr = stuff.getModifiedItemAttr("warpScrambleStrength")
if not scramStr:
return "", None
text = "{0}".format(formatAmount(-scramStr, 3, 0, 3, forceSign=True))
tooltip = "Warp core strength modification"
return text, tooltip
elif (
itemGroup in ("Stasis Web", "Stasis Webifying Drone", "Structure Stasis Webifier") or
(itemGroup in ("Structure Burst Projector", "Burst Projectors") and "doomsdayAOEWeb" in item.effects)
):
speedFactor = stuff.getModifiedItemAttr("speedFactor")
if not speedFactor:
return "", None
text = "{0}%".format(formatAmount(speedFactor, 3, 0, 3))
tooltip = "Speed reduction"
return text, tooltip
elif (
itemGroup == "Target Painter" or
(itemGroup == "Structure Disruption Battery" and "structureModuleEffectTargetPainter" in item.effects) or
(itemGroup in ("Structure Burst Projector", "Burst Projectors") and "doomsdayAOEPaint" in item.effects)
):
sigRadBonus = stuff.getModifiedItemAttr("signatureRadiusBonus")
if not sigRadBonus:
return "", None
text = "{0}%".format(formatAmount(sigRadBonus, 3, 0, 3, forceSign=True))
tooltip = "Signature radius increase"
return text, tooltip
elif (
itemGroup == "Sensor Dampener" or
(itemGroup == "Structure Disruption Battery" and "structureModuleEffectRemoteSensorDampener" in item.effects) or
(itemGroup in ("Structure Burst Projector", "Burst Projectors") and "doomsdayAOEDamp" in item.effects)
):
lockRangeBonus = stuff.getModifiedItemAttr("maxTargetRangeBonus")
scanResBonus = stuff.getModifiedItemAttr("scanResolutionBonus")
if lockRangeBonus is None or scanResBonus is None:
return "", None
display = 0
for bonus in (lockRangeBonus, scanResBonus):
if abs(bonus) > abs(display):
display = bonus
if not display:
return "", None
text = "{0}%".format(formatAmount(display, 3, 0, 3, forceSign=True))
ttEntries = []
if display == lockRangeBonus:
ttEntries.append("lock range")
if display == scanResBonus:
ttEntries.append("scan resolution")
tooltip = "{0} dampening".format(formatList(ttEntries)).capitalize()
return text, tooltip
elif (
itemGroup in ("Weapon Disruptor", "Structure Disruption Battery") or
(itemGroup in ("Structure Burst Projector", "Burst Projectors") and "doomsdayAOETrack" in item.effects)
):
# Weapon disruption now covers both tracking and guidance (missile) disruptors
# First get the attributes for tracking disruptors
optimalRangeBonus = stuff.getModifiedItemAttr("maxRangeBonus")
falloffRangeBonus = stuff.getModifiedItemAttr("falloffBonus")
trackingSpeedBonus = stuff.getModifiedItemAttr("trackingSpeedBonus")
trackingDisruptorAttributes = {
"optimal range": optimalRangeBonus,
"falloff range": falloffRangeBonus,
"tracking speed": trackingSpeedBonus}
isTrackingDisruptor = any([x is not None and x != 0 for x in list(trackingDisruptorAttributes.values())])
# Then get the attributes for guidance disruptors
explosionVelocityBonus = stuff.getModifiedItemAttr("aoeVelocityBonus")
explosionRadiusBonus = stuff.getModifiedItemAttr("aoeCloudSizeBonus")
flightTimeBonus = stuff.getModifiedItemAttr("explosionDelayBonus")
missileVelocityBonus = stuff.getModifiedItemAttr("missileVelocityBonus")
guidanceDisruptorAttributes = {
"explosion velocity": explosionVelocityBonus,
"explosion radius": explosionRadiusBonus,
"flight time": flightTimeBonus,
"missile velocity": missileVelocityBonus}
isGuidanceDisruptor = any([x is not None and x != 0 for x in list(guidanceDisruptorAttributes.values())])
if not isTrackingDisruptor and not isGuidanceDisruptor:
return "", None
texts = []
ttSegments = []
for status, attributes in ((isTrackingDisruptor, trackingDisruptorAttributes), (isGuidanceDisruptor, guidanceDisruptorAttributes)):
if not status:
continue
display = max(list(attributes.values()), key=lambda x: abs(x))
texts.append("{0}%".format(formatAmount(display, 3, 0, 3, forceSign=True)))
ttEntries = []
for attributeName, attributeValue in list(attributes.items()):
if abs(attributeValue) == abs(display):
ttEntries.append(attributeName)
ttSegments.append("{0} disruption".format(formatList(ttEntries)).capitalize())
return ' | '.join(texts), '\n'.join(ttSegments)
elif itemGroup in (
"Gyrostabilizer",
"Magnetic Field Stabilizer",
"Heat Sink",
"Ballistic Control system",
"Structure Weapon Upgrade",
"Entropic Radiation Sink",
"Vorton Projector Upgrade"
):
attrMap = {
"Gyrostabilizer": ("damageMultiplier", "speedMultiplier", "Projectile weapon"),
"Magnetic Field Stabilizer": ("damageMultiplier", "speedMultiplier", "Hybrid weapon"),
"Heat Sink": ("damageMultiplier", "speedMultiplier", "Energy weapon"),
"Ballistic Control system": ("missileDamageMultiplierBonus", "speedMultiplier", "Missile"),
"Structure Weapon Upgrade": ("missileDamageMultiplierBonus", "speedMultiplier", "Missile"),
"Entropic Radiation Sink": ("damageMultiplier", "speedMultiplier", "Precursor weapon"),
"Vorton Projector Upgrade": ("damageMultiplier", "speedMultiplier", "Vorton projector")}
dmgAttr, rofAttr, weaponName = attrMap[itemGroup]
dmg = stuff.getModifiedItemAttr(dmgAttr)
rof = stuff.getModifiedItemAttr(rofAttr)
if not dmg or not rof:
return "", None
texts = []
tooltips = []
cumulative = (dmg / rof - 1) * 100
texts.append("{}%".format(formatAmount(cumulative, 3, 0, 3, forceSign=True)))
tooltips.append("{} DPS boost".format(weaponName))
droneDmg = stuff.getModifiedItemAttr("droneDamageBonus")
if droneDmg:
texts.append("{}%".format(formatAmount(droneDmg, 3, 0, 3, forceSign=True)))
tooltips.append("drone DPS boost".format(weaponName))
return ' | '.join(texts), ' and '.join(tooltips)
elif itemGroup == "Drone Damage Modules":
dmg = stuff.getModifiedItemAttr("droneDamageBonus")
if not dmg:
return
text = "{}%".format(formatAmount(dmg, 3, 0, 3, forceSign=True))
tooltip = "Drone DPS boost"
return text, tooltip
elif (
itemGroup in ("ECM", "Burst Jammer", "Structure ECM Battery") or
(itemGroup in ("Structure Burst Projector", "Burst Projectors") and "doomsdayAOEECM" in item.effects)
):
grav = stuff.getModifiedItemAttr("scanGravimetricStrengthBonus")
ladar = stuff.getModifiedItemAttr("scanLadarStrengthBonus")
radar = stuff.getModifiedItemAttr("scanRadarStrengthBonus")
magnet = stuff.getModifiedItemAttr("scanMagnetometricStrengthBonus")
displayMax = max(grav, ladar, radar, magnet)
displayMin = min(grav, ladar, radar, magnet)
if grav is None or ladar is None or radar is None or magnet is None or displayMax is None:
return "", None
if displayMax == displayMin or displayMin is None:
text = "{0}".format(
formatAmount(displayMax, 3, 0, 3),
)
else:
text = "{0} | {1}".format(
formatAmount(displayMax, 3, 0, 3),
formatAmount(displayMin, 3, 0, 3),
)
tooltip = "ECM Jammer Strength:\n{0} Gravimetric | {1} Ladar | {2} Magnetometric | {3} Radar".format(
formatAmount(grav, 3, 0, 3),
formatAmount(ladar, 3, 0, 3),
formatAmount(magnet, 3, 0, 3),
formatAmount(radar, 3, 0, 3),
)
return text, tooltip
elif itemGroup in ("Remote Sensor Booster", "Sensor Booster", "Signal Amplifier", "Structure Signal Amplifier"):
textLines = []
tooltipLines = []
scanResBonus = stuff.getModifiedItemAttr("scanResolutionBonus")
if scanResBonus:
textLines.append("{}%".format(formatAmount(scanResBonus, 3, 0, 3)))
tooltipLines.append("{}% scan resolution".format(formatAmount(scanResBonus, 3, 0, 3)))
lockRangeBonus = stuff.getModifiedItemAttr("maxTargetRangeBonus")
if lockRangeBonus:
textLines.append("{}%".format(formatAmount(lockRangeBonus, 3, 0, 3)))
tooltipLines.append("{}% lock range".format(formatAmount(lockRangeBonus, 3, 0, 3)))
gravBonus = stuff.getModifiedItemAttr("scanGravimetricStrengthPercent")
if gravBonus:
textLines.append("{}%".format(formatAmount(gravBonus, 3, 0, 3)))
tooltipLines.append("{}% sensor strength".format(formatAmount(gravBonus, 3, 0, 3)))
if not textLines:
return "", None
text = " | ".join(textLines)
tooltip = "Applied bonuses:\n{}".format(" | ".join(tooltipLines))
return text, tooltip
elif itemGroup in ("Projected ECCM", "ECCM", "Sensor Backup Array"):
grav = stuff.getModifiedItemAttr("scanGravimetricStrengthPercent")
ladar = stuff.getModifiedItemAttr("scanLadarStrengthPercent")
radar = stuff.getModifiedItemAttr("scanRadarStrengthPercent")
magnet = stuff.getModifiedItemAttr("scanMagnetometricStrengthPercent")
if grav is None or ladar is None or radar is None or magnet is None:
return "", None
display = max(grav, ladar, radar, magnet)
if not display:
return "", None
text = "{0}%".format(formatAmount(display, 3, 0, 3, forceSign=True))
ttEntries = []
if display == grav:
ttEntries.append("gravimetric")
if display == ladar:
ttEntries.append("ladar")
if display == magnet:
ttEntries.append("magnetometric")
if display == radar:
ttEntries.append("radar")
plu = "" if len(ttEntries) == 1 else "s"
tooltip = "{0} strength{1} bonus".format(formatList(ttEntries), plu).capitalize()
return text, tooltip
elif itemGroup == "Cloaking Device":
recalibration = stuff.getModifiedItemAttr("cloakingTargetingDelay")
if recalibration is None:
return "", None
text = "{0}s".format(formatAmount(float(recalibration) / 1000, 3, 0, 3))
tooltip = "Sensor recalibration time"
return text, tooltip
elif itemGroup == "Remote Armor Repairer":
rps = stuff.getRemoteReps(ignoreState=True).armor
if not rps:
return "", None
text = "{0}/s".format(formatAmount(rps, 3, 0, 3, forceSign=True))
tooltip = "Armor repaired per second"
return text, tooltip
elif itemGroup == "Mutadaptive Remote Armor Repairer":
defaultSpoolValue = eos.config.settings['globalDefaultSpoolupPercentage']
spoolOptDefault = SpoolOptions(SpoolType.SPOOL_SCALE, defaultSpoolValue, False)
spoolOptPre = SpoolOptions(SpoolType.SPOOL_SCALE, 0, True)
spoolOptFull = SpoolOptions(SpoolType.SPOOL_SCALE, 1, True)
rps = stuff.getRemoteReps(spoolOptions=spoolOptDefault, ignoreState=True).armor
rpsPre = stuff.getRemoteReps(spoolOptions=spoolOptPre, ignoreState=True).armor
rpsFull = stuff.getRemoteReps(spoolOptions=spoolOptFull, ignoreState=True).armor
if not rps:
return "", None
text = []
tooltip = []
text.append("{}/s".format(formatAmount(rps, 3, 0, 3, forceSign=True)))
tooltip.append("Armor repaired per second")
spoolTime = stuff.getSpoolData(spoolOptDefault)[1]
if spoolTime:
text.append("{}s".format(formatAmount(spoolTime, 3, 0, 3)))
tooltip.append("spool up time")
text = " | ".join(text)
tooltip = " and ".join(tooltip)
spoolTimePre = stuff.getSpoolData(spoolOptPre)[1]
spoolTimeFull = stuff.getSpoolData(spoolOptFull)[1]
if spoolTimePre != spoolTimeFull:
tooltip = "{}\nSpool up: {}-{} over {}s".format(
tooltip,
formatAmount(rpsPre, 3, 0, 3),
formatAmount(rpsFull, 3, 0, 3),
formatAmount(spoolTimeFull - spoolTimePre, 3, 0, 3))
return text, tooltip
elif itemGroup == "Remote Shield Booster":
rps = stuff.getRemoteReps(ignoreState=True).shield
if not rps:
return "", None
text = "{0}/s".format(formatAmount(rps, 3, 0, 3, forceSign=True))
tooltip = "Shield transferred per second"
return text, tooltip
elif itemGroup == "Remote Capacitor Transmitter":
rps = stuff.getRemoteReps(ignoreState=True).capacitor
if not rps:
return "", None
text = "{0}/s".format(formatAmount(rps, 3, 0, 3, forceSign=True))
tooltip = "Energy transferred per second"
return text, tooltip
elif itemGroup == "Remote Hull Repairer":
rps = stuff.getRemoteReps(ignoreState=True).hull
if not rps:
return "", None
text = "{0}/s".format(formatAmount(rps, 3, 0, 3, forceSign=True))
tooltip = "Structure repaired per second"
return text, tooltip
elif itemGroup == "Gang Coordinator":
command = stuff.getModifiedItemAttr("commandBonus") or stuff.getModifiedItemAttr("commandBonusHidden")
if not command:
return "", None
text = "{0}%".format(formatAmount(command, 3, 0, 3, forceSign=True))
tooltip = "Gang bonus strength"
return text, tooltip
elif itemGroup == "Electronic Warfare Drone":
sigRadBonus = stuff.getModifiedItemAttr("signatureRadiusBonus")
lockRangeBonus = stuff.getModifiedItemAttr("maxTargetRangeBonus")
scanResBonus = stuff.getModifiedItemAttr("scanResolutionBonus")
falloffRangeBonus = stuff.getModifiedItemAttr("falloffBonus")
optimalRangeBonus = stuff.getModifiedItemAttr("maxRangeBonus")
trackingSpeedBonus = stuff.getModifiedItemAttr("trackingSpeedBonus")
grav = stuff.getModifiedItemAttr("scanGravimetricStrengthBonus")
ladar = stuff.getModifiedItemAttr("scanLadarStrengthBonus")
radar = stuff.getModifiedItemAttr("scanRadarStrengthBonus")
magnet = stuff.getModifiedItemAttr("scanMagnetometricStrengthBonus")
if sigRadBonus:
text = "{0}%".format(formatAmount(sigRadBonus, 3, 0, 3, forceSign=True))
tooltip = "Signature radius increase"
return text, tooltip
if lockRangeBonus or scanResBonus:
display = 0
for bonus in (lockRangeBonus, scanResBonus):
if abs(bonus) > abs(display):
display = bonus
if not display:
return "", None
text = "{0}%".format(formatAmount(display, 3, 0, 3, forceSign=True))
ttEntries = []
if display == lockRangeBonus:
ttEntries.append("lock range")
if display == scanResBonus:
ttEntries.append("scan resolution")
tooltip = "{0} dampening".format(formatList(ttEntries)).capitalize()
return text, tooltip
if falloffRangeBonus or optimalRangeBonus or trackingSpeedBonus:
display = 0
for bonus in (falloffRangeBonus, optimalRangeBonus, trackingSpeedBonus):
if abs(bonus) > abs(display):
display = bonus
if not display:
return "", None
text = "{0}%".format(formatAmount(display, 3, 0, 3), forceSign=True)
ttEntries = []
if display == optimalRangeBonus:
ttEntries.append("optimal range")
if display == falloffRangeBonus:
ttEntries.append("falloff range")
if display == trackingSpeedBonus:
ttEntries.append("tracking speed")
tooltip = "{0} disruption".format(formatList(ttEntries)).capitalize()
return text, tooltip
if grav is not None and ladar is not None and radar is not None and magnet is not None:
display = max(grav, ladar, radar, magnet)
if not display:
return "", None
text = "{0}".format(formatAmount(display, 3, 0, 3))
ttEntries = []
if display == grav:
ttEntries.append("gravimetric")
if display == ladar:
ttEntries.append("ladar")
if display == magnet:
ttEntries.append("magnetometric")
if display == radar:
ttEntries.append("radar")
plu = "" if len(ttEntries) == 1 else "s"
tooltip = "{0} strength{1}".format(formatList(ttEntries), plu).capitalize()
return text, tooltip
else:
return "", None
elif itemGroup == "Fighter Bomber":
optimalSig = stuff.getModifiedItemAttr("optimalSigRadius")
if not optimalSig:
return "", None
text = "{0}m".format(formatAmount(optimalSig, 3, 0, 3))
tooltip = "Optimal signature radius"
return text, tooltip
elif itemGroup in ("Frequency Mining Laser", "Strip Miner", "Mining Laser", "Gas Cloud Scoops", "Mining Drone", "Gas Cloud Harvesters"):
yps = stuff.getMiningYPS(ignoreState=True)
if not yps:
return "", None
yph = yps * 3600
wps = stuff.getMiningWPS(ignoreState=True)
wph = wps * 3600
textParts = []
textParts.append(formatAmount(yps, 3, 0, 3))
tipLines = []
tipLines.append("{} m\u00B3 mining yield per second ({} m\u00B3 per hour)".format(
formatAmount(yps, 3, 0, 3), formatAmount(yph, 3, 0, 3)))
if wps > 0:
textParts.append(formatAmount(wps, 3, 0, 3))
tipLines.append("{} m\u00B3 mining waste per second ({} m\u00B3 per hour)".format(
formatAmount(wps, 3, 0, 3), formatAmount(wph, 3, 0, 3)))
text = '{} m\u00B3/s'.format('+'.join(textParts))
tooltip = '\n'.join(tipLines)
return text, tooltip
elif itemGroup == "Logistic Drone":
rpsData = stuff.getRemoteReps(ignoreState=True)
rrType = None
rps = None
if rpsData.shield:
rps = rpsData.shield
rrType = 'Shield'
elif rpsData.armor:
rps = rpsData.armor
rrType = 'Armor'
elif rpsData.hull:
rps = rpsData.hull
rrType = 'Hull'
if not rrType or not rps:
return "", None
text = "{}/s".format(formatAmount(rps, 3, 0, 3))
tooltip = "{} HP repaired per second\n{} HP/s per drone".format(rrType, formatAmount(rps / stuff.amount, 3, 0, 3))
return text, tooltip
elif itemGroup == "Energy Neutralizer Drone":
neutAmount = stuff.getModifiedItemAttr("energyNeutralizerAmount")
cycleTime = stuff.getModifiedItemAttr("energyNeutralizerDuration")
if not neutAmount or not cycleTime:
return "", None
capPerSec = float(-neutAmount) * 1000 / cycleTime
text = "{0}/s".format(formatAmount(capPerSec, 3, 0, 3))
tooltip = "Energy neutralization per second"
return text, tooltip
elif itemGroup in ("Micro Jump Drive", "Micro Jump Field Generators"):
cycleTime = stuff.getModifiedItemAttr("duration") / 1000
text = "{0}s".format(formatAmount(cycleTime, 3, 0, 3))
tooltip = "Spoolup time"
return text, tooltip
elif itemGroup in ("Siege Module", "Cynosural Field Generator"):
amt = stuff.getModifiedItemAttr("consumptionQuantity")
if amt:
typeID = stuff.getModifiedItemAttr("consumptionType")
item = Market.getInstance().getItem(typeID)
text = "{0} units".format(formatAmount(amt, 3, 0, 3))
return text, item.name
else:
return "", None
elif itemGroup in (
"Ancillary Armor Repairer",
"Ancillary Shield Booster",
"Capacitor Booster",
"Ancillary Remote Armor Repairer",
"Ancillary Remote Shield Booster",
):
if "Armor" in itemGroup or "Shield" in itemGroup:
boosted_attribute = "HP"
reload_time = stuff.getModifiedItemAttr("reloadTime", 0) / 1000
elif "Capacitor" in itemGroup:
boosted_attribute = "Cap"
reload_time = 10
else:
boosted_attribute = ""
reload_time = 0
cycles = max(stuff.numShots, 0)
cycleTime = max(stuff.rawCycleTime, 0)
# Get HP or boosted amount
stuff_hp = max(stuff.hpBeforeReload, 0)
armor_hp = stuff.getModifiedItemAttr("armorDamageAmount", 0)
capacitor_hp = stuff.getModifiedChargeAttr("capacitorBonus", 0)
shield_hp = stuff.getModifiedItemAttr("shieldBonus", 0)
hp = max(stuff_hp, armor_hp * cycles, capacitor_hp * cycles, shield_hp * cycles, 0)
nonChargedMap = {
"Ancillary Remote Armor Repairer": ("armor", "Armor repaired per second"),
"Ancillary Remote Shield Booster": ("shield", "Shield transferred per second")}
if not cycles and itemGroup in nonChargedMap:
rps = stuff.getRemoteReps(ignoreState=True)
rps = getattr(rps, nonChargedMap[itemGroup][0])
if not rps:
return "", None
text = "{0}/s".format(formatAmount(rps, 3, 0, 3, forceSign=True))
tooltip = nonChargedMap[itemGroup][1]
return text, tooltip
if not hp or not cycleTime or not cycles:
return "", None
fit = Fit.getInstance().getFit(self.fittingView.getActiveFit())
ehpTotal = fit.ehp
hpTotal = fit.hp
try:
useEhp = self.mainFrame.statsPane.nameViewMap["resistancesViewFull"].showEffective
except KeyError:
useEhp = False
tooltip = "{0} restored over duration using charges (plus reload)".format(boosted_attribute)
if useEhp and boosted_attribute == "HP" and "Remote" not in itemGroup:
if "Ancillary Armor Repairer" in itemGroup:
hpRatio = ehpTotal["armor"] / hpTotal["armor"]
else:
hpRatio = ehpTotal["shield"] / hpTotal["shield"]
tooltip = "E{0}".format(tooltip)
else:
hpRatio = 1
if "Ancillary" in itemGroup and "Armor" in itemGroup:
hpRatio *= stuff.getModifiedItemAttr("chargedArmorDamageMultiplier", 1)
ehp = hp * hpRatio
duration = cycles * cycleTime / 1000
for number_of_cycles in {5, 10, 25}:
tooltip = "{0}\n{1} charges lasts {2} seconds ({3} cycles)".format(
tooltip,
formatAmount(number_of_cycles * cycles, 3, 0, 3),
formatAmount((duration + reload_time) * number_of_cycles, 3, 0, 3),
formatAmount(number_of_cycles, 3, 0, 3)
)
text = "{0} / {1}s (+{2}s)".format(
formatAmount(ehp, 3, 0, 9),
formatAmount(duration, 3, 0, 3),
formatAmount(reload_time, 3, 0, 3)
)
return text, tooltip
elif itemGroup == "Armor Resistance Shift Hardener":
itemArmorResistanceShiftHardenerEM = (1 - stuff.getModifiedItemAttr("armorEmDamageResonance")) * 100
itemArmorResistanceShiftHardenerTherm = (1 - stuff.getModifiedItemAttr("armorThermalDamageResonance")) * 100
itemArmorResistanceShiftHardenerKin = (1 - stuff.getModifiedItemAttr("armorKineticDamageResonance")) * 100
itemArmorResistanceShiftHardenerExp = (1 - stuff.getModifiedItemAttr("armorExplosiveDamageResonance")) * 100
text = "{0}% | {1}% | {2}% | {3}%".format(
formatAmount(itemArmorResistanceShiftHardenerEM, 3, 0, 3),
formatAmount(itemArmorResistanceShiftHardenerTherm, 3, 0, 3),
formatAmount(itemArmorResistanceShiftHardenerKin, 3, 0, 3),
formatAmount(itemArmorResistanceShiftHardenerExp, 3, 0, 3),
)
tooltip = "Resistances shifted to damage profile:\n{0}% EM | {1}% Therm | {2}% Kin | {3}% Exp".format(
formatAmount(itemArmorResistanceShiftHardenerEM, 3, 0, 3),
formatAmount(itemArmorResistanceShiftHardenerTherm, 3, 0, 3),
formatAmount(itemArmorResistanceShiftHardenerKin, 3, 0, 3),
formatAmount(itemArmorResistanceShiftHardenerExp, 3, 0, 3),
)
return text, tooltip
elif itemGroup in ("Cargo Scanner", "Ship Scanner", "Survey Scanner"):
duration = stuff.getModifiedItemAttr("duration")
if not duration:
return "", None
text = "{}s".format(formatAmount(duration / 1000, 3, 0, 0))
tooltip = "Scan duration"
return text, tooltip
elif itemGroup == "Command Burst":
textSections = []
tooltipSections = []
buffMap = {}
for seq in (1, 2, 3, 4):
buffId = stuff.getModifiedChargeAttr(f'warfareBuff{seq}ID')
if not buffId:
continue
buffValue = stuff.getModifiedItemAttr(f'warfareBuff{seq}Value')
buffMap[buffId] = buffValue
if buffId == 10: # Shield Burst: Shield Harmonizing: Shield Resistance
# minus buff value because ingame shows positive value
textSections.append(f"{formatAmount(-buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("shield resistance")
elif buffId == 11: # Shield Burst: Active Shielding: Repair Duration/Capacitor
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("shield RR duration & capacictor use")
elif buffId == 12: # Shield Burst: Shield Extension: Shield HP
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("shield HP")
elif buffId == 13: # Armor Burst: Armor Energizing: Armor Resistance
# minus buff value because ingame shows positive value
textSections.append(f"{formatAmount(-buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("armor resistance")
elif buffId == 14: # Armor Burst: Rapid Repair: Repair Duration/Capacitor
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("armor RR duration & capacitor use")
elif buffId == 15: # Armor Burst: Armor Reinforcement: Armor HP
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("armor HP")
elif buffId == 16: # Information Burst: Sensor Optimization: Scan Resolution
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("scan resolution")
elif buffId == 26: # Information Burst: Sensor Optimization: Targeting Range
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("targeting range")
elif buffId == 17: # Information Burst: Electronic Superiority: EWAR Range and Strength
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("electronic warfare modules range & strength")
elif buffId == 18: # Information Burst: Electronic Hardening: Sensor Strength
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("sensor strength")
elif buffId == 19: # Information Burst: Electronic Hardening: RSD/RWD Resistance
textSections.append(f"{formatAmount(-buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("sensor dampener & weapon disruption resistance")
elif buffId == 20: # Skirmish Burst: Evasive Maneuvers: Signature Radius
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("signature radius")
elif buffId == 60: # Skirmish Burst: Evasive Maneuvers: Agility
# minus the buff value because we want Agility as shown ingame, not inertia modifier
textSections.append(f"{formatAmount(-buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("agility")
elif buffId == 21: # Skirmish Burst: Interdiction Maneuvers: Tackle Range
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("warp disruption & stasis web range")
elif buffId == 22: # Skirmish Burst: Rapid Deployment: AB/MWD Speed Increase
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("AB/MWD speed increase")
elif buffId == 23: # Mining Burst: Mining Laser Field Enhancement: Mining/Survey Range
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("mining/survey module range")
elif buffId == 24: # Mining Burst: Mining Laser Optimization: Mining Capacitor/Duration
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("mining module duration & capacitor use")
elif buffId == 25: # Mining Burst: Mining Equipment Preservation: Crystal Volatility
textSections.append(f"{formatAmount(buffValue, 3, 0, 3, forceSign=True)}%")
tooltipSections.append("mining crystal volatility")
if not textSections:
return '', None
text = ' | '.join(textSections)
tooltip = '{} bonus'.format(' | '.join(tooltipSections))
if tooltip:
tooltip = tooltip[0].capitalize() + tooltip[1:]
return text, tooltip
elif stuff.charge is not None:
chargeGroup = stuff.charge.group.name
if chargeGroup.endswith("Rocket") or chargeGroup.endswith("Missile") or chargeGroup.endswith("Torpedo"):
cloudSize = stuff.getModifiedChargeAttr("aoeCloudSize")
aoeVelocity = stuff.getModifiedChargeAttr("aoeVelocity")
if not cloudSize or not aoeVelocity:
return "", None
text = "{0}{1} | {2}{3}".format(formatAmount(cloudSize, 3, 0, 3), "m",
formatAmount(aoeVelocity, 3, 0, 3), "m/s")
tooltip = "Explosion radius and explosion velocity"
return text, tooltip
elif chargeGroup in ("Bomb", "Guided Bomb"):
cloudSize = stuff.getModifiedChargeAttr("aoeCloudSize")
if not cloudSize:
return "", None
text = "{0}{1}".format(formatAmount(cloudSize, 3, 0, 3), "m")
tooltip = "Explosion radius"
return text, tooltip
elif chargeGroup in ("Scanner Probe",):
scanStr = stuff.getModifiedChargeAttr("baseSensorStrength")
baseRange = stuff.getModifiedChargeAttr("baseScanRange")
if not scanStr or not baseRange:
return "", None
text = "{}".format(formatAmount(scanStr, 4, 0, 3))
tooltip = "Scan strength at {} AU scan range".format(formatAmount(baseRange, 3, 0, 0))
return text, tooltip
else:
return "", None
else:
return "", None
Miscellanea.register()
| pyfa-org/Pyfa | gui/builtinViewColumns/misc.py | Python | gpl-3.0 | 42,823 |
from . import model
from . import cv_tools
| gu-yan/mlAlgorithms | mxnet/__init__.py | Python | apache-2.0 | 43 |
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter server."""
from concurrent import futures
import time
import logging
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
from request_header_validator_interceptor import RequestHeaderValidatorInterceptor
# Number of seconds in one day; used as the sleep interval while the
# server's worker threads handle incoming RPCs.
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Greeter(helloworld_pb2_grpc.GreeterServicer):
    """Implementation of the Greeter service defined in helloworld.proto."""

    def SayHello(self, request, context):
        """Return a HelloReply greeting the caller-supplied name."""
        return helloworld_pb2.HelloReply(
            message='Hello, {0}!'.format(request.name))
def serve():
    """Start the Greeter server and block until interrupted.

    Installs a request-header validating interceptor, serves on port 50051,
    and stops the server cleanly on Ctrl-C (KeyboardInterrupt).
    """
    # Reject any RPC whose metadata lacks one-time-password=42.
    auth_interceptor = RequestHeaderValidatorInterceptor(
        'one-time-password', '42', grpc.StatusCode.UNAUTHENTICATED,
        'Access denied!')
    grpc_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10),
        interceptors=(auth_interceptor,))
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), grpc_server)
    grpc_server.add_insecure_port('[::]:50051')
    grpc_server.start()
    try:
        # The thread pool services requests; the main thread just sleeps.
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        grpc_server.stop(0)
if __name__ == '__main__':
    # Set up a default logging configuration before serving.
    logging.basicConfig()
    serve()
| endlessm/chromium-browser | third_party/grpc/src/examples/python/interceptors/headers/greeter_server.py | Python | bsd-3-clause | 1,679 |
from __future__ import unicode_literals
from django.utils import regex_helper
from django.utils import unittest
class NormalizeTests(unittest.TestCase):
    """Checks regex_helper.normalize() against hand-written regex patterns."""

    def _check(self, pattern, expected):
        # Shared assertion: normalize() must yield the expected list of
        # (format-string, group-name-list) pairs for the given pattern.
        self.assertEqual(regex_helper.normalize(pattern), expected)

    def test_empty(self):
        self._check(r"", [('', [])])

    def test_escape(self):
        self._check(r"\\\^\$\.\|\?\*\+\(\)\[", [('\\^$.|?*+()[', [])])

    def test_group_positional(self):
        self._check(r"(.*)-(.+)", [('%(_0)s-%(_1)s', ['_0', '_1'])])

    def test_group_ignored(self):
        self._check(r"(?i)(?L)(?m)(?s)(?u)(?#)", [('', [])])

    def test_group_noncapturing(self):
        self._check(r"(?:non-capturing)", [('non-capturing', [])])

    def test_group_named(self):
        self._check(
            r"(?P<first_group_name>.*)-(?P<second_group_name>.*)",
            [('%(first_group_name)s-%(second_group_name)s',
              ['first_group_name', 'second_group_name'])])

    def test_group_backreference(self):
        self._check(
            r"(?P<first_group_name>.*)-(?P=first_group_name)",
            [('%(first_group_name)s-%(first_group_name)s',
              ['first_group_name'])])
| openhatch/new-mini-tasks | vendor/packages/Django/tests/regressiontests/utils/regex_helper.py | Python | apache-2.0 | 1,801 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.Agreement import Agreement
class LocationGrant(Agreement):
    """A grant conveying a right, as defined by type, over a parcel of land.

    Associations to Location, Asset, Organisation, etc. are inherited from
    Agreement, a type of Document.
    """

    def __init__(self, propertyData='', LandProperty=None, *args, **kw_args):
        """Initialises a new 'LocationGrant' instance.

        @param propertyData: Description of the grant's land parcel, e.g. a
            deed book number, deed book page number, and parcel number.
        @param LandProperty: Land property this location grant applies to.
        """
        #: Description of the grant's land parcel (for example a deed book
        #: number, deed book page number, and parcel number).
        self.propertyData = propertyData

        self._LandProperty = None
        self.LandProperty = LandProperty

        super(LocationGrant, self).__init__(*args, **kw_args)

    _attrs = ["propertyData"]
    _attr_types = {"propertyData": str}
    _defaults = {"propertyData": ''}
    _enums = {}
    _refs = ["LandProperty"]
    _many_refs = []

    def getLandProperty(self):
        """Land property this location grant applies to."""
        return self._LandProperty

    def setLandProperty(self, value):
        previous = self._LandProperty
        if previous is not None:
            # Detach this grant from the old property's reverse-reference list.
            previous._LocationGrants = [
                grant for grant in previous.LocationGrants if grant != self]

        self._LandProperty = value
        if value is not None and self not in value._LocationGrants:
            value._LocationGrants.append(self)

    LandProperty = property(getLandProperty, setLandProperty)
| rwl/PyCIM | CIM15/IEC61970/Informative/InfLocations/LocationGrant.py | Python | mit | 3,144 |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DatasetInfo records the information we know about a dataset.
This includes things that we know about the dataset statically, i.e.:
- schema
- description
- canonical location
- does it have validation and tests splits
- size
- etc.
This also includes the things that can and should be computed once we've
processed the dataset as well:
- number of examples (in each split)
- feature statistics (in each split)
- etc.
"""
import abc
import json
import os
import posixpath
import tempfile
from typing import Dict, Optional, Tuple, Union
from absl import logging
from etils import epath
import six
import tensorflow as tf
from tensorflow_datasets.core import file_adapters
from tensorflow_datasets.core import lazy_imports_lib
from tensorflow_datasets.core import naming
from tensorflow_datasets.core import splits as splits_lib
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.features import feature as feature_lib
from tensorflow_datasets.core.features import top_level_feature
from tensorflow_datasets.core.proto import dataset_info_pb2
from tensorflow_datasets.core.utils import gcs_utils
from google.protobuf import json_format
# TODO(b/109648354): Remove the "pytype: disable" comment.
# Nested structure of feature-key strings, as accepted by `supervised_keys`.
Nest = Union[Tuple["Nest", ...], Dict[str, "Nest"], str]  # pytype: disable=not-supported-yet
# Either (input, target) or (input, target, sample_weights) key structures.
SupervisedKeysType = Union[Tuple[Nest, Nest], Tuple[Nest, Nest, Nest]]
# Name of the file to output the DatasetInfo protobuf object.
DATASET_INFO_FILENAME = "dataset_info.json"
# File storing the dataset's license text (from redistribution_info.license).
LICENSE_FILENAME = "LICENSE"
# File used by `tfds.core.Metadata` implementations to save/restore metadata.
METADATA_FILENAME = "metadata.json"
@six.add_metaclass(abc.ABCMeta)
class Metadata(dict):
  """Abstract container for additional, dataset-level metadata.

  `builder.info.metadata` lets a dataset expose general information that is
  not specific to any one feature or example, stored/restored alongside the
  dataset itself.

  Subclasses implement `save_metadata` and `load_metadata`. See
  `tfds.core.MetadataDict` for a simple implementation behaving as a dict
  backed by a JSON file.
  """

  @abc.abstractmethod
  def save_metadata(self, data_dir):
    """Persist the metadata under `data_dir`."""
    raise NotImplementedError()

  @abc.abstractmethod
  def load_metadata(self, data_dir):
    """Restore the metadata from `data_dir`."""
    raise NotImplementedError()
class DatasetInfo(object):
"""Information about a dataset.
`DatasetInfo` documents datasets, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known if the dataset hasn't been generated yet
(before the first `builder.download_and_prepare()` call). For example splits
names or number of examples might be missing (as they are computed
at dataset creation time).
"""
def __init__(
self,
*,
builder,
description: Optional[str] = None,
features: Optional[feature_lib.FeatureConnector] = None,
supervised_keys: Optional[SupervisedKeysType] = None,
disable_shuffling: bool = False,
homepage: Optional[str] = None,
citation: Optional[str] = None,
metadata: Optional[Metadata] = None,
license: Optional[str] = None, # pylint: disable=redefined-builtin
redistribution_info: Optional[Dict[str, str]] = None,
split_dict: Optional[splits_lib.SplitDict] = None):
"""Constructs DatasetInfo.
Args:
builder: `DatasetBuilder`, dataset builder for this info.
description: `str`, description of this dataset.
features: `tfds.features.FeaturesDict`, Information on the feature dict of
the `tf.data.Dataset()` object from the `builder.as_dataset()` method.
supervised_keys: Specifies the input structure for supervised learning, if
applicable for the dataset, used with "as_supervised". The keys
correspond to the feature names to select in `info.features`. When
calling `tfds.core.DatasetBuilder.as_dataset()` with
`as_supervised=True`, the `tf.data.Dataset` object will yield the
structure defined by the keys passed here, instead of that defined by
the `features` argument. Typically this is a `(input_key, target_key)`
tuple, and the dataset yields a tuple of tensors `(input, target)`
tensors.
To yield a more complex structure, pass a tuple of `tf.nest` compatible
structures of feature keys. The resulting `Dataset` will yield
structures with each key replaced by the coresponding tensor. For
example, passing a triple of keys would return a dataset
that yields `(feature, target, sample_weights)` triples for keras.
Using `supervised_keys=({'a':'a','b':'b'}, 'c')` would create a dataset
yielding a tuple with a dictionary of features in the `features`
position.
Note that selecting features in nested `tfds.features.FeaturesDict`
objects is not supported.
disable_shuffling: `bool`, specify whether to shuffle the examples.
homepage: `str`, optional, the homepage for this dataset.
citation: `str`, optional, the citation to use for this dataset.
metadata: `tfds.core.Metadata`, additonal object which will be
stored/restored with the dataset. This allows for storing additional
information with the dataset.
license: license of the dataset.
redistribution_info: information needed for redistribution, as specified
in `dataset_info_pb2.RedistributionInfo`. The content of the `license`
subfield will automatically be written to a LICENSE file stored with the
dataset.
split_dict: information about the splits in this dataset.
"""
self._builder = builder
if builder.builder_config:
config_name = builder.builder_config.name
config_description = builder.builder_config.description
else:
config_name = None
config_description = None
self._info_proto = dataset_info_pb2.DatasetInfo(
name=builder.name,
description=utils.dedent(description),
version=str(builder.version),
release_notes=builder.release_notes,
disable_shuffling=disable_shuffling,
config_name=config_name,
config_description=config_description,
citation=utils.dedent(citation),
module_name=str(builder.__module__),
redistribution_info=dataset_info_pb2.RedistributionInfo(
license=utils.dedent(license or redistribution_info.pop("license")),
**redistribution_info) if redistribution_info else None)
if homepage:
self._info_proto.location.urls[:] = [homepage]
if features:
if not isinstance(features, top_level_feature.TopLevelFeature):
raise ValueError(
"DatasetInfo.features only supports FeaturesDict or Sequence at "
"the top-level. Got {}".format(features))
self._features = features
self._splits = splits_lib.SplitDict([])
if split_dict:
self.set_splits(split_dict)
if supervised_keys is not None:
self._info_proto.supervised_keys.CopyFrom(
_supervised_keys_to_proto(supervised_keys))
if metadata and not isinstance(metadata, Metadata):
raise ValueError(
"Metadata should be a `tfds.core.Metadata` instance. Received "
"{}".format(metadata))
self._metadata = metadata
# Is this object initialized with both the static and the dynamic data?
self._fully_initialized = False
@classmethod
def from_proto(cls, builder,
proto: dataset_info_pb2.DatasetInfo) -> "DatasetInfo":
"""Instantiates DatasetInfo from the given builder and proto."""
if builder.builder_config:
assert builder.builder_config.name == proto.config_name
assert str(builder.version) == proto.version
features = None
if proto.HasField("features"):
features = feature_lib.FeatureConnector.from_proto(proto.features)
supervised_keys = None
if proto.HasField("supervised_keys"):
supervised_keys = _supervised_keys_from_proto(proto.supervised_keys)
filename_template = naming.ShardedFileTemplate(
dataset_name=builder.name,
data_dir=builder.data_dir,
filetype_suffix=proto.file_format or "tfrecord")
return cls(
builder=builder,
description=proto.description,
features=features,
supervised_keys=supervised_keys,
disable_shuffling=proto.disable_shuffling,
citation=proto.citation,
license=proto.redistribution_info.license,
split_dict=splits_lib.SplitDict.from_proto(
repeated_split_infos=proto.splits,
filename_template=filename_template),
)
@property
def as_proto(self) -> dataset_info_pb2.DatasetInfo:
return self._info_proto
@property
def name(self) -> str:
return self.as_proto.name
@property
def config_name(self) -> str:
return self.as_proto.config_name
@property
def full_name(self):
"""Full canonical name: (<dataset_name>/<config_name>/<version>)."""
names = [self._builder.name]
if self._builder.builder_config:
names.append(self._builder.builder_config.name)
names.append(str(self.version))
return posixpath.join(*names)
@property
def description(self):
return self.as_proto.description
@property
def version(self):
return self._builder.version
@property
def release_notes(self) -> Optional[Dict[str, str]]:
return self._builder.release_notes
@property
def disable_shuffling(self) -> bool:
return self.as_proto.disable_shuffling
@property
def homepage(self):
urls = self.as_proto.location.urls
tfds_homepage = f"https://www.tensorflow.org/datasets/catalog/{self.name}"
return urls and urls[0] or tfds_homepage
@property
def citation(self) -> str:
return self.as_proto.citation
@property
def data_dir(self):
return self._builder.data_dir
  @property
  def dataset_size(self) -> utils.Size:
    """Generated dataset files size, in bytes."""
    # For old datasets, maybe empty.
    return utils.Size(sum(split.num_bytes for split in self.splits.values()))
  @property
  def download_size(self) -> utils.Size:
    """Downloaded files size, in bytes."""
    # Fallback to deprecated `size_in_bytes` if `download_size` is empty.
    return utils.Size(self.as_proto.download_size or
                      self.as_proto.size_in_bytes)
  @download_size.setter
  def download_size(self, size):
    """Sets the download size on the underlying proto."""
    self.as_proto.download_size = size
  @property
  def features(self):
    """Feature structure of the dataset (may be unset/falsy)."""
    return self._features
  @property
  def metadata(self) -> Optional[Metadata]:
    """Additional dataset metadata object, if any."""
    return self._metadata
  @property
  def supervised_keys(self) -> Optional[SupervisedKeysType]:
    """Supervised keys tuple, or `None` when the proto field is unset."""
    if not self.as_proto.HasField("supervised_keys"):
      return None
    supervised_keys = self.as_proto.supervised_keys
    return _supervised_keys_from_proto(supervised_keys)
  @property
  def redistribution_info(self):
    """Redistribution info (e.g. license) from the proto."""
    return self.as_proto.redistribution_info
  @property
  def module_name(self) -> str:
    """Module name, as stored in the proto."""
    return self.as_proto.module_name
  @property
  def file_format(self) -> Optional[file_adapters.FileFormat]:
    """File format of the generated files, or `None` if not set yet."""
    if not self.as_proto.file_format:
      return None
    return file_adapters.FileFormat(self.as_proto.file_format)
  def set_file_format(
      self,
      file_format: Union[None, str, file_adapters.FileFormat],
  ) -> None:
    """Internal function to define the file format.
    The file format is set during `FileReaderBuilder.__init__`,
    not `DatasetInfo.__init__`.
    Args:
      file_format: The file format. `None` keeps the current (or default)
        format; strings are converted to `file_adapters.FileFormat`.
    Raises:
      ValueError: If `file_format` is not a valid format, or if a different
        format was already set on this `DatasetInfo`.
    """
    # If file format isn't present already, fallback to `DEFAULT_FILE_FORMAT`
    file_format = (
        file_format  # Format explicitly given: tfds.builder(..., file_format=x)
        or self.file_format  # Format restored from dataset_info.json
        or file_adapters.DEFAULT_FILE_FORMAT)
    try:
      new_file_format = file_adapters.FileFormat(file_format)
    except ValueError as e:
      # Re-raise with the list of valid formats appended to the message.
      all_values = [f.value for f in file_adapters.FileFormat]
      utils.reraise(e, suffix=f". Valid file formats: {all_values}")
    # If the file format has been set once, file format should be consistent
    if self.file_format and self.file_format != new_file_format:
      raise ValueError(f"File format is already set to {self.file_format}. "
                       f"Got {new_file_format}")
    self.as_proto.file_format = new_file_format.value
  @property
  def splits(self) -> splits_lib.SplitDict:
    """Split metadata (names, shard lengths, statistics, ...)."""
    return self._splits
  def set_splits(self, split_dict: splits_lib.SplitDict) -> None:
    """Split setter (private method).
    Args:
      split_dict: The new splits. Statistics of the previously stored splits
        are carried over when the shard layout is unchanged; missing filename
        templates are filled in from this dataset's name/data_dir.
    Raises:
      AssertionError: If a split's filename template belongs to a different
        dataset than this builder.
    """
    # Sanity-check that every (single-folder) split targets this dataset.
    for split, split_info in split_dict.items():
      if isinstance(split_info, splits_lib.MultiSplitInfo):
        # When splits are from multiple folders, the dataset can be different.
        continue
      if (split_info.filename_template and
          self._builder.name != split_info.filename_template.dataset_name):
        raise AssertionError(
            f"SplitDict contains SplitInfo for split {split} whose "
            "dataset_name does not match to the dataset name in dataset_info. "
            f"{self._builder.name} != {split_info.filename_template.dataset_name}"
        )
    # If the statistics have been pre-loaded, forward the statistics
    # into the new split_dict. Also add the filename template if it's not set.
    new_split_infos = []
    # Template lacking only the split name; completed per split below.
    incomplete_filename_template = naming.ShardedFileTemplate(
        dataset_name=self.name,
        data_dir=self.data_dir,
        filetype_suffix=self.as_proto.file_format or "tfrecord")
    for split_info in split_dict.values():
      if isinstance(split_info, splits_lib.MultiSplitInfo):
        new_split_infos.append(split_info)
        continue
      old_split_info = self._splits.get(split_info.name)
      # Carry statistics over only when shard lengths are identical, i.e.
      # the split's data did not change.
      if (not split_info.statistics.ByteSize() and old_split_info and
          old_split_info.statistics.ByteSize() and
          old_split_info.shard_lengths == split_info.shard_lengths):
        split_info = split_info.replace(statistics=old_split_info.statistics)
      if not split_info.filename_template:
        filename_template = incomplete_filename_template.replace(
            split=split_info.name)
        split_info = split_info.replace(filename_template=filename_template)
      new_split_infos.append(split_info)
    # Update the dictionary representation.
    self._splits = splits_lib.SplitDict(new_split_infos)
    # Update the proto
    # Note that the proto should not be saved or used for multi-folder datasets.
    del self.as_proto.splits[:]  # Clear previous
    for split_info in self._splits.values():
      if isinstance(split_info, splits_lib.MultiSplitInfo):
        # Flatten MultiSplitInfo into its individual sub-splits.
        for si in split_info.split_infos:
          self.as_proto.splits.add().CopyFrom(si.to_proto())
      else:
        self.as_proto.splits.add().CopyFrom(split_info.to_proto())
  def update_data_dir(self, data_dir: str) -> None:
    """Updates the data dir for each split."""
    new_split_infos = []
    for split_info in self._splits.values():
      if isinstance(split_info, splits_lib.MultiSplitInfo):
        raise RuntimeError(
            "Updating the data_dir for MultiSplitInfo is not supported!")
      # Re-point each split's filename template at the new directory.
      filename_template = split_info.filename_template.replace(
          data_dir=data_dir)
      new_split_info = split_info.replace(filename_template=filename_template)
      new_split_infos.append(new_split_info)
    self.set_splits(splits_lib.SplitDict(new_split_infos))
  @property
  def initialized(self) -> bool:
    """Whether DatasetInfo has been fully initialized."""
    return self._fully_initialized
  def _dataset_info_path(self, dataset_info_dir):
    """Path of the dataset info JSON inside `dataset_info_dir`."""
    return os.path.join(dataset_info_dir, DATASET_INFO_FILENAME)
  def _license_path(self, dataset_info_dir):
    """Path of the license file inside `dataset_info_dir`."""
    return os.path.join(dataset_info_dir, LICENSE_FILENAME)
  @property
  def as_json(self) -> str:
    """JSON serialization of the underlying proto (sorted keys)."""
    return json_format.MessageToJson(self.as_proto, sort_keys=True)
  def write_to_directory(self, dataset_info_dir) -> None:
    """Write `DatasetInfo` as JSON to `dataset_info_dir`."""
    # Save the features structure & metadata (vocabulary, labels,...)
    if self.features:
      self.features.save_config(dataset_info_dir)
    # Save any additional metadata
    if self.metadata is not None:
      self.metadata.save_metadata(dataset_info_dir)
    # Only write the license file when a license is actually set.
    if self.redistribution_info.license:
      with tf.io.gfile.GFile(self._license_path(dataset_info_dir), "w") as f:
        f.write(self.redistribution_info.license)
    with tf.io.gfile.GFile(self._dataset_info_path(dataset_info_dir), "w") as f:
      f.write(self.as_json)
  def read_from_directory(self, dataset_info_dir: str) -> None:
    """Update DatasetInfo from the JSON files in `dataset_info_dir`.
    This function updates all the dynamically generated fields (num_examples,
    hash, time of creation,...) of the DatasetInfo.
    This will overwrite all previous metadata.
    Args:
      dataset_info_dir: `str` The directory containing the metadata file. This
        should be the root directory of a specific dataset version.
    Raises:
      FileNotFoundError: If the dataset_info.json can't be found.
    """
    logging.info("Load dataset info from %s", dataset_info_dir)
    json_filename = self._dataset_info_path(dataset_info_dir)
    if not tf.io.gfile.exists(json_filename):
      raise FileNotFoundError(
          "Tried to load `DatasetInfo` from a directory which does not exist or"
          " does not contain `dataset_info.json`. Please delete the directory "
          f"`{dataset_info_dir}` if you are trying to re-generate the "
          "dataset.")
    # Load the metadata from disk
    parsed_proto = read_from_json(json_filename)
    # Update splits
    filename_template = naming.ShardedFileTemplate(
        dataset_name=self._builder.name,
        data_dir=self.data_dir,
        filetype_suffix=parsed_proto.file_format or "tfrecord")
    split_dict = splits_lib.SplitDict.from_proto(
        repeated_split_infos=parsed_proto.splits,
        filename_template=filename_template)
    self.set_splits(split_dict)
    # Restore the feature metadata (vocabulary, labels names,...)
    if self.features:
      self.features.load_metadata(dataset_info_dir)
    # For `ReadOnlyBuilder`, reconstruct the features from the config.
    elif tf.io.gfile.exists(feature_lib.make_config_path(dataset_info_dir)):
      self._features = feature_lib.FeatureConnector.from_config(
          dataset_info_dir)
    # Restore the MetaDataDict from metadata.json if there is any
    if (self.metadata is not None or
        tf.io.gfile.exists(_metadata_filepath(dataset_info_dir))):
      # If the dataset was loaded from file, self.metadata will be `None`, so
      # we create a MetadataDict first.
      if self.metadata is None:
        self._metadata = MetadataDict()
      self.metadata.load_metadata(dataset_info_dir)
    # Update fields which are not defined in the code. This means that
    # the code will overwrite fields which are present in
    # dataset_info.json.
    for field_name, field in self.as_proto.DESCRIPTOR.fields_by_name.items():
      field_value = getattr(self._info_proto, field_name)
      field_value_restored = getattr(parsed_proto, field_name)
      # `HasField` raises ValueError for fields without presence tracking;
      # fall back to truthiness of the value in that case.
      try:
        is_defined = self._info_proto.HasField(field_name)
      except ValueError:
        is_defined = bool(field_value)
      try:
        is_defined_in_restored = parsed_proto.HasField(field_name)
      except ValueError:
        is_defined_in_restored = bool(field_value_restored)
      # If field is defined in code, we ignore the value
      if is_defined:
        if field_value != field_value_restored:
          logging.info(
              "Field info.%s from disk and from code do not match. Keeping "
              "the one from code.", field_name)
        continue
      # If the field is also not defined in JSON file, we do nothing
      if not is_defined_in_restored:
        continue
      # Otherwise, we restore the dataset_info.json value
      if field.type == field.TYPE_MESSAGE:
        field_value.MergeFrom(field_value_restored)
      else:
        setattr(self._info_proto, field_name, field_value_restored)
    # The restored proto must describe the same version as the builder.
    if self._builder._version != self.version:  # pylint: disable=protected-access
      raise AssertionError(
          "The constructed DatasetInfo instance and the restored proto version "
          "do not match. Builder version: {}. Proto version: {}".format(
              self._builder._version, self.version))  # pylint: disable=protected-access
    # Mark as fully initialized.
    self._fully_initialized = True
  def initialize_from_bucket(self) -> None:
    """Initialize DatasetInfo from GCS bucket info files."""
    # In order to support Colab, we use the HTTP GCS API to access the metadata
    # files. They are copied locally and then loaded.
    tmp_dir = tempfile.mkdtemp("tfds")
    data_files = gcs_utils.gcs_dataset_info_files(self.full_name)
    # Nothing pre-computed on GCS for this dataset: keep the in-code info.
    if not data_files:
      return
    logging.info(
        "Load pre-computed DatasetInfo (eg: splits, num examples,...) "
        "from GCS: %s", self.full_name)
    # Stage the remote files locally, then reuse the directory loader.
    for fname in data_files:
      out_fname = os.path.join(tmp_dir, os.path.basename(fname))
      tf.io.gfile.copy(os.fspath(gcs_utils.gcs_path(fname)), out_fname)
    self.read_from_directory(tmp_dir)
  def __repr__(self):
    """Multi-line, human-readable summary of the dataset info."""
    # Sentinel marking fields to omit from the output.
    SKIP = object()  # pylint: disable=invalid-name
    splits = _indent("\n".join(
        ["{"] +
        [f" '{k}': {split}," for k, split in sorted(self.splits.items())] +
        ["}"]))
    if self._info_proto.config_description:
      config_description = _indent(
          f'"""\n{self._info_proto.config_description}\n"""')
    else:
      config_description = SKIP
    lines = ["tfds.core.DatasetInfo("]
    for key, value in [
        ("name", repr(self.name)),
        ("full_name", repr(self.full_name)),
        ("description", _indent(f'"""\n{self.description}\n"""')),
        ("config_description", config_description),
        ("homepage", repr(self.homepage)),
        ("data_path", repr(self.data_dir)),
        ("download_size", self.download_size),
        ("dataset_size", self.dataset_size),
        ("features", _indent(repr(self.features))),
        ("supervised_keys", self.supervised_keys),
        ("disable_shuffling", self.disable_shuffling),
        ("splits", splits),
        ("citation", _indent(f'"""{self.citation}"""')),
        # Proto add a \n that we strip.
        ("redistribution_info", str(self.redistribution_info).strip() or SKIP),
    ]:
      if value != SKIP:
        lines.append(f" {key}={value},")
    lines.append(")")
    return "\n".join(lines)
def _nest_to_proto(nest: Nest) -> dataset_info_pb2.SupervisedKeys.Nest:
  """Creates a `SupervisedKeys.Nest` from a limited `tf.nest` style structure.
  Args:
    nest: A `tf.nest` structure of tuples, dictionaries or string feature keys.
  Returns:
    The same structure as a `SupervisedKeys.Nest` proto.
  Raises:
    ValueError: If `nest` contains anything other than exactly
      tuple/dict/str (subclasses are rejected on purpose).
  """
  # Exact type checks (not isinstance): subclasses are explicitly rejected.
  nest_type = type(nest)
  proto = dataset_info_pb2.SupervisedKeys.Nest()
  if nest_type is tuple:
    for item in nest:
      proto.tuple.items.append(_nest_to_proto(item))
  elif nest_type is dict:
    # Recursively convert values, then wrap in the proto Dict message.
    nest = {key: _nest_to_proto(value) for key, value in nest.items()}
    proto.dict.CopyFrom(dataset_info_pb2.SupervisedKeys.Dict(dict=nest))
  elif nest_type is str:
    proto.feature_key = nest
  else:
    raise ValueError("The nested structures in `supervised_keys` must only "
                     "contain instances of (tuple, dict, str), no subclasses.\n"
                     f"Found type: {nest_type}")
  return proto
def _supervised_keys_to_proto(
    keys: SupervisedKeysType) -> dataset_info_pb2.SupervisedKeys:
  """Converts a `supervised_keys` tuple to a SupervisedKeys proto."""
  if not isinstance(keys, tuple) or len(keys) not in [2, 3]:
    raise ValueError(
        "`supervised_keys` must contain a tuple of 2 or 3 elements.\n"
        f"got: {keys!r}")
  proto = dataset_info_pb2.SupervisedKeys(
      tuple=dataset_info_pb2.SupervisedKeys.Tuple(
          items=(_nest_to_proto(key) for key in keys)))
  return proto
def _nest_from_proto(proto: dataset_info_pb2.SupervisedKeys.Nest) -> Nest:
  """Creates a `tf.nest` style structure from a `SupervisedKeys.Nest` proto.
  Args:
    proto: A `SupervisedKeys.Nest` proto.
  Returns:
    The proto converted to a `tf.nest` style structure of tuples, dictionaries
    or strings.
  Raises:
    ValueError: If the proto sets none of its oneof variants.
  """
  if proto.HasField("tuple"):
    return tuple(_nest_from_proto(item) for item in proto.tuple.items)
  elif proto.HasField("dict"):
    # Sort for a deterministic structure regardless of proto map ordering.
    return {
        key: _nest_from_proto(value)
        for key, value in sorted(proto.dict.dict.items())
    }
  elif proto.HasField("feature_key"):
    return proto.feature_key
  else:
    raise ValueError("`SupervisedKeys.Nest` proto must contain one of "
                     f"(tuple, dict, feature_key). Got: {proto}")
def _supervised_keys_from_proto(
    proto: dataset_info_pb2.SupervisedKeys) -> SupervisedKeysType:
  """Converts a `SupervisedKeys` proto back to a simple python tuple.
  Args:
    proto: The proto to convert. Either the legacy `input`/`output` pair or
      the nested `tuple` representation.
  Returns:
    The supervised keys as a tuple.
  Raises:
    ValueError: If the proto defines neither representation.
  """
  if proto.input and proto.output:
    # Legacy format: a plain (input, output) pair of feature keys.
    return (proto.input, proto.output)
  elif proto.tuple:
    return tuple(_nest_from_proto(item) for item in proto.tuple.items)
  else:
    # Bug fix: this message was a plain (non-f) string, so "{proto}" was
    # emitted literally instead of being interpolated.
    raise ValueError("A `SupervisedKeys` proto must have either `input` and "
                     f"`output` defined, or `tuple`, got: {proto}")
def _indent(content):
"""Add indentation to all lines except the first."""
lines = content.split("\n")
return "\n".join([lines[0]] + [" " + l for l in lines[1:]])
def _populate_shape(shape_or_dict, prefix, schema_features):
  """Populates shape in the schema.
  Args:
    shape_or_dict: A shape tuple/list, or a (possibly nested) dict of shapes.
    prefix: Mutable list of name components forming the current feature path.
    schema_features: Mapping from "a/b"-style feature names to schema
      features; matching entries are mutated in place.
  """
  if isinstance(shape_or_dict, (tuple, list)):
    feature_name = "/".join(prefix)
    # Only override features that exist in the schema and have a known shape.
    if shape_or_dict and feature_name in schema_features:
      schema_feature = schema_features[feature_name]
      schema_feature.ClearField("shape")
      for dim in shape_or_dict:
        # We denote `None`s as -1 in the shape proto.
        schema_feature.shape.dim.add().size = -1 if dim is None else dim
    return
  # Nested feature dict: recurse with each name pushed onto the prefix.
  for name, val in shape_or_dict.items():
    prefix.append(name)
    _populate_shape(val, prefix, schema_features)
    prefix.pop()
def get_dataset_feature_statistics(builder, split):
  """Calculate statistics for the specified split.
  Runs TensorFlow Data Validation (lazily imported) over the split's
  generated files.
  Args:
    builder: Dataset builder whose generated files are analyzed.
    split: Name of the split to compute statistics for.
  Returns:
    A `(statistics, schema)` tuple: the TFDV statistics dataset for the
    split, and the inferred schema with shapes overridden from the builder's
    feature definitions.
  """
  tfdv = lazy_imports_lib.lazy_imports.tensorflow_data_validation
  # TODO(epot): Avoid hardcoding file format.
  filetype_suffix = "tfrecord"
  if filetype_suffix not in ["tfrecord", "csv"]:
    raise ValueError(
        "Cannot generate statistics for filetype {}".format(filetype_suffix))
  filename_template = naming.ShardedFileTemplate(
      data_dir=builder.data_dir,
      dataset_name=builder.name,
      split=split,
      filetype_suffix=filetype_suffix)
  filepattern = filename_template.sharded_filepaths_pattern()
  # Avoid generating a large number of buckets in rank histogram
  # (default is 1000).
  stats_options = tfdv.StatsOptions(
      num_top_values=10, num_rank_histogram_buckets=10)
  if filetype_suffix == "csv":
    statistics = tfdv.generate_statistics_from_csv(
        filepattern, stats_options=stats_options)
  else:
    statistics = tfdv.generate_statistics_from_tfrecord(
        filepattern, stats_options=stats_options)
  schema = tfdv.infer_schema(statistics)
  schema_features = {feature.name: feature for feature in schema.feature}
  # Override shape in the schema.
  for feature_name, feature in builder.info.features.items():
    _populate_shape(feature.shape, [feature_name], schema_features)
  # Remove legacy field.
  if getattr(schema, "generate_legacy_feature_spec", None) is not None:
    schema.ClearField("generate_legacy_feature_spec")
  return statistics.datasets[0], schema
def read_from_json(path: epath.PathLike) -> dataset_info_pb2.DatasetInfo:
  """Parse the JSON file at `path` into a `DatasetInfo` proto.
  Args:
    path: Path to a JSON-serialized `DatasetInfo`.
  Returns:
    The deserialized `dataset_info_pb2.DatasetInfo` message.
  """
  contents = epath.Path(path).read_text()
  return json_format.Parse(contents, dataset_info_pb2.DatasetInfo())
def read_proto_from_builder_dir(
    builder_dir: epath.PathLike) -> dataset_info_pb2.DatasetInfo:
  """Reads the dataset info from the given builder dir.
  Args:
    builder_dir: The folder that contains the dataset info files.
  Returns:
    The DatasetInfo proto as read from the builder dir.
  Raises:
    FileNotFoundError: If the builder_dir does not exists.
  """
  # Expand `~` so user-relative paths work.
  info_path = os.path.join(
      os.path.expanduser(builder_dir), DATASET_INFO_FILENAME)
  if not tf.io.gfile.exists(info_path):
    raise FileNotFoundError(
        f"Could not load dataset info: {info_path} does not exists.")
  return read_from_json(info_path)
def pack_as_supervised_ds(
    ds: tf.data.Dataset,
    ds_info: DatasetInfo,
) -> tf.data.Dataset:
  """Pack `(input, label)` dataset as `{'key0': input, 'key1': label}`.
  Datasets that are not a supervised 2-tuple (or whose info declares no
  supervised keys) are returned unchanged.
  """
  element_spec = ds.element_spec
  is_supervised_pair = (
      ds_info.supervised_keys and isinstance(element_spec, tuple) and
      len(element_spec) == 2)
  if not is_supervised_pair:
    # If dataset isn't a supervised tuple (input, label), return as-is
    return ds
  x_key, y_key = ds_info.supervised_keys
  return ds.map(lambda x, y: {x_key: x, y_key: y})
def _metadata_filepath(data_dir):
  """Path of the metadata JSON file inside `data_dir`."""
  return os.path.join(data_dir, METADATA_FILENAME)
class MetadataDict(Metadata, dict):
  """A `tfds.core.Metadata` object that acts as a `dict`.
  By default, the metadata will be serialized as JSON.
  """
  def save_metadata(self, data_dir):
    """Save the metadata."""
    with tf.io.gfile.GFile(_metadata_filepath(data_dir), "w") as f:
      json.dump(self, f)
  def load_metadata(self, data_dir):
    """Restore the metadata."""
    # Drop any in-memory entries before loading from disk.
    self.clear()
    with tf.io.gfile.GFile(_metadata_filepath(data_dir), "r") as f:
      self.update(json.load(f))
class BeamMetadataDict(MetadataDict):
  """A `tfds.core.Metadata` object supporting Beam-generated datasets."""
  def __init__(self, *args, **kwargs):
    super(BeamMetadataDict, self).__init__(*args, **kwargs)
    # Staging directory for the one-element JSON files written by the
    # Beam pipeline; consumed and removed in `save_metadata`.
    self._tempdir = tempfile.mkdtemp("tfds_beam_metadata")
  def _temp_filepath(self, key):
    """Temporary JSON file used to stage the value for `key`."""
    return os.path.join(self._tempdir, "%s.json" % key)
  def __setitem__(self, key, item):
    """Creates write sink for beam PValues or sets value of key in `dict`.
    If the item is a PValue, it is expected to contain exactly one element,
    which will be written out as a temporary JSON file once the beam pipeline
    runs. These outputs will be loaded and stored in a single JSON when
    `save_metadata` is called after the pipeline completes.
    Args:
      key: hashable type, the key for the item.
      item: `beam.pvalue.PValue` or other, the metadata value.
    """
    beam = lazy_imports_lib.lazy_imports.apache_beam
    if isinstance(item, beam.PTransform):
      # Implementing Beam support might be possible but would
      # require very careful implementation to avoid computing the
      # PTransform twice (once for the split and once for the metadata).
      raise NotImplementedError(
          "`tfds.core.BeamMetadataDict` can\'t be used on `beam.PTransform`, "
          "only on `beam.PCollection`. See `_generate_examples` doc on how "
          "to use `beam.PCollection`, or wrap your `_generate_examples` inside "
          f"a @beam.ptransform_fn. Got: {key}: {item}")
    elif isinstance(item, beam.pvalue.PValue):
      if key in self:
        raise ValueError("Already added PValue with key: %s" % key)
      logging.info("Lazily adding metadata item with Beam: %s", key)
      def _to_json(item_list):
        # The PCollection must reduce to exactly one metadata value.
        if len(item_list) != 1:
          raise ValueError(
              "Each metadata PValue must contain a single element. Got %d." %
              len(item_list))
        item = item_list[0]
        return json.dumps(item)
      # Write the collected single element as an unsharded JSON text file.
      _ = (
          item
          | "metadata_%s_tolist" % key >> beam.combiners.ToList()
          | "metadata_%s_tojson" % key >> beam.Map(_to_json)
          | "metadata_%s_write" % key >> beam.io.WriteToText(
              self._temp_filepath(key),
              num_shards=1,
              shard_name_template="",
          ))
    super(BeamMetadataDict, self).__setitem__(key, item)
  def save_metadata(self, data_dir):
    """Save the metadata inside the beam job."""
    beam = lazy_imports_lib.lazy_imports.apache_beam
    for key, item in self.items():
      if isinstance(item, beam.pvalue.PValue):
        # Replace the lazy PValue with the concrete value staged on disk.
        with tf.io.gfile.GFile(self._temp_filepath(key), "r") as f:
          self[key] = json.load(f)
    tf.io.gfile.rmtree(self._tempdir)
    super(BeamMetadataDict, self).save_metadata(data_dir)
| tensorflow/datasets | tensorflow_datasets/core/dataset_info.py | Python | apache-2.0 | 33,196 |
"""
https://leetcode.com/problems/uncommon-words-from-two-sentences/
https://leetcode.com/submissions/detail/182166803/
"""
class Solution:
    def uncommonFromSentences(self, A, B):
        """Return the words appearing exactly once across both sentences.

        A word is "uncommon" iff its total count over A and B is 1: that
        single occurrence guarantees it appears in one sentence and not
        the other. Words are returned in first-seen order.

        :type A: str
        :type B: str
        :rtype: List[str]
        """
        from collections import Counter
        # Counter replaces the hand-rolled dict counting; `+` merges counts.
        counts = Counter(A.split(' ')) + Counter(B.split(' '))
        return [word for word, count in counts.items() if count == 1]
import unittest
class Test(unittest.TestCase):
    """Unit tests for Solution.uncommonFromSentences."""
    def test(self):
        solution = Solution()
        # NOTE: exact list equality relies on results being produced in
        # first-seen word order (dict insertion order, Python 3.7+).
        self.assertEqual(solution.uncommonFromSentences(
            "this apple is sweet", "this apple is sour"),
            ["sweet", "sour"]
        )
        self.assertEqual(solution.uncommonFromSentences(
            "apple apple", "banana"),
            ["banana"]
        )
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| vivaxy/algorithms | python/problems/uncommon_words_from_two_sentences.py | Python | mit | 1,098 |
''' Symmetrize weights in the active group.'''
'''
*******************************************************************************
License and Copyright
Copyright 2012 Jordan Hueckstaedt
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# Add-on metadata consumed by Blender's add-on manager.
bl_info = {
    'name': 'Symmetrize Weights',
    'author': 'Jordan Hueckstaedt',
    'version': (1, 0),
    'blender': (2, 63, 0),
    'location': 'View > Weight Tools > Symmetrize',
    'warning': '', # used for warning icon and text in addons panel
    'description': 'Symmetrize weights in the selected vertex group.',
    "wiki_url": "",
    "tracker_url": "",
    "support": 'TESTING',
    'category': 'Paint'
}
import bpy
class WeightPaintSymmetrize(bpy.types.Operator):
    """Mirror the active vertex group's weights across the X axis."""
    bl_idname = "object.weightpaint_symmetrize"
    bl_label = "Symmetrize"
    bl_options = {'REGISTER', 'UNDO'}
    # Cached vertex-group index; the active index may change while the
    # operator runs (see restore_active_index).
    active_index = None
    operation = bpy.props.EnumProperty(
        name = "Operation",
        items = (('RIGHT', "Right to Left", "Right to Left"),
            ('LEFT', "Left to Right", "Left to Right"),
            ),
        )
    @classmethod
    def poll(cls, context):
        # Only available in weight paint mode on a mesh with vertex groups.
        obj = context.active_object
        return (obj and obj.mode == 'WEIGHT_PAINT' and obj.type == 'MESH' and len(obj.vertex_groups) > 0)
    def restore_active_index(self, obj):
        # This is a hack. For some reason the active vertex group changes during execution,
        if self.active_index is not None:
            obj.vertex_groups.active_index = self.active_index
    def execute(self, context):
        """Copy weights from the source side onto the opposite side.

        Strategy: mirror the whole group (the Mirror operator flips weights
        and handles masking), record the flipped weights on the target side,
        mirror again to restore the original, then apply the recorded values.
        """
        # Get right indexes
        obj = context.object
        verts = obj.data.vertices
        if self.active_index is None:
            self.active_index = context.active_object.vertex_groups.active_index
        # Vertices on the side that RECEIVES the mirrored weights.
        if self.operation == 'RIGHT':
            other_indexes = [vert.index for vert in verts if vert.co[0] < 0]
        else:
            other_indexes = [vert.index for vert in verts if vert.co[0] > 0]
        # Flip weights (Mirror operator also handles masking for free)
        self.restore_active_index( obj )
        bpy.ops.object.vertex_group_mirror(flip_group_names = False)
        # Save flipped weights: (group slot, weight) per target vertex.
        weights = {}
        for i in other_indexes:
            for x, group in enumerate(obj.data.vertices[i].groups):
                if group.group == self.active_index:
                    weights[i] = (x, group.weight)
                    break
        # Restore weights
        self.restore_active_index( obj )
        bpy.ops.object.vertex_group_mirror(flip_group_names = False)
        # Apply flipped weights.
        # (Fix: previously `for x, i in enumerate(other_indexes)` bound an
        # unused `x`; iterating the indexes directly is equivalent.)
        for i in other_indexes:
            if i in weights:
                obj.data.vertices[i].groups[weights[i][0]].weight = weights[i][1]
        obj.data.update()
        self.restore_active_index( obj )
        return{'FINISHED'}
    def invoke(self, context, event):
        # Seed operator state from the current group and the scene setting.
        self.active_index = context.active_object.vertex_groups.active_index
        self.operation = context.scene.weightpaint_symmetrize_operation
        return self.execute(context)
def panel_func(self, context):
    """Draw the Symmetrize button in the weight paint tools panel."""
    row = self.layout.row(align = True)
    row.alignment = 'EXPAND'
    row.operator("object.weightpaint_symmetrize")
def register():
    """Register the operator, panel button and scene-level enum property."""
    bpy.utils.register_module(__name__)
    bpy.types.VIEW3D_PT_tools_weightpaint.append(panel_func)
    # Scene-level copy of the operator's enum so the UI can expose it.
    bpy.types.Scene.weightpaint_symmetrize_operation = bpy.props.EnumProperty(
        name = "Operation",
        items = (('RIGHT', "Right to Left", "Right to Left"),
            ('LEFT', "Left to Right", "Left to Right"),
            ),
        )
def unregister():
    """Undo everything done in register()."""
    bpy.utils.unregister_module(__name__)
    bpy.types.VIEW3D_PT_tools_weightpaint.remove(panel_func)
    del bpy.types.Scene.weightpaint_symmetrize_operation
# Allow running the script directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| assumptionsoup/Symmetrize-Weights | symmetrize_weights.py | Python | gpl-3.0 | 3,979 |
from bokeh.layouts import column
from bokeh.models.widgets import Div
from dashboard.bokeh.plots.descriptors.table import Table
from dashboard.bokeh.plots.descriptors.title import Title
from dashboard.bokeh.plots.patch.main import Patch
from qlf_models import QLFModels
from bokeh.resources import CDN
from bokeh.embed import file_html
class RMS:
    """Renders the GETRMS (CCD noise) QA panel as a standalone HTML page."""
    def __init__(self, process_id, arm, spectrograph):
        # Selection describing which camera's QA output to load.
        self.selected_process_id = process_id
        self.selected_arm = arm
        self.selected_spectrograph = spectrograph
    def load_qa(self):
        """Load CHECK_CCDs metrics and build the noise table and amp plots."""
        # Camera identifier: arm + spectrograph number, e.g. 'b0'.
        cam = self.selected_arm+str(self.selected_spectrograph)
        mergedqa = QLFModels().get_output(self.selected_process_id, cam)
        check_ccds = mergedqa['TASKS']['CHECK_CCDs']
        getrms = check_ccds['METRICS']
        # Normal/warning threshold ranges for the per-amplifier noise metric.
        nrg = check_ccds['PARAMS']['NOISE_AMP_NORMAL_RANGE']
        wrg = check_ccds['PARAMS']['NOISE_AMP_WARN_RANGE']
        # Science exposures use a program-specific reference key suffix.
        if mergedqa['FLAVOR'].upper() == 'SCIENCE':
            program = mergedqa['GENERAL_INFO']['PROGRAM'].upper()
            program_prefix = '_'+program
        else:
            program_prefix = ''
        refexp = mergedqa['TASKS']['CHECK_CCDs']['PARAMS']['NOISE_AMP' +
                                                           program_prefix+'_REF']
        # amp 1
        p = Patch().plot_amp(
            dz=getrms["NOISE_AMP"],
            refexp=refexp,
            name="NOISE_AMP",
            description="NOISE(photon counts)",
            wrg=wrg
        )
        p_stat = Patch().plot_amp(
            dz=getrms["NOISE_AMP"],
            refexp=refexp,
            name="NOISE_AMP (STATUS)",
            description="NOISE (photon counts)",
            wrg=wrg,
            nrg=nrg,
            status_plot=True
        )
        # amp 2
        p2 = Patch().plot_amp(
            dz=getrms["NOISE_OVERSCAN_AMP"],
            refexp=refexp,
            name="NOISE_OVERSCAN_AMP",
            description="NOISE Overscan (photon counts)",
            wrg=wrg
        )
        p2_stat = Patch().plot_amp(
            dz=getrms["NOISE_OVERSCAN_AMP"],
            refexp=refexp,
            name="NOISE_OVERSCAN_AMP (STATUS)",
            description="NOISE Overscan (photon counts)",
            wrg=wrg,
            nrg=nrg,
            status_plot=True
        )
        info_col = Title().write_description('getrms')
        # Prepare tables
        current_exposures = check_ccds['METRICS']['NOISE_AMP']
        gen_info = mergedqa['GENERAL_INFO']
        flavor = mergedqa["FLAVOR"]
        # NOTE(review): the table's reference values are read from
        # LITFRAC_AMP_* keys while the plots above use NOISE_AMP references --
        # confirm this mismatch is intentional (looks copy-pasted from the
        # litfrac QA).
        if flavor == 'science':
            program = gen_info['PROGRAM'].upper()
            reference_exposures = check_ccds['PARAMS']['LITFRAC_AMP_' +
                                                       program + '_REF']
        else:
            reference_exposures = check_ccds['PARAMS']['LITFRAC_AMP_REF']
        keynames = ["NOISE_AMP" for i in range(len(current_exposures))]
        table = Table().single_table(keynames, current_exposures, reference_exposures, nrg, wrg)
        layout = column(info_col, Div(),
                        table, Div(),
                        column(p, sizing_mode='scale_both'),
                        column(p2, sizing_mode='scale_both'),
                        column(p_stat, sizing_mode='scale_both'),
                        column(p2_stat, sizing_mode='scale_both'),
                        css_classes=["display-grid"])
        return file_html(layout, CDN, "GETRMS")
| linea-it/qlf | backend/framework/qlf/dashboard/bokeh/qagetrms/main.py | Python | gpl-3.0 | 3,450 |
# -*- coding: utf-8 -*-
#
# This file is part of the jabber.at homepage (https://github.com/jabber-at/hp).
#
# This project is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this project. If
# not, see <http://www.gnu.org/licenses/>.
import re
from django import forms
from .utils import clean_class_attrs
class BootstrapWidgetMixin(object):
    """Mixin adding Bootstrap CSS classes and static assets to a widget."""
    css_classes = ''
    """CSS classes to be added to this element."""
    def __init__(self, attrs=None, css_classes='', **kwargs):
        attrs = attrs or {}
        # Every bootstrap widget carries form-control by default.
        self._add_class(attrs, 'form-control')
        # handle css_classes
        # Collect `css_classes` from every class in the MRO, so subclasses
        # inherit (and can extend) the classes declared by their parents.
        for cls in self.__class__.mro():
            css_classes += ' %s' % getattr(cls, 'css_classes', '')
        css_classes = re.sub(' +', ' ', css_classes).strip()
        if css_classes:
            self._add_class(attrs, css_classes)
        super(BootstrapWidgetMixin, self).__init__(attrs=attrs, **kwargs)
    def build_attrs(self, base_attrs, extra_attrs=None):
        # Normalize the final class attribute via utils.clean_class_attrs.
        attrs = super().build_attrs(base_attrs, extra_attrs=extra_attrs)
        clean_class_attrs(attrs)
        return attrs
    def _add_class(self, attrs, cls):
        # Append `cls` to the widget's class attribute, creating it if absent.
        if attrs.get('class'):
            attrs['class'] += ' %s' % cls
        else:
            attrs['class'] = cls
    class Media:
        css = {
            'all': ('bootstrap/css/bootstrap.css', ),
        }
        js = (
            'bootstrap/js/bootstrap.js',
        )
class MergeClassesMixin(object):
    """Mixin to merge CSS classes from runtime and from the instances class attributes.
    This is most commonly used in MultiWidgets children, where extra_args contains the CSS classes from the
    parent widget.
    """
    def build_attrs(self, base_attrs, extra_attrs=None):
        # Nothing to merge unless both attr dicts carry a class entry.
        if extra_attrs is None or 'class' not in base_attrs or 'class' not in extra_attrs:
            return super().build_attrs(base_attrs, extra_attrs=extra_attrs)
        # Concatenate both class lists so extra_attrs doesn't clobber base_attrs.
        extra_attrs['class'] = '%s %s' % (base_attrs.pop('class', ''), extra_attrs.pop('class', ''))
        return super().build_attrs(base_attrs, extra_attrs=extra_attrs)
class BootstrapMultiWidget(BootstrapWidgetMixin, forms.MultiWidget):
    """Multi-part widget rendered with the bootstrap multiwidget template."""
    template_name = 'bootstrap/forms/widgets/multiwidget.html'
class BootstrapTextInput(BootstrapWidgetMixin, forms.TextInput):
    """Text input with Bootstrap styling."""
    template_name = 'bootstrap/forms/widgets/text.html'
class BootstrapTextarea(BootstrapWidgetMixin, forms.Textarea):
    """Textarea with Bootstrap styling (default template)."""
    pass
class BootstrapEmailInput(BootstrapWidgetMixin, forms.EmailInput):
    """Email input; shares the text-input template."""
    template_name = 'bootstrap/forms/widgets/text.html'
class BootstrapPasswordInput(BootstrapWidgetMixin, forms.PasswordInput):
    """Password input with Bootstrap styling."""
    template_name = 'bootstrap/forms/widgets/password.html'
    def build_attrs(self, base_attrs, extra_attrs=None):
        # Mirror field.required onto the HTML "required" attribute.
        if self.is_required:
            base_attrs['required'] = ''
        return super().build_attrs(base_attrs, extra_attrs=extra_attrs)
class BootstrapSetPasswordInput(BootstrapPasswordInput):
    """Password input for choosing a new password."""
    css_classes = 'set-password'
class BootstrapConfirmPasswordInput(BootstrapPasswordInput):
    """Password input for confirming the chosen password."""
    css_classes = 'confirm-password'
class BootstrapSelect(BootstrapWidgetMixin, forms.Select):
    """Select styled with Bootstrap's custom-select class."""
    css_classes = 'custom-select'
class BootstrapFileInput(BootstrapWidgetMixin, forms.ClearableFileInput):
    """File input using Bootstrap's custom-file styling."""
    template_name = 'bootstrap/forms/widgets/file_input.html'
    css_classes = 'custom-file-input'
    def build_attrs(self, base_attrs, extra_attrs=None):
        # remove form-control
        # (custom-file inputs must not carry the form-control class added
        # by BootstrapWidgetMixin.__init__)
        base_attrs['class'] = base_attrs['class'].replace('form-control', '')
        return super().build_attrs(base_attrs, extra_attrs=extra_attrs)
class BootstrapCheckboxInput(BootstrapWidgetMixin, forms.CheckboxInput):
    """Checkbox styled with Bootstrap's form-check classes."""
    css_classes = 'form-check-input'
    def build_attrs(self, base_attrs, extra_attrs=None):
        # remove form-control
        # (checkboxes use form-check-input instead)
        base_attrs['class'] = base_attrs['class'].replace('form-control', '')
        return super().build_attrs(base_attrs, extra_attrs=extra_attrs)
| jabber-at/hp | hp/bootstrap/widgets.py | Python | gpl-3.0 | 4,414 |
def suffixArray(s):
    """Build the suffix array and LCP (height) array of string `s`.

    Suffixes are ranked by prefix doubling (O(n log^2 n)); LCP values are
    then computed with Kasai's algorithm (O(n)).

    Fix: `xrange` is Python-2-only; `range` works on both Python 2 and 3.

    Args:
        s: non-empty string.

    Returns:
        (sa, height) where sa[r] is the start index of the rank-r suffix
        and height[r] is the longest common prefix length between the
        suffixes of rank r-1 and r (height[0] == 0).
    """
    n = len(s)
    # Initial ranks: dense ranks of the single characters.
    rkd = {c: i for i, c in enumerate(sorted(set(s)))}
    rank = [rkd[c] for c in s]
    k = 1
    # Prefix doubling: after each pass `rank` orders suffixes by their
    # first k characters.
    while k <= n:
        # Pair each rank with the rank k positions later (-1 = past the end).
        xy = [(rank[i], (rank[i+k] if i+k < n else -1)) for i in range(n)]
        rkd = {c: i for i, c in enumerate(sorted(set(xy)))}
        rank = [rkd[c] for c in xy]
        k *= 2
    # Invert rank -> suffix array.
    sa = [0] * n
    for i in range(n):
        sa[rank[i]] = i
    # Kasai's algorithm: walk suffixes in text order, reusing the previous
    # LCP minus one as a lower bound before extending by comparison.
    height = [0] * n
    h = 0
    for i in range(n):
        if rank[i]:
            if h > 0:
                h -= 1
            j = sa[rank[i]-1]
            while i+h < n and j+h < n and s[i+h] == s[j+h]:
                h += 1
        else:
            h = 0
        height[rank[i]] = h
    return sa, height
# Minimal smoke test when run as a script.
if __name__ == '__main__':
    sa, ht = suffixArray('aaaaab')
| scturtle/DSpy | suffixArray.py | Python | unlicense | 781 |
"""
Django settings for paperlink_backend project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Cross-Origin Resource Sharing (django-cors-headers).
# NOTE(review): allowing every origin is development-friendly; restrict
# this before production deployment.
CORS_ORIGIN_ALLOW_ALL = True
# HTTP methods a cross-origin caller may use.
CORS_ALLOW_METHODS = (
    'GET',
    'POST',
    'PUT',
    'PATCH',
    'DELETE',
    'OPTIONS'
)
# Request headers the browser may include on cross-origin calls.
CORS_ALLOW_HEADERS = (
    'x-requested-with',
    'content-type',
    'accept',
    'origin',
    'authorization',
    'x-csrftoken'
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'za1o&9odl$wr)8j-4v9just63tt3anjbt@(mxy!!2h@srub-t#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'paperlink',
'corsheaders',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must run before CommonMiddleware so CORS headers are
    # attached to every response.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CSRF protection is disabled below -- presumably to ease
    # token-less API access during development; confirm before deploying.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'paperlink_backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'paperlink_backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| Dawnflying/PaperFriends | paperlink_backend/paperlink_backend/settings.py | Python | apache-2.0 | 3,109 |
# coding: utf-8
#------------------------------------------------------------------------------
# Copyright 2017 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
import arcpy
import os
# Add parent folder to python path if running test case standalone
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
import Configuration
import UnitTestUtilities
import DataDownload
import arcpyAssert
class GeoNamesTestCase(unittest.TestCase, arcpyAssert.FeatureClassAssertMixin):
    """End-to-end tests for the GeoNames toolbox (file load + gazetteer)."""

    toolboxUnderTest = None # Set to Pro or ArcMap toolbox at runtime
    toolboxAlias = 'defensegeonames'
    loadResultsFeatureClass = None
    templateFeatureClass = None

    def setUp(self):
        """Download test data and resolve toolbox / feature-class paths."""
        # Idiom fix: test truthiness instead of comparing "== True".
        if Configuration.DEBUG: print("     GeoNamesTestCase.setUp")
        ''' Initialization needed if running Test Case standalone '''
        Configuration.GetLogger()
        Configuration.GetPlatform()
        ''' End standalone initialization '''
        self.toolboxUnderTest = Configuration.geonamesToolboxPath + \
            Configuration.GetToolboxSuffix()
        UnitTestUtilities.checkArcPy()
        DataDownload.runDataDownload(Configuration.geonamesDataPath, \
           Configuration.geonamesInputGDB, Configuration.geonamesURL)
        self.templateFeatureClass = os.path.join(Configuration.geonamesInputGDB, "GeonamesTemplate")
        self.loadResultsFeatureClass = os.path.join(Configuration.geonamesInputGDB, "MonacoResults")
        UnitTestUtilities.checkFilePaths([Configuration.geonamesDataPath])
        UnitTestUtilities.checkGeoObjects([self.toolboxUnderTest, \
            Configuration.geonamesInputGDB, self.loadResultsFeatureClass, \
            self.templateFeatureClass])

    def tearDown(self):
        if Configuration.DEBUG: print("     GeoNamesTestCase.tearDown")

    def test_load_geonames(self):
        """LoadGeonamesFile should populate a feature class from mn.txt."""
        if Configuration.DEBUG: print(".....GeoNamesTestCase.test_load_geonames")
        loadTextfile = os.path.join(Configuration.geonamesDataPath, "mn.txt")
        loadFeatureClass = os.path.join(Configuration.geonamesInputGDB, "LoadMonacoTest")
        countryCodes = os.path.join(Configuration.geonamesInputGDB, "CountryCodes")
        adminCodes = os.path.join(Configuration.geonamesInputGDB, "AdminCodes")
        featureCodes = os.path.join(Configuration.geonamesInputGDB, "FeatureCodes")
        # Delete the feature class used to load if already exists
        if arcpy.Exists(loadFeatureClass) :
            arcpy.Delete_management(loadFeatureClass)
        # Copy from a template feature class with required fields
        arcpy.Copy_management(self.templateFeatureClass, loadFeatureClass)
        arcpy.ImportToolbox(self.toolboxUnderTest, self.toolboxAlias)
        # Execute the Tool under test:
        arcpy.LoadGeonamesFile_defensegeonames(loadFeatureClass, loadTextfile, \
            countryCodes, adminCodes, featureCodes)
        # Simple Check (idiom fix: plain literal instead of int(50))
        inputFeatureCount = int(arcpy.GetCount_management(loadFeatureClass).getOutput(0))
        self.assertGreater(inputFeatureCount, 50)
        # Full Check
        self.assertFeatureClassEqual(self.loadResultsFeatureClass, loadFeatureClass, "OBJECTID")

    def test_create_geonames_gazetteer(self):
        """CreateGeonamesGazetteerLocator should build a locator dataset."""
        if Configuration.DEBUG: print(".....GeoNamesTestCase.test_create_geonames_gazetteer")
        arcpy.ImportToolbox(self.toolboxUnderTest, self.toolboxAlias)
        locator = os.path.join(Configuration.geonamesDataPath, "GeoNamesLocator")
        # Does not seem to work with locators:
        # Delete if already exists
        # if arcpy.Exists(locator) :
        #    arcpy.Delete_management(locator)
        # Use this instead:
        arcpy.env.overwriteOutput = True
        # Just use the known good results feature class for the locator test
        locatorFeatureClass = self.loadResultsFeatureClass
        arcpy.CreateGeonamesGazetteerLocator_defensegeonames(locatorFeatureClass, locator)
        self.assertTrue(arcpy.Exists(locator))
if __name__ == "__main__":
    # Allow running this test case standalone (outside the suite runner).
    unittest.main()
| Esri/solutions-geoprocessing-toolbox | utils/test/geonames_tests/GeoNamesTestCase.py | Python | apache-2.0 | 4,726 |
"""The tests device sun light trigger component."""
# pylint: disable=protected-access
from datetime import datetime
from asynctest import patch
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.const import CONF_PLATFORM, STATE_HOME, STATE_NOT_HOME
from homeassistant.components import (
device_tracker, light, device_sun_light_trigger)
from homeassistant.util import dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.light import common as common_light
@pytest.fixture
def scanner(hass):
    """Initialize components."""
    # Mock device-tracker scanner; DEV1 is marked home so the trigger
    # component sees an occupied house.
    scanner = getattr(
        hass.components, 'test.device_tracker').get_scanner(None, None)
    scanner.reset()
    scanner.come_home('DEV1')
    getattr(hass.components, 'test.light').init()
    # Stub the known-devices YAML so the tracker platform loads two
    # pre-registered devices without touching the filesystem.
    with patch(
        'homeassistant.components.device_tracker.load_yaml_config_file',
        return_value={
            'device_1': {
                'hide_if_away': False,
                'mac': 'DEV1',
                'name': 'Unnamed Device',
                'picture': 'http://example.com/dev1.jpg',
                'track': True,
                'vendor': None
            },
            'device_2': {
                'hide_if_away': False,
                'mac': 'DEV2',
                'name': 'Unnamed Device',
                'picture': 'http://example.com/dev2.jpg',
                'track': True,
                'vendor': None}
        }):
        assert hass.loop.run_until_complete(async_setup_component(
            hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}
            }))
        assert hass.loop.run_until_complete(async_setup_component(
            hass, light.DOMAIN, {
                light.DOMAIN: {CONF_PLATFORM: 'test'}
            }))
    return scanner
async def test_lights_on_when_sun_sets(hass, scanner):
    """Test lights go on when there is someone home and the sun sets."""
    # Set up the trigger component while it is daytime (01:02 UTC here
    # corresponds to before-sunset for the test location).
    test_time = datetime(2017, 4, 5, 1, 2, 3, tzinfo=dt_util.UTC)
    with patch('homeassistant.util.dt.utcnow', return_value=test_time):
        assert await async_setup_component(
            hass, device_sun_light_trigger.DOMAIN, {
                device_sun_light_trigger.DOMAIN: {}})
    await common_light.async_turn_off(hass)
    # Advance mocked time past sunset and fire the time-changed event.
    test_time = test_time.replace(hour=3)
    with patch('homeassistant.util.dt.utcnow', return_value=test_time):
        async_fire_time_changed(hass, test_time)
        await hass.async_block_till_done()
    assert light.is_on(hass)
async def test_lights_turn_off_when_everyone_leaves(hass, scanner):
    """Test lights turn off when everyone leaves the house."""
    await common_light.async_turn_on(hass)
    assert await async_setup_component(
        hass, device_sun_light_trigger.DOMAIN, {
            device_sun_light_trigger.DOMAIN: {}})
    # Simulate every tracked device leaving by flipping the group state.
    hass.states.async_set(device_tracker.ENTITY_ID_ALL_DEVICES,
                          STATE_NOT_HOME)
    await hass.async_block_till_done()
    assert not light.is_on(hass)
async def test_lights_turn_on_when_coming_home_after_sun_set(hass, scanner):
    """Test lights turn on when coming home after sun set."""
    # 03:02 UTC is after sunset for the test location; set up with lights off.
    test_time = datetime(2017, 4, 5, 3, 2, 3, tzinfo=dt_util.UTC)
    with patch('homeassistant.util.dt.utcnow', return_value=test_time):
        await common_light.async_turn_off(hass)
        assert await async_setup_component(
            hass, device_sun_light_trigger.DOMAIN, {
                device_sun_light_trigger.DOMAIN: {}})
        # device_2 arriving home after dark should trigger the lights.
        hass.states.async_set(
            device_tracker.ENTITY_ID_FORMAT.format('device_2'), STATE_HOME)
        await hass.async_block_till_done()
    assert light.is_on(hass)
| jnewland/home-assistant | tests/components/device_sun_light_trigger/test_init.py | Python | apache-2.0 | 3,684 |
#!/usr/bin/env python
# A comment, this is so you can read your program later.
# Anything after the # is ignored by python.

# Fix: parenthesized print() calls are valid on both Python 2 and 3;
# the original bare print statements were Python-2-only syntax.
print("I could have code like this.")  # and the comment after is ignored

# You can also use a comment to "disable" or comment out a piece of code:
# print("This won't run.")

print("This will run.")
| moralesjason/learnpythonthehardway | ex2commentsandpoundcharacters.py | Python | gpl-3.0 | 323 |
import unittest
from flumine import config
class ConfigTest(unittest.TestCase):
    """Verify the default values exposed by the flumine ``config`` module."""

    def test_init(self):
        # Shipped defaults; any change in flumine.config must be mirrored
        # here deliberately.
        self.assertFalse(config.simulated)
        self.assertTrue(config.simulated_strategy_isolation)
        self.assertIsInstance(config.customer_strategy_ref, str)
        self.assertIsInstance(config.process_id, int)
        self.assertIsNone(config.current_time)
        self.assertFalse(config.raise_errors)
        self.assertEqual(config.max_execution_workers, 32)
        self.assertFalse(config.async_place_orders)
        # Simulated exchange latencies (seconds) per order operation.
        self.assertEqual(config.place_latency, 0.120)
        self.assertEqual(config.cancel_latency, 0.170)
        self.assertEqual(config.update_latency, 0.150)
        self.assertEqual(config.replace_latency, 0.280)
        self.assertEqual(config.order_sep, "-")
        self.assertEqual(config.execution_retry_attempts, 10)
| liampauling/flumine | tests/test_config.py | Python | mit | 865 |
# -*- coding: utf-8 -*-
# Odoo/OpenERP module manifest for the reception (visitor management) addon.
{
    'name': "Recepción",
    'summary': """
        Módulo de Gestión de visitantes a la recepción""",
    'description': """
Módulo de Gestión de visitantes a Recepción
===========================================
Registra las visitas a FOMDES especificando la fecha y la dependencia destino.
Genera como reportes visitas por rangos de fechas y por dependencia.
    """,
    'author': "Cooperativa Saní Tecnologías Comunes",
    'website': "http://sani.org.ve",
    # Categories can be used to filter modules in modules listing
    # Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
    # for the full list
    'category': 'CRM',
    'version': '0.1',
    # any module necessary for this one to work correctly
    'depends': ['base'],
    # always loaded
    # NOTE(review): 'data' normally lists XML/CSV files only; the
    # 'models/model_recepcion.py' entry below looks misplaced (models are
    # imported through the package __init__.py) -- confirm before relying
    # on it.
    'data': [
        # 'security/ir.model.access.csv',
        'templates.xml',
        'models/model_recepcion.py',
        'views/recepcion.xml'
    ],
    # only loaded in demonstration mode
    'demo': [
        'demo.xml',
    ],
    'installable': True,
    'application': True,
    'auto_install': False,
}
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2016
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import subprocess
import unittest
from dlg import common
from dlg.common import tool
from dlg.testutils import ManagerStarter
class TestTool(ManagerStarter, unittest.TestCase):

    def test_cmdhelp(self):
        """Checks that all dlg commands have a help"""
        for cmd in tool.commands:
            # Run "dlg <cmd> -h", capturing output so a failure message can
            # include what the command printed.
            p = tool.start_process(cmd, ['-h'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()
            # Reap the process, killing it if it hangs past the timeout.
            common.wait_or_kill(p, timeout=10)
            self.assertEqual(0, p.returncode, 'cmd: %s, out: %s' % (
                cmd + ' -h', common.b2s(out + err)))
| steve-ord/daliuge | daliuge-engine/test/test_tool.py | Python | lgpl-2.1 | 1,587 |
__author__ = 'Cedric Da Costa Faro'
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page for unknown URLs."""
    body = render_template('404.html')
    return body, 404
@main.app_errorhandler(405)
def method_not_allowed(e):
    """Render the custom 405 page for unsupported HTTP methods."""
    body = render_template('405.html')
    return body, 405
@main.app_errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page for unhandled exceptions."""
    body = render_template('500.html')
    return body, 500
| cdcf/time_tracker | app/main/errors.py | Python | bsd-3-clause | 392 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
s3_to_redshift.py is uses the RedshiftPostgres class (see redshift_psql.py)
to copy appropriately formatted data from s3 into a table in redshift. Note
LOAD is kept within the Python file, and create table is read from the
schema/db.sql file.
"""
import copy
import os
import re
import sys
import string
import time
import traceback
from datetime import datetime
from datetime import timedelta
from os.path import join
from yaml import safe_load
from staticconf import read_int
from staticconf import read_string
from staticconf import YamlConfiguration
from sherlock.common.redshift_psql import get_namespaced_tablename
from sherlock.common.redshift_psql import get_redshift_schema
from sherlock.common.redshift_psql import RedshiftPostgres
from sherlock.common.redshift_status import RedshiftStatusTable
from sherlock.common.dynamodb_status import DynamoDbStatusTable
from sherlock.common.pipeline import add_load_args
from sherlock.common.pipeline import get_base_parser
from sherlock.common.pipeline import get_formatted_date
from sherlock.common.pipeline import get_yaml_table_versions
from sherlock.common.pipeline import MAX_TTL_DAYS
from sherlock.common.pipeline import pipeline_yaml_schema_file_path
from sherlock.common.loggers import PipelineStreamLogger
from sherlock.common.redshift_schema import RedShiftLogSchema
from sherlock.common.util import load_from_file
from sherlock.tools.maint import compact_table
from sherlock.tools.schema2sql import tables_to_sql
from sherlock.tools.schema2sql import mk_create_table_sql_cmd
from sherlock.common.config_util import load_package_config
# Maximum width (characters) of the error-message column written to the
# status table.
ERROR_MSG_COL_SIZE = 256
#
# queries requiring the redshift search_path to include the schema
# note: we have to set this prior to running these queries otherwise
# the non-default schemas won't show up in pg_table_def. The setup
# is done automatically in the run_psql query
#
QUERY_GET_SORT_COLUMN = '''SELECT "column", type FROM pg_table_def \
WHERE tablename = %(tablename)s AND sortkey = 1 \
AND schemaname = %(schemaname)s'''
QUERY_TABLE_DEF = '''SELECT SchemaName, TableName, "Column", Type, Encoding, \
DistKey, SortKey, "NotNull" from pg_table_def where tablename=%(tablename)s \
AND schemaname=%(schemaname)s'''
#
# queries requiring namespaced tablenames
#
# LOAD gets a table_name, s3_input_filename, aws_key, and aws_secret
#
# (%%s placeholders survive the first %-format and receive AWS
# credentials at execution time)
LOAD = """\
copy %s from '%s/part' \
CREDENTIALS \
'aws_access_key_id=%%s;aws_secret_access_key=%%s;token=%%s' \
delimiter '%s' \
ESCAPE gzip TRUNCATECOLUMNS TIMEFORMAT as 'epochmillisecs' \
NULL AS '\\0' STATUPDATE ON;"""
QUERY_GET_MIN_MAX_DATE = """SELECT min({1}), max({1}) from {0}"""
QUERY_DELETE_ROWS_BY_DATE = """DELETE from {0} \
where {1} < %(new_min_date)s"""
QUERY_ADD_COLUMN = """alter table {0} \
add column {1} {2} encode {3} {4} default {5}"""
def get_create_commands(input_file, add_error_table=True):
    """
    get_create_command takes an input file and reads the create table sql
    command from it.

    Args:
    input_file -- the full path of the input file (.sql, or .yaml schema
        which is first rendered to SQL)
    add_error_table -- boolean flag to add error table to schema

    Returns:
    a list of (table_name, command) tuples where command is a SQL command
    for creating the (schema-namespaced) table, and table_name is the bare
    name of the table to be created (None if it could not be parsed).
    Important because we don't want to create a table that already exists.
    """
    # Fix: trailing whitespace before '(' is now matched OUTSIDE the group;
    # the original pattern ([\S]+[\s]*) swallowed trailing blanks into the
    # captured name, breaking later name comparisons against pg_tables.
    table_regex = re.compile(r'[\s]*(?P<tablename>[\S]+)[\s]*\(')
    command = load_from_file(input_file)
    if input_file[-5:] == ".yaml":
        rs_log_schema = RedShiftLogSchema(safe_load(command))
        if add_error_table:
            err_tbl_name, err_tbl = rs_log_schema.get_error_table()
            rs_log_schema.table_add(err_tbl_name, err_tbl)
        command = tables_to_sql(rs_log_schema.tables())
    commands = command.split('CREATE TABLE')
    table_create_tuples = []
    for cmd in commands[1:]:
        match = table_regex.search(cmd)
        if match is None:
            table_name = None
        else:
            table_name = match.group('tablename')
            table_to_create = get_namespaced_tablename(table_name)
            # replace only the first occurrence: the table name itself
            cmd = cmd.replace(table_name, table_to_create, 1)
        table_create_tuples.append((table_name, "create table " + cmd))
    return table_create_tuples
def get_current_tables(rs_psql, database):
    """
    get_current_tables gets a list of current tables

    Args:
    rs_psql -- the RedshiftPostgres object used to run the SQL command
    database -- the redshift db name to which we copy the table

    Returns:
    a list of table names (bare names, scoped to the pipeline schema)
    """
    schemaname = get_redshift_schema()
    # NOTE: the backslashes are line continuations INSIDE the string
    # literal, so the query is a single line at runtime.
    SELECT = "SELECT tablename \
    FROM pg_catalog.pg_tables \
    where schemaname=%(schemaname)s"
    params = {'schemaname': schemaname}
    tables = rs_psql.run_sql(
        SELECT,
        database,
        'getting existing tables',
        output=True,
        params=params
    )
    # rows come back as 1-tuples; unpack to plain names
    return [tbl_name for (tbl_name,) in tables]
def handle_error(error_msg, logstream):
    """
    handle_error writes error messages to the log, then raises.

    Args:
    error_msg -- the error to write
    logstream -- a PipelineStreamLogger receiving the message

    Raises:
    Exception -- always, carrying error_msg
    """
    logstream.write_msg('error', error_msg=error_msg)
    raise Exception(error_msg)
def get_table_creates(schema_file, logstream):
    """
    get_table_creates checks that the schema file exists then calls
    get_create_commands to get a list of tuples (see Returns).

    Args:
    schema_file -- the name of the schema file with the create table command
    logstream -- a PipelineStreamLogger used to report failures

    Returns:
    create_tuples -- a list of (table_name, create_command) tuples, one for
    each table to be created

    Raises:
    Exception (via handle_error) -- when any create command has no
    parseable table name
    """
    create_commands = get_create_commands(schema_file)
    # BUG FIX: the tuples are (table_name, command); the original unpacked
    # them reversed ("for _, table_name"), so the emptiness check ran
    # against the SQL text and a missing table name was never detected.
    for table_name, _ in create_commands:
        if not table_name:
            handle_error("no table name in file: {0}".format(
                schema_file), logstream
            )
    return create_commands
def dates_from_rs_status(status_helper, db, logstream,
                         retry_on_err, single_date=None):
    """
    date_from_rs_status gets the jobs that have completed the et step, but
    have not started the load step, and have no jobs before them running or
    in error

    Args:
    status_helper -- a wrapper around a backing store to aid in CRUD
    db -- is the database we query
    logstream -- a PipelineStreamLogger
    retry_on_err -- a boolean, True if we're retrying on errors
    single_date -- date string of the form YYYY-MM-DD if we're \
        only looking for one

    Returns:
    a list of dates to catch up on formatted as strings YYYY/MM/DD
    """
    versions = get_yaml_table_versions(pipeline_yaml_schema_file_path())
    if single_date is not None:
        # single-date mode: validate and query just that day
        data_date = get_formatted_date(single_date)
        if data_date is None:
            handle_error("bad input date: {0}".format(single_date), logstream)
        start_datetime = datetime.strptime(data_date, "%Y/%m/%d")
        status_tuples = \
            status_helper.query_et_complete_job(db, versions, data_date)
    else:
        # catch-up mode: look back a configured number of days
        days_back = read_int('pipeline.load_step.days_to_check') + 1
        start_datetime = datetime.utcnow() - timedelta(days=days_back)
        status_tuples = \
            status_helper.query_et_complete_jobs(db, versions, start_datetime)
    if status_tuples is False:
        handle_error(
            "query for complete et job failed, version={0}, date={1}".format(
                versions,
                data_date if single_date is not None else start_datetime
            ),
            logstream
        )
    candidates = []
    # Walk forward day by day; stop at the first gap in the date sequence,
    # or at an error we are not allowed to retry.
    last_date = (start_datetime - timedelta(days=1)).strftime("%Y/%m/%d")
    for ddate, ld_status in status_tuples:
        if not one_day_greater(ddate, last_date):
            break
        elif ld_status is None or (ld_status == 'error' and retry_on_err):
            candidates.append(ddate)
        elif ld_status == 'error':
            break
        last_date = ddate
    candidate_string = "candidates dates for load: {0}".format(candidates)
    logstream.write_msg(status='running', extra_msg=candidate_string)
    return candidates
def one_day_greater(recent_date, past_date):
    """
    Tell whether recent_date is exactly one calendar day after past_date.

    Args:
    recent_date -- the more recent of the two dates, string YYYY/MM/DD
    past_date -- the older of the two dates, string YYYY/MM/DD

    Return:
    True / False -- whether recent_date minus past_date equals one day
    """
    fmt = "%Y/%m/%d"
    newer = datetime.strptime(str(recent_date), fmt)
    older = datetime.strptime(str(past_date), fmt)
    return newer - older == timedelta(days=1)
def get_timestamp_column_name(psql, db_name, table):
    """
    Auto-detect the timestamp (or date) column that is also the sort key.

    Args:
    psql -- handle to talk to redshift
    db_name -- redshift database containing table
    table -- table name

    Return: column name, or None when no timestamp/date sortkey exists
    Throw: ValueError when more than one candidate column is found
    """
    rs_schema = get_redshift_schema()
    rows = psql.run_sql(
        QUERY_GET_SORT_COLUMN,
        db_name,
        "find sort column",
        params={'tablename': table, 'schemaname': rs_schema},
        output=True,
        schema=rs_schema
    )
    # Prefer a timestamp-typed sortkey; fall back to a date-typed one.
    candidates = get_sortkey_column(rows, 'timestamp')
    if not candidates:
        candidates = get_sortkey_column(rows, 'date')
    if not candidates:
        return None
    if len(candidates) > 1:
        raise ValueError("too many sort columns in {0}".format(table))
    return candidates.pop()
def get_sortkey_column(query_result, expected_sortkey):
    """Return the column names whose SQL type mentions expected_sortkey.

    query_result -- rows of (column_name, sql_type) tuples
    expected_sortkey -- substring to look for in the lowercased type
    """
    return [col for col, sql_type in query_result
            if expected_sortkey in sql_type.lower()]
def get_min_max_date(psql, db_name, table, column):
    """
    Determine the oldest and freshest timestamps present in a table.

    Args:
    psql -- handle to talk to redshift
    db_name -- redshift database containing table
    table -- table name
    column -- timestamp column name found via get_timestamp_column_name

    Return: (min_date, max_date)
    """
    namespaced = get_namespaced_tablename(table)
    query = QUERY_GET_MIN_MAX_DATE.format(namespaced, column)
    rows = psql.run_sql(query, db_name, "get min,max date", output=True)
    first_row = rows[0]
    return first_row[0], first_row[1]
def delete_old_data(psql, db_name, table, ttl_days):
    """
    Delete data older than TTL. Round-down min_date, max_date to 00:00:00 UTC
    for cutoff calculation.

    Args:
    psql -- handle to talk to redshift
    db -- redshift database containing table
    table -- table name
    ttl_days -- max TTL of data in a table (None disables deletion)

    Return: number of rows deleted (0 when nothing is out of TTL or the
    table has no timestamp sortkey)
    Raises: ValueError when a delete was attempted but removed no rows
    """
    cname = get_timestamp_column_name(psql, db_name, table)
    if cname is None:
        # no timestamp/date sortkey -> no TTL enforcement possible
        return 0
    dt_min_date, dt_max_date = get_min_max_date(psql, db_name, table, cname)
    if dt_min_date is None or dt_max_date is None:
        # empty table
        return 0
    # cutoff is always YYYY-MM-DD 00:00:00
    dt_min = datetime(dt_min_date.year, dt_min_date.month, dt_min_date.day)
    dt_max = datetime(dt_max_date.year, dt_max_date.month, dt_max_date.day)
    num_days = (dt_max - dt_min).days
    num_deleted = 0
    if ttl_days is not None and num_days > ttl_days:
        # keep only the most recent ttl_days worth of whole days
        dt_new_min_date = dt_min + timedelta(days=num_days - ttl_days)
        new_min_date = datetime.strftime(dt_new_min_date, "%Y-%m-%d %H:%M:%S")
        query = QUERY_DELETE_ROWS_BY_DATE.format(
            get_namespaced_tablename(table),
            cname
        )
        params = {'new_min_date': new_min_date}
        result = psql.run_sql_ex(query, db_name, "delete rows", params=params)
        if result is not False:
            # Redshift reports "DELETE <n>"; parse out the row count
            match = re.search(r"^DELETE\s+(?P<num_deleted>\d+)$",
                              result.get('status', ''))
            num_deleted = int(match.group('num_deleted')) if match else 0
            if num_deleted <= 0:
                raise ValueError("nothing to delete for {0}".format(table))
    return num_deleted
class PgTableDef(object):
    """Positional indices of the columns returned by QUERY_TABLE_DEF."""
    (SchemaName, TableName, Column, Type,
     Encoding, DistKey, SortKey, NotNull) = range(8)
def get_table_def(psql, db, tablename):
    """ Retrieve table definition stored in the database.

    Table definitions come from pg_table_def, see QUERY_TABLE_DEF for
    details.

    tablename -- table name for which to get definition

    Returns: A list of tuples, one per table column, each describing
    column attributes such as name, encoding, etc.

    NOTE: For tables not in the database, get_table_def returns empty list
    """
    rs_schema = get_redshift_schema()
    return psql.run_sql(
        QUERY_TABLE_DEF,
        db,
        "getting table def",
        params={'tablename': tablename, 'schemaname': rs_schema},
        output=True,
        schema=rs_schema
    )
def has_table_def(table_def):
    """ Check if table is defined in the database.

    table_def -- result of get_table_def()

    Returns: True when the definition has at least one column row.
    (The original returned the raw length; every caller only uses
    truthiness, so returning a proper bool is backward-compatible.)
    """
    return len(table_def) > 0
def compare_table_defs(psql, db, table, cur_tbl_def, tmp_tbl_def):
    """
    Compare table definitions before allowing supported modifications.
    Currently, only adding new columns is allowed.

    Args:
    psql -- handle to talk to redshift (unused here; kept for interface)
    db -- redshift database containing table (unused here)
    table -- table name for which definition may change
    cur_tbl_def -- table definition for existing table
    tmp_tbl_def -- table definition for temp table which may contain changes

    Return: None
    Raises: ValueError on missing table, dropped columns, or a change to an
    existing column (other than table name / encoding).
    """
    if not has_table_def(cur_tbl_def):
        raise ValueError("missing existing table: {0}".format(table))
    if len(tmp_tbl_def) < len(cur_tbl_def):
        raise ValueError("{0}: new schema has less columns".format(table))
    # TableName legitimately differs (tmp_ prefix) and Encoding may be
    # re-detected, so both are excluded from the comparison.
    ignored = (PgTableDef.TableName, PgTableDef.Encoding)
    # zip pairs each existing column with its counterpart; trailing extra
    # columns in tmp_tbl_def are additions and are validated elsewhere.
    # (The original deep-copied tmp_tbl_def and pop(0)'d per row -- an
    # unnecessary copy plus O(n^2) list shifting.)
    for row, tmp_row in zip(cur_tbl_def, tmp_tbl_def):
        diff = [i for i in range(len(row))
                if i not in ignored and row[i] != tmp_row[i]]
        if diff:
            raise ValueError("{0}: change to column '{1}' not allowed".format(
                table, row[PgTableDef.Column]
            ))
def add_columns(psql, db, ddate, table, to_add,
                tbl_tuple, defaults, logstream):
    """
    Add new columns to existing table. Copy data into temp table
    to detect encoding.

    Args:
    psql -- handle to talk to redshift
    db -- redshift database containing table
    ddate -- the date string of the data to be copied formatted YYYY/MM/DD
    table -- table name where to add columns -- not namespaced
    to_add -- list of columns to add (rows in pg_table_def layout)
    tbl_tuple -- a tuple containing path to table in PSV format and a
    Redshift temporary table where to load data
    defaults -- map of column name -> SQL default value expression
    logstream -- a PipelineStreamLogger

    Return: None
    Raises: ValueError when a new column is a sortkey or distkey (those
    cannot be added via ALTER TABLE)
    """
    _, tmp_tbl_name = tbl_tuple
    for row in to_add:
        if row[PgTableDef.SortKey] or row[PgTableDef.DistKey]:
            raise ValueError("{0}: {1} new column is a sortkey \
or distkey".format(table, row[PgTableDef.Column]))
    if to_add:
        # copy data into tmp_tbl_name in order to detect encoding
        copy_table(psql, db, ddate, tbl_tuple, MAX_TTL_DAYS, logstream)
        tmp_tbl_def = get_table_def(psql, db, tmp_tbl_name)
        # only the trailing rows of the temp definition are the new columns
        for row in tmp_tbl_def[len(tmp_tbl_def) - len(to_add):]:
            encoding = row[PgTableDef.Encoding]
            query = QUERY_ADD_COLUMN.format(
                get_namespaced_tablename(table),
                row[PgTableDef.Column],
                row[PgTableDef.Type],
                # ALTER TABLE spells the no-compression encoding "raw"
                "raw" if encoding == "none" else encoding,
                "not null" if row[PgTableDef.NotNull] else "null",
                defaults[row[PgTableDef.Column]]
            )
            psql.run_sql(query, db, query)
def get_column_defaults(table):
    """ Extract column default value for each column.

    If column lacks default, set default to NULL.

    table -- a table dict from the yaml schema; each entry of
    table['columns'] has 'name' (or 'log_key') and 'sql_attr'

    Returns: a map(column_name, default_value)
    """
    # quoted/parenthesized defaults: capture opener, body, and (via the
    # lookahead group) the closer, then rejoin all three
    quoted_re = re.compile(r'default\s+(\(|"|\')(.*)(?=(\)|"|\'))')
    bare_re = re.compile(r'default\s+(?P<default>[\S]*)')
    defaults = dict()
    for col in table['columns']:
        col_name = col.get('name', col.get('log_key'))
        quoted = quoted_re.search(col['sql_attr'])
        bare = bare_re.search(col['sql_attr'])
        if quoted:
            defaults[col_name] = ''.join(quoted.groups())
        elif bare:
            defaults[col_name] = bare.group('default')
        else:
            defaults[col_name] = 'NULL'
    return defaults
def create_tables(psql, db, create_tuples):
    """ Create every table in create_tuples that does not already exist.

    create_tuples -- a list of (table, sql table create command)
    """
    existing = get_current_tables(psql, db)
    for table, create in create_tuples:
        if table in existing:
            continue
        psql.run_sql(create, db, " creating table: {0}".format(table))
def update_database_schema(psql, db, ddate, s3_logdir, schema_file, logstream):
    """
    Check new schema against what exists in the database such as
    1.  create new tables if missing
    2.  compare table definitions
    3.  add new columns

    Args:
    psql -- handle to talk to redshift
    db -- redshift database containing table
    ddate -- the date string of the data to be copied formatted YYYY/MM/DD
    s3_logdir -- path to location of tables in PSV format
    schema_file -- the name of the schema file with the create table command
    logstream -- a PipelineStreamLogger

    Return: None
    """
    # TODO: db.yaml as SPOT
    # The .sql schema path has a sibling .yaml that is the actual source
    # of truth for table layouts.
    fname = schema_file.replace('.sql', '.yaml')
    yaml_dict = load_from_file(fname)
    rs_log_schema = RedShiftLogSchema(safe_load(yaml_dict))
    err_tbl_name, err_tbl = rs_log_schema.get_error_table()
    rs_log_schema.table_add(err_tbl_name, err_tbl)
    tables = rs_log_schema.tables()

    # create tables if missing for schema
    create_tuples = get_table_creates(schema_file, logstream)
    create_tables(psql, db, create_tuples)

    # check for schema changes: build a temp table per schema table,
    # diff it against the live definition, and add any new columns
    for table in tables.keys():
        tmp_tbl_name = "tmp_{0}".format(table)
        namespaced_tmp_table = get_namespaced_tablename(tmp_tbl_name)

        # create temp tables
        create_table_cmd = mk_create_table_sql_cmd(namespaced_tmp_table, tables[table])
        psql.run_sql(create_table_cmd, db, create_table_cmd)
        try:
            # fetch table definition
            cur_tbl_def = get_table_def(psql, db, table)
            tmp_tbl_def = get_table_def(psql, db, tmp_tbl_name)
            compare_table_defs(psql, db, table, cur_tbl_def, tmp_tbl_def)
            tbl_tuple = (join(s3_logdir, ddate, table), tmp_tbl_name)
            # columns present only in the temp (new) definition
            to_add = tmp_tbl_def[len(cur_tbl_def):]
            defaults = get_column_defaults(tables[table])
            add_columns(psql, db, ddate, table, to_add,
                        tbl_tuple, defaults, logstream)
        finally:
            # always drop the temp table (guard against a name collision
            # with the real table)
            if tmp_tbl_name != table:
                delete_table_cmd = 'drop table {0}'.format(namespaced_tmp_table)
                psql.run_sql(delete_table_cmd, db, delete_table_cmd)
def copy_table(psql_helper, db_name, ddate, log_tuple, ttl_days, logstream):
    """Load one day of one table from S3 into Redshift via COPY.

    Args:
    psql_helper -- a RedshiftPostgres object to run SQL
    db_name -- the redshift database to load into
    ddate -- the date string of the data to be copied formatted YYYY/MM/DD
    log_tuple -- (s3 path of PSV input, redshift table name)
    ttl_days -- max days of data to retain (None disables TTL handling)
    logstream -- a PipelineStreamLogger

    Returns: the run_sql result of the COPY (False on failure)
    """
    s3_log, rs_table = log_tuple
    namespaced_table_name = get_namespaced_tablename(rs_table)
    table_start = time.time()
    extra_msg = "from s3 log: {0}".format(s3_log)
    logstream.write_msg('starting', extra_msg=extra_msg)
    # about to load new day, remove oldest
    rows_deleted = None
    if ttl_days is not None:
        rows_deleted = \
            delete_old_data(psql_helper, db_name, rs_table, ttl_days - 1)
    if rows_deleted:
        logstream.write_msg('delete_ok',
                            extra_msg="{0} rows".format(rows_deleted))
    # Try to reclaim disk space.  If not needed, it will be fast.
    # Calling here and not in the 'if rows_deleted' code to prevent
    # scenario where rows were deleted but compact failed.  Then on retry
    # there will be nothing to delete but since space is not reclaimed
    # there may not be enough for a new load, resulting in failure forever.
    if ttl_days is not None:
        compact_table(psql_helper, db_name, namespaced_table_name)
    delimiter = read_string('redshift_column_delimiter')
    # NOTE(review): "string_escape" decoding is Python-2-only -- confirm
    # before running this module under Python 3.
    delimiter = delimiter.decode("string_escape")
    if delimiter not in string.printable:
        # non-printable delimiters are passed to COPY in octal form
        delimiter = '\\' + oct(ord(delimiter))
    copy_sql = LOAD % (namespaced_table_name, s3_log, delimiter)
    result = psql_helper.run_sql(
        copy_sql,
        db_name, " copying from " + s3_log,
        s3_needed=True,
        time_est_secs=read_int('pipeline.load_step.copy_time_est_secs')
    )
    if result is not False:
        logstream.write_msg('complete', job_start_secs=table_start,
                            extra_msg=extra_msg)
    return result
def copy_tables(psql_helper, status_helper,
                db_name, ddate, log_tuples, ttl_days, logstream):
    """
    copy_tables takes a list of input log, table pairs and copies each
    input log to its corresponding input table.

    Args:
        psql_helper -- a RedshiftPostgres object to help perform the copy
        status_helper -- an object handle to interact with the status table
        db_name -- the name of the db to which we're copying
        ddate -- the date string of the data to be copied, formatted YYYY/MM/DD
        log_tuples -- a list of (log, table) pairs
        ttl_days -- how many days to retain loaded data
        logstream -- a PipelineStreamLogger

    Returns:
        None.  On any table failure this calls handle_error(), which is
        expected to abort processing; on success the status table is marked
        "complete".
    """
    start = time.time()
    yaml_versions = get_yaml_table_versions(pipeline_yaml_schema_file_path())
    status_helper.update_status(db_name, ddate, yaml_versions, "running")
    err_tbl_name, _ = RedShiftLogSchema().get_error_table()
    for log_tuple in log_tuples:
        # result semantics: False = failed, None = skipped/aborted,
        # anything else = success (mirrors copy_table's return).
        result = False
        error_msg = None
        try:
            result = copy_table(psql_helper, db_name, ddate,
                                log_tuple, ttl_days, logstream)
        except KeyboardInterrupt:
            # Mark as "not an error" so the finally block below does not
            # record a failure, then propagate the interrupt.
            result = None
            raise
        except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            error_msg = "{0}".format({
                'crash_tb': ''.join(traceback.format_tb(exc_tb)),
                'crash_exc': traceback.format_exception_only(
                    exc_type, exc_value
                )[0].strip()
            })
            # ignore copy error if error table does not exist
            # NOTE(review): assumes exc_value.args[0] is a string; an
            # exception with empty or non-string args would raise here --
            # TODO confirm all expected exception types satisfy this.
            s3_log, rs_table = log_tuple
            if rs_table == err_tbl_name and \
                    exc_value.args[0].find('The specified S3 prefix') != -1 and \
                    exc_value.args[0].find('does not exist') != -1:
                result = None
        finally:
            # Only `result is False` counts as a failure; None means the
            # copy was deliberately skipped or interrupted.
            if result is False:
                _, rs_table = log_tuple
                if error_msg is None:
                    error_msg = "failed copy {0} for date: {1}".format(
                        get_namespaced_tablename(rs_table), ddate
                    )
                status_helper.update_status(
                    db_name, ddate, yaml_versions,
                    "error", start_time_secs=start, error_msg=error_msg
                )
                handle_error(error_msg, logstream)
    status_helper.update_status(
        db_name, ddate, yaml_versions, "complete", start_time_secs=start
    )
def parse_command_line(sys_argv):
    """
    parse_command_line parses the arguments from the command line other than
    the name of the file.

    Args:
        sys_argv -- sys.argv (argv[0], the script name, is skipped)

    Returns:
        an argparse.Namespace of parsed arguments
    """
    # Base parser plus the shared load-step arguments defined elsewhere
    # in this package.
    parser = get_base_parser()
    parser = add_load_args(parser)
    parser.add_argument(
        "--date",
        help="either 'yesterday' or YYYY-MM-DD \
        if there is no date, s3_to_redshift checks back 5 days"
    )
    # skip the file name, parse everything after
    return parser.parse_args(sys_argv[1:])
def s3_to_redshift_main(args):
    """Entry point: load pending dates of S3 logs into Redshift.

    For each candidate date (from the status table), updates the database
    schema if needed, then copies every configured table's S3 log into its
    Redshift table via copy_tables().

    Args:
        args -- parsed argparse.Namespace from parse_command_line()

    Raises:
        IOError -- when an explicit --date was given but there is nothing
            to load for it.
    """
    db = read_string('pipeline.redshift_database')
    s3_log_prefix = read_string('pipeline.s3_output_prefix').format(
        logname=os.environ.get('LOGNAME', 'unknown')
    )
    # setup logging
    stream_name = read_string('pipeline.load_step.s3_to_redshift_stream')
    LOG_STREAM = PipelineStreamLogger(
        stream_name,
        args.run_local,
        's3_to_redshift',
        job_name='load'
    )
    # handle to redshift db
    loader_psql = RedshiftPostgres(
        LOG_STREAM, args.private, run_local=args.run_local
    )
    # Progress tracking lives either in DynamoDB or in Redshift itself,
    # depending on the flag.
    if args.skip_progress_in_redshift:
        status_table = DynamoDbStatusTable(
            LOG_STREAM, run_local=args.run_local
        )
    else:
        status_table = RedshiftStatusTable(loader_psql)
    create_tuples = get_table_creates(args.db_file, LOG_STREAM)
    # Dates whose ET step is done but which are not yet loaded.
    data_candidates = dates_from_rs_status(
        status_table,
        db,
        LOG_STREAM,
        args.retry_errors,
        args.date,
    )
    if data_candidates:
        try:
            # Schema migration is driven off the first (oldest) candidate.
            update_database_schema(
                loader_psql,
                db,
                data_candidates[0],
                s3_log_prefix,
                args.db_file,
                LOG_STREAM
            )
        except Exception as e:
            # Record the failure in the status table before propagating.
            status_table.update_status(
                db,
                data_candidates[0],
                get_yaml_table_versions(pipeline_yaml_schema_file_path()),
                "error",
                start_time_secs=time.time(), error_msg=repr(e)
            )
            raise
    elif args.date is not None:
        raise IOError("{0} data is either already loaded \
or has not yet completed ET step".format(args.date))
    logs_to_copy = []
    for input_date in data_candidates:
        # Re-create the logger per date so messages carry the input_date.
        LOG_STREAM = PipelineStreamLogger(
            stream_name,
            args.run_local,
            's3_to_redshift',
            job_name='load',
            input_date=input_date
        )
        logs_to_copy = [
            (join(s3_log_prefix, input_date, table), table)
            for (table, _) in create_tuples
        ]
        copy_tables(loader_psql, status_table, db, input_date, logs_to_copy,
                    args.ttl_days, LOG_STREAM)
if __name__ == '__main__':
    # Parse CLI args, layer the YAML configurations (base, io, optional
    # override), then run the load.
    args_namespace = parse_command_line(sys.argv)
    load_package_config(args_namespace.config)
    YamlConfiguration(args_namespace.io_yaml, optional=False)
    if args_namespace.config_override:
        YamlConfiguration(args_namespace.config_override, optional=False)
    s3_to_redshift_main(args_namespace)
| Yelp/mycroft | mycroft/sherlock/batch/s3_to_redshift.py | Python | mit | 26,638 |
# -*- coding: utf-8 -*-
import hashlib
import json
import os
import shutil
import tempfile
import zipfile
from datetime import datetime
from django import forms
from django.core.files.storage import default_storage as storage
from django.conf import settings
import mock
import path
from nose.tools import eq_
import amo
import amo.tests
import amo.utils
import devhub.signals
from amo.utils import rm_local_tmp_dir
from addons.models import Addon
from applications.models import Application, AppVersion
from files.models import File, FileUpload, FileValidation, nfd_str, Platform
from files.helpers import copyfileobj
from files.utils import check_rdf, JetpackUpgrader, parse_addon, parse_xpi
from versions.models import Version
class UploadTest(amo.tests.TestCase, amo.tests.AMOPaths):
    """
    Base for tests that mess with file uploads, safely using temp directories.
    """
    fixtures = ['applications/all_apps.json', 'base/appversion']

    def setUp(self):
        # Make path.rename behave like copy for the duration of the test so
        # fixture files are never moved away; restored in tearDown().
        self._rename = path.path.rename
        path.path.rename = path.path.copy
        # The validator task (post Addon upload) loads apps.json
        # so ensure it exists:
        from django.core.management import call_command
        call_command('dump_apps')

    def tearDown(self):
        path.path.rename = self._rename

    def file_path(self, *args, **kw):
        return self.file_fixture_path(*args, **kw)

    def get_upload(self, filename=None, abspath=None, validation=None,
                   is_webapp=False):
        """Create and save a FileUpload from a fixture file.

        Either `filename` (a fixture name) or `abspath` (a full path) must
        be given; `validation` defaults to a minimal passing result.
        """
        # Fix: close the fixture file handle instead of leaking it
        # (the original used a bare open(...).read()).
        with open(abspath if abspath else self.file_path(filename)) as fp:
            xpi = fp.read()
        upload = FileUpload.from_post([xpi], filename=abspath or filename,
                                      size=1234)
        # Simulate what fetch_manifest() does after uploading an app.
        upload.is_webapp = is_webapp
        upload.validation = (validation or
                             json.dumps(dict(errors=0, warnings=1, notices=2,
                                             metadata={}, messages=[])))
        upload.save()
        return upload
class TestFile(amo.tests.TestCase, amo.tests.AMOPaths):
    """
    Tests the methods of the File model: URLs, filename generation,
    deletion/hide-unhide behavior, mirroring, and status transitions.
    """
    fixtures = ['base/addon_3615', 'base/addon_5579', 'base/platforms']
    def test_get_absolute_url(self):
        f = File.objects.get(id=67442)
        url = f.get_absolute_url(src='src')
        expected = ('/firefox/downloads/file/67442/'
                    'delicious_bookmarks-2.1.072-fx.xpi?src=src')
        assert url.endswith(expected), url
    def check_delete(self, file_, filename):
        """Test that when the File object is deleted, it is removed from the
        filesystem."""
        try:
            with storage.open(filename, 'w') as f:
                f.write('sample data\n')
            assert storage.exists(filename)
            file_.delete()
            assert not storage.exists(filename)
        finally:
            # Clean up in case the assertion failed mid-way.
            if storage.exists(filename):
                storage.delete(filename)
    def test_delete_by_version(self):
        # Deleting the parent Version must also remove the file from disk.
        f = File.objects.get(pk=67442)
        version = f.version
        self.check_delete(version, f.file_path)
    def test_delete_file_path(self):
        f = File.objects.get(pk=67442)
        self.check_delete(f, f.file_path)
    def test_delete_mirror_file_path(self):
        f = File.objects.get(pk=67442)
        self.check_delete(f, f.mirror_file_path)
    def test_delete_no_file(self):
        # test that the file object can be deleted without the file
        # being present
        file = File.objects.get(pk=74797)
        filename = file.file_path
        assert not os.path.exists(filename), 'File exists at: %s' % filename
        file.delete()
    def test_delete_signal(self):
        """Test that if there's no filename, the signal is ok."""
        file = File.objects.get(pk=67442)
        file.update(filename='')
        file.delete()
    @mock.patch('files.models.File.hide_disabled_file')
    def test_disable_signal(self, hide_mock):
        # hide_disabled_file fires only on the transition to DISABLED.
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_PUBLIC
        f.save()
        assert not hide_mock.called
        f.status = amo.STATUS_DISABLED
        f.save()
        assert hide_mock.called
    @mock.patch('files.models.File.unhide_disabled_file')
    def test_unhide_on_enable(self, unhide_mock):
        # unhide fires only on a DISABLED -> PUBLIC transition.
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_PUBLIC
        f.save()
        assert not unhide_mock.called
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_DISABLED
        f.save()
        assert not unhide_mock.called
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_PUBLIC
        f.save()
        assert unhide_mock.called
    def test_unhide_disabled_files(self):
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_PUBLIC
        # Put content at the guarded (hidden) path, then unhide it.
        with storage.open(f.guarded_file_path, 'wb') as fp:
            fp.write('some data\n')
        f.unhide_disabled_file()
        assert storage.exists(f.file_path)
        assert storage.open(f.file_path).size
    def test_unhide_disabled_file_mirroring(self):
        # Disabling must pull the file from both the serving path and the
        # mirror; re-enabling must restore both.
        tmp = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(tmp))
        with mock.patch.object(settings, 'MIRROR_STAGE_PATH', tmp):
            fo = File.objects.get(pk=67442)
            with storage.open(fo.file_path, 'wb') as fp:
                fp.write('<pretend this is an xpi>')
            with storage.open(fo.mirror_file_path, 'wb') as fp:
                fp.write('<pretend this is an xpi>')
            fo.status = amo.STATUS_DISABLED
            fo.save()
            assert not storage.exists(fo.file_path), 'file not hidden'
            assert not storage.exists(fo.mirror_file_path), (
                'file not removed from mirror')
            fo = File.objects.get(pk=67442)
            fo.status = amo.STATUS_PUBLIC
            fo.save()
            assert storage.exists(fo.file_path), 'file not un-hidden'
            assert storage.exists(fo.mirror_file_path), (
                'file not copied back to mirror')
    @mock.patch('files.models.File.copy_to_mirror')
    def test_copy_to_mirror_on_status_change(self, copy_mock):
        assert amo.STATUS_UNREVIEWED not in amo.MIRROR_STATUSES
        f = File.objects.get(pk=67442)
        f.status = amo.STATUS_UNREVIEWED
        f.save()
        assert not copy_mock.called
        copy_mock.reset_mock()
        # Every mirrorable status must trigger a copy on save.
        for status in amo.MIRROR_STATUSES:
            f = File.objects.get(pk=67442)
            f.status = status
            f.save()
            assert copy_mock.called, "Copy not called"
            f.status = amo.STATUS_UNREVIEWED
            f.save()
            copy_mock.reset_mock()
    def test_latest_url(self):
        # With platform.
        f = File.objects.get(id=74797)
        base = '/firefox/downloads/latest/'
        expected = base + '{0}/platform:3/addon-{0}-latest.xpi'
        eq_(expected.format(f.version.addon_id), f.latest_xpi_url())
        # No platform.
        f = File.objects.get(id=67442)
        expected = base + '{0}/addon-{0}-latest.xpi'
        eq_(expected.format(f.version.addon_id), f.latest_xpi_url())
    def test_eula_url(self):
        f = File.objects.get(id=67442)
        eq_(f.eula_url(), '/en-US/firefox/addon/3615/eula/67442')
    def test_generate_filename(self):
        f = File.objects.get(id=67442)
        eq_(f.generate_filename(), 'delicious_bookmarks-2.1.072-fx.xpi')
    def test_generate_filename_webapp(self):
        # Webapps get a .webapp extension and the app_slug as the base name.
        f = File.objects.get(id=67442)
        f.version.addon.app_slug = 'testing-123'
        f.version.addon.type = amo.ADDON_WEBAPP
        eq_(f.generate_filename(), 'testing-123-2.1.072.webapp')
    def test_generate_filename_packaged_app(self):
        # Packaged apps get a .zip extension instead of .webapp.
        f = File.objects.get(id=67442)
        f.version.addon.app_slug = 'testing-123'
        f.version.addon.type = amo.ADDON_WEBAPP
        f.version.addon.is_packaged = True
        eq_(f.generate_filename(), 'testing-123-2.1.072.zip')
    def test_generate_webapp_fn_non_ascii(self):
        # A fully non-ASCII slug falls back to the generic 'app' base name.
        f = File()
        f.version = Version(version='0.1.7')
        f.version.compatible_apps = (amo.FIREFOX,)
        f.version.addon = Addon(app_slug=u' フォクすけ といっしょ',
                                type=amo.ADDON_WEBAPP)
        eq_(f.generate_filename(), 'app-0.1.7.webapp')
    def test_generate_webapp_fn_partial_non_ascii(self):
        # Non-ASCII characters are stripped, ASCII part of the slug is kept.
        f = File()
        f.version = Version(version='0.1.7')
        f.version.compatible_apps = (amo.FIREFOX,)
        f.version.addon = Addon(app_slug=u'myapp フォクすけ といっしょ',
                                type=amo.ADDON_WEBAPP)
        eq_(f.generate_filename(), 'myapp-0.1.7.webapp')
    def test_pretty_filename(self):
        f = File.objects.get(id=67442)
        f.generate_filename()
        eq_(f.pretty_filename(), 'delicious_bookmarks-2.1.072-fx.xpi')
    def test_pretty_filename_short(self):
        # Long add-on names are truncated with an ellipsis.
        f = File.objects.get(id=67442)
        f.version.addon.name = 'A Place Where The Sea Remembers Your Name'
        f.generate_filename()
        eq_(f.pretty_filename(), 'a_place_where_the...-2.1.072-fx.xpi')
    def test_generate_filename_platform_specific(self):
        f = File.objects.get(id=67442)
        f.platform_id = amo.PLATFORM_MAC.id
        eq_(f.generate_filename(), 'delicious_bookmarks-2.1.072-fx-mac.xpi')
    def test_generate_filename_many_apps(self):
        f = File.objects.get(id=67442)
        f.version.compatible_apps = (amo.FIREFOX, amo.THUNDERBIRD)
        eq_(f.generate_filename(), 'delicious_bookmarks-2.1.072-fx+tb.xpi')
    def test_generate_filename_ja(self):
        # A fully non-ASCII add-on name falls back to the generic 'addon'.
        f = File()
        f.version = Version(version='0.1.7')
        f.version.compatible_apps = (amo.FIREFOX,)
        f.version.addon = Addon(name=u' フォクすけ といっしょ')
        eq_(f.generate_filename(), 'addon-0.1.7-fx.xpi')
    def clean_files(self, f):
        """Reset disk state for mirror tests: no mirror copy, a file present
        at the primary path."""
        if f.mirror_file_path and storage.exists(f.mirror_file_path):
            storage.delete(f.mirror_file_path)
        if not storage.exists(f.file_path):
            with storage.open(f.file_path, 'w') as fp:
                fp.write('sample data\n')
    def test_copy_to_mirror(self):
        f = File.objects.get(id=67442)
        self.clean_files(f)
        f.copy_to_mirror()
        assert storage.exists(f.mirror_file_path)
    @mock.patch('shutil.copyfile')
    def test_not_copy_to_mirror(self, copyfile):
        # Premium add-ons must never be mirrored.
        f = File.objects.get(id=67442)
        f.version.addon.update(premium_type=amo.ADDON_PREMIUM)
        self.clean_files(f)
        f.copy_to_mirror()
        assert not f.mirror_file_path
        assert not copyfile.called
    def test_generate_hash(self):
        f = File()
        f.version = Version.objects.get(pk=81551)
        fn = self.xpi_path('delicious_bookmarks-2.1.106-fx')
        assert f.generate_hash(fn).startswith('sha256:fd277d45ab44f6240e')
    def test_public_is_testable(self):
        f = File.objects.get(pk=67442)
        f.update(status=amo.STATUS_PUBLIC)
        eq_(f.can_be_perf_tested(), True)
    def test_reviewed_is_testable(self):
        f = File.objects.get(pk=67442)
        f.update(status=amo.STATUS_LITE)
        eq_(f.can_be_perf_tested(), True)
    def test_unreviewed_is_not_testable(self):
        f = File.objects.get(pk=67442)
        f.update(status=amo.STATUS_UNREVIEWED)
        eq_(f.can_be_perf_tested(), False)
    def test_disabled_is_not_testable(self):
        f = File.objects.get(pk=67442)
        f.update(status=amo.STATUS_DISABLED)
        eq_(f.can_be_perf_tested(), False)
    def test_deleted_addon_is_not_testable(self):
        f = File.objects.get(pk=67442)
        f.version.addon.update(disabled_by_user=True)
        eq_(f.can_be_perf_tested(), False)
    def test_webapp_is_not_testable(self):
        f = File.objects.get(pk=67442)
        f.version.addon.update(type=amo.ADDON_WEBAPP)
        eq_(f.can_be_perf_tested(), False)
    def test_file_is_mirrorable(self):
        f = File.objects.get(pk=67442)
        eq_(f.is_mirrorable(), True)
        f.update(status=amo.STATUS_DISABLED)
        eq_(f.is_mirrorable(), False)
    def test_premium_addon_not_mirrorable(self):
        f = File.objects.get(pk=67442)
        f.version.addon.premium_type = amo.ADDON_PREMIUM
        eq_(f.is_mirrorable(), False)
    def test_dont_mirror_apps(self):
        f = File.objects.get(pk=67442)
        f.version.addon.update(type=amo.ADDON_WEBAPP)
        eq_(f.is_mirrorable(), False)
    def test_addon(self):
        # File.addon must resolve even when the add-on is soft-deleted
        # (presumably via an unfiltered manager -- confirmed only by the
        # no_cache() fetch here).
        f = File.objects.get(pk=67442)
        addon_id = f.version.addon_id
        addon = Addon.objects.no_cache().get(pk=addon_id)
        addon.update(status=amo.STATUS_DELETED)
        eq_(f.addon.id, addon_id)
class TestParseXpi(amo.tests.TestCase):
    """Tests for parse_addon() on standard XPI fixtures: metadata
    extraction, app/guid validation, type detection, and version checks."""
    fixtures = ['base/apps']
    def setUp(self):
        for version in ('3.0', '3.6.*'):
            AppVersion.objects.create(application_id=amo.FIREFOX.id,
                                      version=version)
    def parse(self, addon=None, filename='extension.xpi'):
        # NOTE(review): the file handle from open() is never closed here;
        # harmless for a short-lived test process but worth fixing.
        path = 'apps/files/fixtures/files/' + filename
        xpi = os.path.join(settings.ROOT, path)
        return parse_addon(open(xpi), addon)
    def test_parse_basics(self):
        # Everything but the apps
        exp = {'guid': 'guid@xpi',
               'name': 'xpi name',
               'summary': 'xpi description',
               'version': '0.1',
               'homepage': 'http://homepage.com',
               'type': 1}
        parsed = self.parse()
        for key, value in exp.items():
            eq_(parsed[key], value)
    def test_parse_apps(self):
        exp = (amo.FIREFOX,
               amo.FIREFOX.id,
               AppVersion.objects.get(version='3.0'),
               AppVersion.objects.get(version='3.6.*'))
        eq_(self.parse()['apps'], [exp])
    def test_parse_apps_bad_appver(self):
        # Unknown app versions result in an empty apps list, not an error.
        AppVersion.objects.all().delete()
        eq_(self.parse()['apps'], [])
    def test_parse_apps_bad_guid(self):
        # Unknown application GUIDs are likewise silently dropped.
        Application.objects.all().delete()
        eq_(self.parse()['apps'], [])
    def test_guid_match(self):
        addon = Addon.objects.create(guid='guid@xpi', type=1)
        eq_(self.parse(addon)['guid'], 'guid@xpi')
    def test_guid_nomatch(self):
        addon = Addon.objects.create(guid='xxx', type=1)
        with self.assertRaises(forms.ValidationError) as e:
            self.parse(addon)
        eq_(e.exception.messages, ["UUID doesn't match add-on."])
    def test_guid_dupe(self):
        Addon.objects.create(guid='guid@xpi', type=1)
        with self.assertRaises(forms.ValidationError) as e:
            self.parse()
        eq_(e.exception.messages, ['Duplicate UUID found.'])
    def test_match_type(self):
        addon = Addon.objects.create(guid='guid@xpi', type=4)
        with self.assertRaises(forms.ValidationError) as e:
            self.parse(addon)
        eq_(e.exception.messages,
            ["<em:type> doesn't match add-on"])
    def test_xml_for_extension(self):
        # Uploading a search XML against an extension add-on must fail.
        addon = Addon.objects.create(guid='guid@xpi', type=1)
        with self.assertRaises(forms.ValidationError) as e:
            self.parse(addon, filename='search.xml')
        eq_(e.exception.messages, ["<em:type> doesn't match add-on"])
    def test_unknown_app(self):
        data = self.parse(filename='theme-invalid-app.jar')
        eq_(data['apps'], [])
    def test_bad_zipfile(self):
        with self.assertRaises(forms.ValidationError) as e:
            parse_addon('baxmldzip.xpi', None)
        eq_(e.exception.messages, ['Could not parse install.rdf.'])
    def test_parse_dictionary(self):
        result = self.parse(filename='dictionary-test.xpi')
        eq_(result['type'], amo.ADDON_DICT)
    def test_parse_dictionary_explicit_type(self):
        result = self.parse(filename='dictionary-explicit-type-test.xpi')
        eq_(result['type'], amo.ADDON_DICT)
    def test_parse_dictionary_extension(self):
        # A dictionary packaged like an extension is typed as an extension.
        result = self.parse(filename='dictionary-extension-test.xpi')
        eq_(result['type'], amo.ADDON_EXTENSION)
    def test_parse_jar(self):
        result = self.parse(filename='theme.jar')
        eq_(result['type'], amo.ADDON_THEME)
    def test_parse_theme_by_type(self):
        result = self.parse(filename='theme-type.xpi')
        eq_(result['type'], amo.ADDON_THEME)
    def test_parse_theme_with_internal_name(self):
        result = self.parse(filename='theme-internal-name.xpi')
        eq_(result['type'], amo.ADDON_THEME)
    def test_parse_no_type(self):
        # Missing <em:type> defaults to extension.
        result = self.parse(filename='no-type.xpi')
        eq_(result['type'], amo.ADDON_EXTENSION)
    def test_parse_invalid_type(self):
        # Unrecognized <em:type> values also default to extension.
        result = self.parse(filename='invalid-type.xpi')
        eq_(result['type'], amo.ADDON_EXTENSION)
    def test_parse_langpack(self):
        result = self.parse(filename='langpack.xpi')
        eq_(result['type'], amo.ADDON_LPAPP)
    def test_good_version_number(self):
        check_rdf({'guid': 'guid', 'version': '1.2a-b+32*__yeah'})
        check_rdf({'guid': 'guid', 'version': '1' * 32})
    def test_bad_version_number(self):
        with self.assertRaises(forms.ValidationError) as e:
            check_rdf({'guid': 'guid', 'version': 'bad #version'})
        msg = e.exception.messages[0]
        assert msg.startswith('Version numbers should only contain'), msg
    def test_long_version_number(self):
        # 32 chars is allowed (see above); 33 is rejected.
        with self.assertRaises(forms.ValidationError) as e:
            check_rdf({'guid': 'guid', 'version': '1' * 33})
        msg = e.exception.messages[0]
        eq_(msg, 'Version numbers should have fewer than 32 characters.')
    def test_strict_compat_undefined(self):
        result = self.parse()
        eq_(result['strict_compatibility'], False)
    def test_strict_compat_enabled(self):
        result = self.parse(filename='strict-compat.xpi')
        eq_(result['strict_compatibility'], True)
class TestParseAlternateXpi(amo.tests.TestCase, amo.tests.AMOPaths):
    # This install.rdf is completely different from our other xpis.
    fixtures = ['base/apps']
    def setUp(self):
        for version in ('3.0', '4.0b3pre'):
            AppVersion.objects.create(application_id=amo.FIREFOX.id,
                                      version=version)
    def parse(self, filename='alt-rdf.xpi'):
        # NOTE(review): file handle from open() is never closed.
        return parse_addon(open(self.file_fixture_path(filename)))
    def test_parse_basics(self):
        # Everything but the apps.
        exp = {'guid': '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}',
               'name': 'Delicious Bookmarks',
               'summary': 'Access your bookmarks wherever you go and keep '
                          'them organized no matter how many you have.',
               'homepage': 'http://delicious.com',
               'type': amo.ADDON_EXTENSION,
               'version': '2.1.106'}
        parsed = self.parse()
        for key, value in exp.items():
            eq_(parsed[key], value)
    def test_parse_apps(self):
        exp = (amo.FIREFOX,
               amo.FIREFOX.id,
               AppVersion.objects.get(version='3.0'),
               AppVersion.objects.get(version='4.0b3pre'))
        eq_(self.parse()['apps'], [exp])
    @mock.patch('files.utils.rdflib.Graph')
    def test_no_manifest_node(self, graph_mock):
        # An RDF graph with no triples/subjects must be rejected as
        # unparseable rather than crash.
        rdf_mock = mock.Mock()
        graph_mock.return_value.parse.return_value = rdf_mock
        rdf_mock.triples.return_value = iter([])
        rdf_mock.subjects.return_value = iter([])
        with self.assertRaises(forms.ValidationError) as e:
            self.parse()
        eq_(e.exception.messages, ['Could not parse install.rdf.'])
class TestFileUpload(UploadTest):
    """Tests for the FileUpload model: chunked POST handling, hashing,
    validation JSON parsing, and validator-driven File flags."""
    fixtures = ['applications/all_apps.json', 'base/appversion',
                'base/addon_3615']
    def setUp(self):
        super(TestFileUpload, self).setUp()
        self.data = 'file contents'
    def upload(self):
        # The data should be in chunks.
        data = [''.join(x) for x in amo.utils.chunked(self.data, 3)]
        return FileUpload.from_post(data, 'filename.xpi',
                                    len(self.data))
    def test_from_post_write_file(self):
        eq_(storage.open(self.upload().path).read(), self.data)
    def test_from_post_filename(self):
        eq_(self.upload().name, 'filename.xpi')
    def test_from_post_hash(self):
        hash = hashlib.sha256(self.data).hexdigest()
        eq_(self.upload().hash, 'sha256:%s' % hash)
    def test_save_without_validation(self):
        f = FileUpload.objects.create()
        assert not f.valid
    def test_save_with_validation(self):
        f = FileUpload.objects.create(
            validation='{"errors": 0, "metadata": {}}')
        assert f.valid
        # Unparseable validation JSON must not mark the upload valid.
        f = FileUpload.objects.create(validation='wtf')
        assert not f.valid
    def test_update_with_validation(self):
        f = FileUpload.objects.create()
        f.validation = '{"errors": 0, "metadata": {}}'
        f.save()
        assert f.valid
    def test_update_without_validation(self):
        f = FileUpload.objects.create()
        f.save()
        assert not f.valid
    def test_ascii_names(self):
        # Non-ASCII filenames must still yield a usable .xpi name.
        fu = FileUpload.from_post('', u'jétpack.xpi', 0)
        assert 'xpi' in fu.name
        fu = FileUpload.from_post('', u'мозила_србија-0.11-fx.xpi', 0)
        assert 'xpi' in fu.name
        fu = FileUpload.from_post('', u'フォクすけといっしょ.xpi', 0)
        assert 'xpi' in fu.name
        fu = FileUpload.from_post('', u'\u05d0\u05d5\u05e1\u05e3.xpi', 0)
        assert 'xpi' in fu.name
    def test_validator_sets_binary_via_extensions(self):
        # contains_binary_extension in the validator metadata flips
        # File.binary on.
        validation = json.dumps({
            "errors": 0,
            "success": True,
            "warnings": 0,
            "notices": 0,
            "message_tree": {},
            "messages": [],
            "metadata": {
                "contains_binary_extension": True,
                "version": "1.0",
                "name": "gK0Bes Bot",
                "id": "gkobes@gkobes",
            }
        })
        upload = self.get_upload(filename='extension.xpi',
                                 validation=validation)
        version = Version.objects.filter(addon__pk=3615)[0]
        plat = Platform.objects.get(pk=amo.PLATFORM_LINUX.id)
        file_ = File.from_upload(upload, version, plat)
        eq_(file_.binary, True)
    def test_validator_sets_binary_via_content(self):
        # contains_binary_content has the same effect as
        # contains_binary_extension above.
        validation = json.dumps({
            "errors": 0,
            "success": True,
            "warnings": 0,
            "notices": 0,
            "message_tree": {},
            "messages": [],
            "metadata": {
                "contains_binary_content": True,
                "version": "1.0",
                "name": "gK0Bes Bot",
                "id": "gkobes@gkobes",
            }
        })
        upload = self.get_upload(filename='extension.xpi',
                                 validation=validation)
        version = Version.objects.filter(addon__pk=3615)[0]
        plat = Platform.objects.get(pk=amo.PLATFORM_LINUX.id)
        file_ = File.from_upload(upload, version, plat)
        eq_(file_.binary, True)
    def test_validator_sets_require_chrome(self):
        validation = json.dumps({
            "errors": 0,
            "success": True,
            "warnings": 0,
            "notices": 0,
            "message_tree": {},
            "messages": [],
            "metadata": {
                "version": "1.0",
                "name": "gK0Bes Bot",
                "id": "gkobes@gkobes",
                "requires_chrome": True
            }
        })
        upload = self.get_upload(filename='extension.xpi',
                                 validation=validation)
        version = Version.objects.filter(addon__pk=3615)[0]
        plat = Platform.objects.get(pk=amo.PLATFORM_LINUX.id)
        file_ = File.from_upload(upload, version, plat)
        eq_(file_.requires_chrome, True)
class TestFileFromUpload(UploadTest):
    """Tests for File.from_upload(): filename generation, hashing,
    no_restart/strict-compat flags, jetpack metadata, and the status a new
    file receives given the add-on's current status and trust."""
    fixtures = ['base/apps']
    def setUp(self):
        super(TestFileFromUpload, self).setUp()
        appver = {amo.FIREFOX: ['3.0', '3.6', '3.6.*', '4.0b6'],
                  amo.MOBILE: ['0.1', '2.0a1pre']}
        for app, versions in appver.items():
            for version in versions:
                AppVersion(application_id=app.id, version=version).save()
        self.platform = Platform.objects.create(id=amo.PLATFORM_MAC.id)
        self.addon = Addon.objects.create(guid='guid@jetpack',
                                          type=amo.ADDON_EXTENSION,
                                          name='xxx')
        self.version = Version.objects.create(addon=self.addon)
    def upload(self, name):
        """Build a FileUpload for a named fixture, defaulting to .xpi and
        copying the fixture into storage if it isn't there yet."""
        if os.path.splitext(name)[-1] not in ['.xml', '.xpi', '.jar']:
            name = name + '.xpi'
        v = json.dumps(dict(errors=0, warnings=1, notices=2, metadata={}))
        fname = nfd_str(self.xpi_path(name))
        if not storage.exists(fname):
            # NOTE(review): open(fname) here is never closed.
            with storage.open(fname, 'w') as fs:
                copyfileobj(open(fname), fs)
        d = dict(path=fname, name=name,
                 hash='sha256:%s' % name, validation=v)
        return FileUpload.objects.create(**d)
    def test_jetpack_version(self):
        upload = self.upload('jetpack')
        f = File.from_upload(upload, self.version, self.platform)
        file_ = File.objects.get(id=f.id)
        eq_(file_.jetpack_version, '1.0b4')
        eq_(file_.builder_version, None)
        # Jetpack uploads also tag the add-on.
        eq_(['jetpack'], [t.tag_text for t in self.addon.tags.all()])
    def test_jetpack_builder_version(self):
        upload = self.upload('jetpack_builder')
        f = File.from_upload(upload, self.version, self.platform)
        file_ = File.objects.get(id=f.id)
        eq_(file_.builder_version, '1.1.1.1')
    def test_jetpack_with_invalid_json(self):
        # Broken harness-options JSON must not crash; metadata stays unset.
        upload = self.upload('jetpack_invalid')
        f = File.from_upload(upload, self.version, self.platform)
        file_ = File.objects.get(id=f.id)
        eq_(file_.jetpack_version, None)
        eq_(file_.builder_version, None)
        assert not self.addon.tags.exists()
    def test_filename(self):
        upload = self.upload('jetpack')
        f = File.from_upload(upload, self.version, self.platform)
        eq_(f.filename, 'xxx-0.1-mac.xpi')
    def test_filename_no_extension(self):
        upload = self.upload('jetpack')
        # Remove the exension.
        upload.name = upload.name.rsplit('.', 1)[0]
        f = File.from_upload(upload, self.version, self.platform)
        eq_(f.filename, 'xxx-0.1-mac.xpi')
    def test_file_validation(self):
        # The upload's validation JSON is copied to a FileValidation row.
        upload = self.upload('jetpack')
        file = File.from_upload(upload, self.version, self.platform)
        fv = FileValidation.objects.get(file=file)
        eq_(fv.validation, upload.validation)
        eq_(fv.valid, True)
        eq_(fv.errors, 0)
        eq_(fv.warnings, 1)
        eq_(fv.notices, 2)
    def test_file_hash(self):
        upload = self.upload('jetpack')
        f = File.from_upload(upload, self.version, self.platform)
        assert f.hash.startswith('sha256:')
        assert len(f.hash) == 64 + 7  # 64 for hash, 7 for 'sha256:'
    def test_no_restart_true(self):
        upload = self.upload('jetpack')
        d = parse_addon(upload.path)
        f = File.from_upload(upload, self.version, self.platform, parse_data=d)
        assert f.no_restart
    def test_no_restart_dictionary(self):
        upload = self.upload('dictionary-explicit-type-test')
        d = parse_addon(upload.path)
        f = File.from_upload(upload, self.version, self.platform, parse_data=d)
        assert f.no_restart
    def test_no_restart_false(self):
        upload = self.upload('extension')
        d = parse_addon(upload.path)
        f = File.from_upload(upload, self.version, self.platform, parse_data=d)
        assert not f.no_restart
    def test_utf8(self):
        # Non-ASCII upload and add-on names are slugified into the filename.
        upload = self.upload(u'jétpack')
        self.version.addon.name = u'jéts!'
        f = File.from_upload(upload, self.version, self.platform)
        eq_(f.filename, u'jets-0.1-mac.xpi')
    def test_size(self):
        upload = self.upload('extension')
        f = File.from_upload(upload, self.version, self.platform)
        eq_(f.size, 2264)
    def test_size_small(self):
        upload = self.upload('alt-rdf')
        f = File.from_upload(upload, self.version, self.platform)
        eq_(f.size, 675)
    def test_beta_version_non_public(self):
        # Only public add-ons can get beta versions.
        upload = self.upload('beta-extension')
        data = parse_addon(upload.path)
        self.addon.update(status=amo.STATUS_LITE)
        eq_(self.addon.status, amo.STATUS_LITE)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.status, amo.STATUS_UNREVIEWED)
    def test_public_to_beta(self):
        upload = self.upload('beta-extension')
        data = parse_addon(upload.path)
        self.addon.update(status=amo.STATUS_PUBLIC)
        eq_(self.addon.status, amo.STATUS_PUBLIC)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.status, amo.STATUS_BETA)
    def test_trusted_public_to_beta(self):
        upload = self.upload('beta-extension')
        data = parse_addon(upload.path)
        self.addon.update(status=amo.STATUS_PUBLIC, trusted=True)
        eq_(self.addon.status, amo.STATUS_PUBLIC)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.status, amo.STATUS_BETA)
    def test_public_to_unreviewed(self):
        upload = self.upload('extension')
        data = parse_addon(upload.path)
        self.addon.update(status=amo.STATUS_PUBLIC)
        eq_(self.addon.status, amo.STATUS_PUBLIC)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.status, amo.STATUS_UNREVIEWED)
    def test_trusted_public_to_public(self):
        # Trusted add-ons skip the review queue entirely.
        upload = self.upload('extension')
        data = parse_addon(upload.path)
        self.addon.update(status=amo.STATUS_PUBLIC, trusted=True)
        eq_(self.addon.status, amo.STATUS_PUBLIC)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.status, amo.STATUS_PUBLIC)
    def test_lite_to_unreviewed(self):
        upload = self.upload('extension')
        data = parse_addon(upload.path)
        self.addon.update(status=amo.STATUS_LITE)
        eq_(self.addon.status, amo.STATUS_LITE)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.status, amo.STATUS_UNREVIEWED)
    def test_trusted_lite_to_lite(self):
        upload = self.upload('extension')
        data = parse_addon(upload.path)
        self.addon.update(status=amo.STATUS_LITE, trusted=True)
        eq_(self.addon.status, amo.STATUS_LITE)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.status, amo.STATUS_LITE)
    def test_litenominated_to_unreviewed(self):
        upload = self.upload('extension')
        data = parse_addon(upload.path)
        with mock.patch('addons.models.Addon.update_status'):
            # mock update_status because it doesn't like Addons without files.
            self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
            eq_(self.addon.status, amo.STATUS_LITE_AND_NOMINATED)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.status, amo.STATUS_UNREVIEWED)
    def test_trusted_litenominated_to_litenominated(self):
        upload = self.upload('extension')
        data = parse_addon(upload.path)
        with mock.patch('addons.models.Addon.update_status'):
            # mock update_status because it doesn't like Addons without files.
            self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED,
                              trusted=True)
            eq_(self.addon.status, amo.STATUS_LITE_AND_NOMINATED)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.status, amo.STATUS_LITE_AND_NOMINATED)
    def test_file_hash_paranoia(self):
        upload = self.upload('extension')
        f = File.from_upload(upload, self.version, self.platform)
        assert f.hash.startswith('sha256:035ae07b4988711')
    def test_strict_compat(self):
        upload = self.upload('strict-compat')
        data = parse_addon(upload.path)
        f = File.from_upload(upload, self.version, self.platform, data)
        eq_(f.strict_compatibility, True)
    def test_theme_extension(self):
        # .jar themes are normalized to .xpi filenames.
        upload = self.upload('theme.jar')
        f = File.from_upload(upload, self.version, self.platform)
        eq_(f.filename.endswith('.xpi'), True)
    def test_extension_extension(self):
        upload = self.upload('extension.xpi')
        f = File.from_upload(upload, self.version, self.platform)
        eq_(f.filename.endswith('.xpi'), True)
        assert not self.addon.tags.exists()
    def test_langpack_extension(self):
        upload = self.upload('langpack.xpi')
        f = File.from_upload(upload, self.version, self.platform)
        eq_(f.filename.endswith('.xpi'), True)
    def test_search_extension(self):
        # Search engines keep their .xml extension.
        upload = self.upload('search.xml')
        f = File.from_upload(upload, self.version, self.platform)
        eq_(f.filename.endswith('.xml'), True)
class TestZip(amo.tests.TestCase, amo.tests.AMOPaths):
    def test_zip(self):
        """Extracting a zip whose only entry is 'chrome/' must yield a
        directory, not a file."""
        # This zip contains just one file chrome/ that we expect
        # to be unzipped as a directory, not a file.
        xpi = self.xpi_path('directory-test')
        # This is to work around: http://bugs.python.org/issue4710
        # which was fixed in Python 2.6.2. If the required version
        # of Python for zamboni goes to 2.6.2 or above, this can
        # be removed.
        #
        # Fix: create the temp dir *before* entering the try block.  In the
        # original, `dest = tempfile.mkdtemp()` was the first statement
        # inside the try; had mkdtemp() raised, the finally clause would
        # have hit a NameError on the unbound `dest`.
        dest = tempfile.mkdtemp()
        try:
            zipfile.ZipFile(xpi).extractall(dest)
            assert os.path.isdir(os.path.join(dest, 'chrome'))
        finally:
            rm_local_tmp_dir(dest)
class TestParseSearch(amo.tests.TestCase, amo.tests.AMOPaths):
    """Tests for parse_addon() on OpenSearch XML fixtures."""
    def parse(self, filename='search.xml'):
        # NOTE(review): file handle from open() is never closed.
        return parse_addon(open(self.file_fixture_path(filename)))
    def extract(self):
        # This is the expected return value from extract_search.
        return {'url': {u'type': u'text/html', u'template':
                        u'http://www.yyy.com?q={searchTerms}'},
                'xmlns': u'http://a9.com/-/spec/opensearch/1.1/',
                'name': u'search tool',
                'description': u'Search Engine for Firefox'}
    def test_basics(self):
        # This test breaks if the day changes. Have fun with that!
        # (Search engines get a date-stamped version; the assertion is
        # flaky only across a midnight boundary.)
        eq_(self.parse(), {
            'guid': None,
            'name': 'search tool',
            'version': datetime.now().strftime('%Y%m%d'),
            'summary': 'Search Engine for Firefox',
            'type': amo.ADDON_SEARCH})
    @mock.patch('files.utils.extract_search')
    def test_extract_search_error(self, extract_mock):
        # Any extract_search failure is surfaced as a ValidationError.
        extract_mock.side_effect = Exception
        with self.assertRaises(forms.ValidationError) as e:
            self.parse()
        assert e.exception.messages[0].startswith('Could not parse ')
@mock.patch('files.utils.parse_xpi')
@mock.patch('files.utils.parse_search')
def test_parse_addon(search_mock, xpi_mock):
    """parse_addon dispatches to the right parser based on file extension."""
    # mock.patch decorators are applied bottom-up, so parse_search is the
    # first argument and parse_xpi the second.
    parse_addon('file.xpi', None)
    xpi_mock.assert_called_with('file.xpi', None)

    parse_addon('file.xml', None)
    search_mock.assert_called_with('file.xml', None)

    parse_addon('file.jar', None)
    xpi_mock.assert_called_with('file.jar', None)
def test_parse_xpi():
    """Fire.fm can sometimes give us errors. Let's prevent that."""
    firefm = os.path.join(settings.ROOT,
                          'apps/files/fixtures/files/firefm.xpi')
    # Fix: use a context manager so the fixture file handle is always closed
    # (the original leaked the open file object).
    with open(firefm) as firefm_file:
        rdf = parse_xpi(firefm_file)
    eq_(rdf['name'], 'Fire.fm')
class TestCheckJetpackVersion(amo.tests.TestCase):
    """The submission_done signal triggers jetpack upgrades when appropriate."""

    fixtures = ['base/addon_3615']

    def setUp(self):
        self.addon = Addon.objects.get(id=3615)
        # Register 1.0 -> 1.1 as the current jetpack upgrade path.
        JetpackUpgrader().jetpack_versions('1.0', '1.1')

    @mock.patch('files.tasks.start_upgrade.delay')
    def test_upgrade(self, upgrade_mock):
        """Files at the old jetpack version are queued for upgrade."""
        File.objects.update(jetpack_version='1.0')
        ids = list(File.objects.values_list('id', flat=True))
        devhub.signals.submission_done.send(sender=self.addon)
        upgrade_mock.assert_called_with(ids, priority='high')

    @mock.patch('files.tasks.start_upgrade.delay')
    def test_no_upgrade(self, upgrade_mock):
        """No upgrade is queued for non-jetpack or non-matching versions."""
        File.objects.update(jetpack_version=None)
        devhub.signals.submission_done.send(sender=self.addon)
        assert not upgrade_mock.called
        File.objects.update(jetpack_version='0.9')
        devhub.signals.submission_done.send(sender=self.addon)
        assert not upgrade_mock.called
class LanguagePackBase(UploadTest):
    """Shared setup: a public language-pack add-on with one version."""

    def setUp(self):
        super(LanguagePackBase, self).setUp()
        self.addon = Addon.objects.create(type=amo.ADDON_LPAPP)
        self.platform = Platform.objects.create(id=amo.PLATFORM_ALL.id)
        self.version = Version.objects.create(addon=self.addon)
        self.addon.update(status=amo.STATUS_PUBLIC)
        self.addon._current_version = self.version
class TestLanguagePack(LanguagePackBase):
    """Tests for extracting the localepicker entry from language packs."""

    def file_create(self, path):
        """Create a File whose filename points at the given xpi fixture."""
        return (File.objects.create(platform=self.platform,
                                    version=self.version,
                                    filename=self.xpi_path(path)))

    def test_extract(self):
        """A langpack with a chrome.manifest exposes its localepicker."""
        obj = self.file_create('langpack-localepicker')
        assert 'title=Select a language' in obj.get_localepicker()

    def test_extract_no_chrome_manifest(self):
        """Without a chrome.manifest there is no localepicker content."""
        obj = self.file_create('langpack')
        eq_(obj.get_localepicker(), '')

    def test_zip_invalid(self):
        """A non-zip file yields an empty localepicker instead of an error."""
        obj = self.file_create('search.xml')
        eq_(obj.get_localepicker(), '')

    @mock.patch('files.utils.SafeUnzip.extract_path')
    def test_no_locale_browser(self, extract_path):
        """A manifest without a locale browser line yields empty output."""
        extract_path.return_value = 'some garbage'
        obj = self.file_create('langpack-localepicker')
        eq_(obj.get_localepicker(), '')

    @mock.patch('files.utils.SafeUnzip.extract_path')
    def test_corrupt_locale_browser_path(self, extract_path):
        """Malformed locale paths are ignored (and the result is cached)."""
        extract_path.return_value = 'locale browser de woot?!'
        obj = self.file_create('langpack-localepicker')
        eq_(obj.get_localepicker(), '')
        extract_path.return_value = 'locale browser de woo:t?!as'
        # Result should be 'locale browser de woo:t?!as', but we have caching.
        eq_(obj.get_localepicker(), '')

    @mock.patch('files.utils.SafeUnzip.extract_path')
    def test_corrupt_locale_browser_data(self, extract_path):
        """A locale line pointing at bad jar data yields empty output."""
        extract_path.return_value = 'locale browser de jar:install.rdf!foo'
        obj = self.file_create('langpack-localepicker')
        eq_(obj.get_localepicker(), '')

    def test_hits_cache(self):
        """Once computed, the localepicker is served from cache."""
        obj = self.file_create('langpack-localepicker')
        assert 'title=Select a language' in obj.get_localepicker()
        obj.update(filename='garbage')
        assert 'title=Select a language' in obj.get_localepicker()

    @mock.patch('files.models.File.get_localepicker')
    def test_cache_on_create(self, get_localepicker):
        """Creating a langpack file warms the localepicker cache."""
        self.file_create('langpack-localepicker')
        assert get_localepicker.called

    @mock.patch('files.models.File.get_localepicker')
    def test_cache_not_on_create(self, get_localepicker):
        """Non-langpack add-ons skip the localepicker cache warm-up."""
        self.addon.update(type=amo.ADDON_DICT)
        self.file_create('langpack-localepicker')
        assert not get_localepicker.called
class TestSignedPath(amo.tests.TestCase):
    """signed_file_path / signed_reviewer_file_path derive from file_path."""

    fixtures = ['webapps/337141-steamcube']

    def setUp(self):
        self.file_ = File.objects.get(pk=81555)

    def test_path(self):
        """The signed path swaps the extension and the storage root."""
        path = (self.file_.file_path
                .replace('.webapp', '.signed.webapp')
                .replace(settings.ADDONS_PATH, settings.SIGNED_APPS_PATH))
        eq_(self.file_.signed_file_path, path)

    def test_reviewer_path(self):
        """The reviewer path uses the reviewer-specific signed-apps root."""
        path = (self.file_.file_path
                .replace('.webapp', '.signed.webapp')
                .replace(settings.ADDONS_PATH,
                         settings.SIGNED_APPS_REVIEWER_PATH))
        eq_(self.file_.signed_reviewer_file_path, path)
| wagnerand/zamboni | apps/files/tests/test_models.py | Python | bsd-3-clause | 40,312 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
import pytest
from wtforms.validators import ValidationError
from invenio_groups.forms import EmailsValidator, NewMemberForm
def test_emails_validator(app):
    """Test validation of email addresses."""
    with app.app_context():
        validator = EmailsValidator()
        form = NewMemberForm()

        # A mix of well-formed addresses and blank lines must validate.
        valid_lines = [
            '',
            'test_example@example.com',
            'example@net.com',
            '',
            'test.user@example.net',
        ]
        form.emails.data = '\n'.join(valid_lines)
        validator(form, form.emails)

        # Appending one malformed address must make validation fail.
        form.emails.data = form.emails.data + '\ninvalid_email'
        with pytest.raises(ValidationError):
            validator(form, form.emails)
| inveniosoftware/invenio-groups | tests/test_forms.py | Python | gpl-2.0 | 1,637 |
import sublime
import sublime_plugin
from ..settings import *
from .base_window import BaseWindowCommand
PackageControl = __import__('Package Control')
class PackageBundlerManagerCommand(BaseWindowCommand):
    """Window command that manages a bundle's list of ignored packages."""

    management_options_label = ['Add ignored package', 'Remove ignored package']

    def chosen_bundle(self, picked):
        """Remember the bundle picked in the quick panel and show the options."""
        if picked == -1:
            return
        bundles = self.get_bundles_list()
        self.picked_bundle = bundles[picked]
        self.show_quick_panel(self.management_options_label, self.chosen_management)

    def chosen_management(self, picked):
        """Dispatch to the add/remove handler matching the picked option."""
        if picked == -1:
            return
        self.management_options[picked](self)

    def pick_new_ignored_package(self):
        """Show the installed packages that can still be ignored."""
        packages_list = self.get_ignorable_packages()
        if not packages_list:
            sublime.error_message('Package Bundler: There is not package to disable')
            return
        self.show_quick_panel(packages_list, self.add_ignored_package)

    def add_ignored_package(self, picked):
        """Add the picked package to the bundle's ignore list and persist it."""
        if picked == -1:
            return
        packages_list = self.get_ignorable_packages()
        ignored_package = packages_list[picked]
        bundles = self.settings.get('bundles')
        bundle_ignored_packages = bundles[self.picked_bundle]['ignored_packages']
        bundle_ignored_packages.append(ignored_package)
        bundle_ignored_packages.sort()
        self.settings.set('bundles', bundles)
        sublime.save_settings(pb_settings_filename())
        sublime.status_message('Package Bundler: package '+ignored_package+' added to '+self.picked_bundle+' bundle\'s ignore list')

    def pick_old_ignored_package(self):
        """Show the bundle's currently ignored packages so one can be removed."""
        bundle_ignored_packages = self.settings.get('bundles')[self.picked_bundle]['ignored_packages']
        bundle_ignored_packages.sort()
        if not bundle_ignored_packages:
            sublime.error_message('Package Bundler: No package to remove from ignore list')
            return
        self.show_quick_panel(bundle_ignored_packages, self.remove_ignored_package)

    def remove_ignored_package(self, picked):
        """Remove the picked package from the bundle's ignore list and persist it."""
        if picked == -1:
            return
        bundles = self.settings.get('bundles')
        bundle_ignored_packages = bundles[self.picked_bundle]['ignored_packages']
        # Bug fix: `picked` indexes the sorted ignore list displayed by
        # pick_old_ignored_package, not get_ignorable_packages(); look the
        # name up in the same (sorted) list so we delete and report the
        # package the user actually selected.
        bundle_ignored_packages.sort()
        ignored_package = bundle_ignored_packages[picked]
        bundle_ignored_packages.remove(ignored_package)
        self.settings.set('bundles', bundles)
        sublime.save_settings(pb_settings_filename())
        sublime.status_message('Package Bundler: package '+ignored_package+' removed from '+self.picked_bundle+' bundle\'s ignore list')

    def get_ignorable_packages(self):
        """Return all installed packages not yet ignored by the picked bundle."""
        manager = PackageControl.package_control.package_manager.PackageManager()
        all_packages = manager.list_all_packages()
        ignored_packages = self.settings.get('bundles')[self.picked_bundle]['ignored_packages']
        if not ignored_packages:
            ignored_packages = []
        packages_list = list(set(all_packages) - set(ignored_packages))
        packages_list.sort()
        return packages_list

    # Maps the index of management_options_label to its handler.
    management_options = {
        0: pick_new_ignored_package,
        1: pick_old_ignored_package
    }
| STPackageBundler/package-bundler | package_bundler/commands/manager.py | Python | mit | 3,239 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from functools import lru_cache
from typing import Optional, Set
import jinja2
from docutils import nodes
from docutils.nodes import Element
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import StringList
from provider_yaml_utils import ( # pylint: disable=no-name-in-module
get_provider_yaml_paths,
load_package_data,
)
from sphinx.util import nested_parse_with_titles
from sphinx.util.docutils import switch_source_input
CMD_OPERATORS_AND_HOOKS = "operators-and-hooks"
CMD_TRANSFERS = 'transfers'
"""
Directives for rendering tables with operators.
To test the template rendering process, you can also run this script as a standalone program.
PYTHONPATH=$PWD/../ python exts/operators_and_hooks_ref.py --help
"""
DEFAULT_HEADER_SEPARATOR = "="
CURRENT_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir, os.pardir))
DOCS_DIR = os.path.join(ROOT_DIR, 'docs')
@lru_cache(maxsize=None)
def _get_jinja_env():
    """Return a cached Jinja environment loading templates from this directory.

    StrictUndefined makes any missing template variable an error instead of
    rendering silently as empty text.
    """
    loader = jinja2.FileSystemLoader(CURRENT_DIR, followlinks=True)
    env = jinja2.Environment(loader=loader, undefined=jinja2.StrictUndefined)
    return env
def _render_template(template_name, **kwargs):
    """Render ``template_name`` from this directory with ``kwargs`` as context."""
    return _get_jinja_env().get_template(template_name).render(**kwargs)
def _docs_path(filepath: str):
if not filepath.startswith("/docs/"):
raise Exception(f"The path must starts with '/docs/'. Current value: {filepath}")
if not filepath.endswith(".rst"):
raise Exception(f"The path must ends with '.rst'. Current value: {filepath}")
if filepath.startswith("/docs/apache-airflow-providers-"):
_, _, provider, rest = filepath.split("/", maxsplit=3)
filepath = f"{provider}:{rest}"
else:
filepath = os.path.join(ROOT_DIR, filepath.lstrip('/'))
filepath = os.path.relpath(filepath, DOCS_DIR)
len_rst = len(".rst")
filepath = filepath[:-len_rst]
return filepath
def _prepare_resource_index(package_data, resource_type):
return {
integration["integration-name"]: {**integration, 'package-name': provider['package-name']}
for provider in package_data
for integration in provider.get(resource_type, [])
}
def _prepare_operators_data(tags: Optional[Set[str]]):
    """Build the rows for the operators-and-hooks reference table.

    For every integration matching ``tags`` (all integrations when ``tags``
    is None), returns a dict with the integration metadata plus its
    operators, sensors and hooks (when present), sorted by integration name.
    Integrations with no operators, sensors or hooks are omitted.
    """
    package_data = load_package_data()
    all_integrations = _prepare_resource_index(package_data, "integrations")
    if tags is None:
        # Fixed: iterate the dict's values, not the dict itself — the latter
        # yields key strings, which then blow up on integration['integration-name'].
        to_display_integration = list(all_integrations.values())
    else:
        to_display_integration = [
            integration for integration in all_integrations.values() if tags.intersection(integration["tags"])
        ]

    all_operators_by_integration = _prepare_resource_index(package_data, "operators")
    all_hooks_by_integration = _prepare_resource_index(package_data, "hooks")
    # Fixed: the sensors index was built from "hooks", so sensors never appeared.
    all_sensors_by_integration = _prepare_resource_index(package_data, "sensors")
    results = []

    for integration in to_display_integration:
        item = {
            "integration": integration,
        }
        operators = all_operators_by_integration.get(integration['integration-name'])
        sensors = all_sensors_by_integration.get(integration['integration-name'])
        hooks = all_hooks_by_integration.get(integration['integration-name'])
        if 'how-to-guide' in item['integration']:
            item['integration']['how-to-guide'] = [_docs_path(d) for d in item['integration']['how-to-guide']]
        if operators:
            item['operators'] = operators
        if sensors:
            # Fixed: sensors were previously stored under the 'hooks' key,
            # where they were overwritten by the real hooks entry below.
            item['sensors'] = sensors
        if hooks:
            item['hooks'] = hooks
        if operators or sensors or hooks:
            results.append(item)
    return sorted(results, key=lambda d: d["integration"]["integration-name"].lower())
def _render_operator_content(*, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
    """Render the operators/hooks reference table as an RST string."""
    tabular_data = _prepare_operators_data(tags)

    return _render_template(
        "operators_and_hooks_ref.rst.jinja2", items=tabular_data, header_separator=header_separator
    )
def _prepare_transfer_data(tags: Optional[Set[str]]):
    """Build the rows for the transfer-operators reference table.

    Returns every provider-declared transfer whose source or target
    integration matches ``tags`` (all transfers when ``tags`` is None),
    with how-to-guide paths converted to Sphinx doc references.
    """
    package_data = load_package_data()
    all_operators_by_integration = _prepare_resource_index(package_data, "integrations")
    # Add edge case
    # "SQL" and "Local" are transfer endpoints without a provider integration
    # entry, so synthesize minimal index entries for them.
    for name in ["SQL", "Local"]:
        all_operators_by_integration[name] = {"integration-name": name}
    all_transfers = [
        {
            **transfer,
            'package-name': provider['package-name'],
            'source-integration': all_operators_by_integration[transfer['source-integration-name']],
            'target-integration': all_operators_by_integration[transfer['target-integration-name']],
        }
        for provider in package_data
        for transfer in provider.get("transfers", [])
    ]
    if tags is None:
        to_display_transfers = all_transfers
    else:
        # Keep a transfer if either endpoint carries one of the wanted tags.
        to_display_transfers = [
            transfer
            for transfer in all_transfers
            if tags.intersection(transfer['source-integration'].get('tags', set()))
            or tags.intersection(transfer['target-integration'].get('tags', set()))
        ]

    for transfer in to_display_transfers:
        if 'how-to-guide' not in transfer:
            continue
        transfer['how-to-guide'] = _docs_path(transfer['how-to-guide'])
    return to_display_transfers
def _render_transfer_content(*, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
    """Render the transfer-operators reference table as an RST string."""
    tabular_data = _prepare_transfer_data(tags)

    return _render_template(
        "operators_and_hooks_ref-transfers.rst.jinja2", items=tabular_data, header_separator=header_separator
    )
class BaseJinjaReferenceDirective(Directive):
    """The base directive for OperatorsHooksReferenceDirective and TransfersReferenceDirective"""

    optional_arguments = 1
    option_spec = {"tags": directives.unchanged, 'header-separator': directives.unchanged_required}

    def run(self):
        """Render the subclass's RST content and parse it into docutils nodes."""
        tags_arg = self.options.get("tags")
        tags = {t.strip() for t in tags_arg.split(",")} if tags_arg else None
        # Fixed: fall back to the default separator when the option is absent
        # (a bare .get() returned None, which overrode the keyword default).
        header_separator = self.options.get('header-separator', DEFAULT_HEADER_SEPARATOR)
        new_content = self.render_content(tags=tags, header_separator=header_separator)

        with switch_source_input(self.state, self.content):
            new_content = StringList(new_content.splitlines(), source='')
            node = nodes.section()  # type: Element
            # necessary so that the child nodes get the right source/line set
            node.document = self.state.document
            nested_parse_with_titles(self.state, new_content, node)

        # record all filenames as dependencies -- this will at least
        # partially make automatic invalidation possible
        for filepath in get_provider_yaml_paths():
            self.state.document.settings.record_dependencies.add(filepath)

        return node.children

    def render_content(self, *, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
        """Return content in RST format"""
        # Fixed typo in the error message ("Tou" -> "You").
        raise NotImplementedError("You need to override render_content method.")
class OperatorsHooksReferenceDirective(BaseJinjaReferenceDirective):
    """Generates a list of operators, sensors, hooks"""

    def render_content(self, *, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
        """Return the rendered operators/hooks table in RST format."""
        return _render_operator_content(
            tags=tags,
            header_separator=header_separator,
        )
class TransfersReferenceDirective(BaseJinjaReferenceDirective):
    """Generate a list of transfer operators"""

    def render_content(self, *, tags: Optional[Set[str]], header_separator: str = DEFAULT_HEADER_SEPARATOR):
        """Return the rendered transfer-operators table in RST format."""
        return _render_transfer_content(
            tags=tags,
            header_separator=header_separator,
        )
def setup(app):
    """Setup plugin"""
    # Register both table-generating directives with Sphinx.
    app.add_directive('operators-hooks-ref', OperatorsHooksReferenceDirective)
    app.add_directive('transfers-ref', TransfersReferenceDirective)

    return {'parallel_read_safe': True, 'parallel_write_safe': True}
if __name__ == "__main__":
    # Standalone mode: render either table to stdout so template changes can
    # be inspected without running a full Sphinx build.
    import argparse

    parser = argparse.ArgumentParser(description='Render tables with integrations.')
    parser.add_argument(
        '--tag',
        dest='tags',
        action="append",
        help='If passed, displays integrations that have a matching tag.',
    )
    parser.add_argument('--header-separator', default=DEFAULT_HEADER_SEPARATOR)
    subparsers = parser.add_subparsers(help='sub-command help', metavar="COMMAND")
    subparsers.required = True

    parser_a = subparsers.add_parser(CMD_OPERATORS_AND_HOOKS)
    parser_a.set_defaults(cmd=CMD_OPERATORS_AND_HOOKS)

    parser_b = subparsers.add_parser(CMD_TRANSFERS)
    parser_b.set_defaults(cmd=CMD_TRANSFERS)

    args = parser.parse_args()

    if args.cmd == CMD_OPERATORS_AND_HOOKS:
        content = _render_operator_content(
            tags=set(args.tags) if args.tags else None, header_separator=args.header_separator
        )
    else:
        content = _render_transfer_content(
            tags=set(args.tags) if args.tags else None, header_separator=args.header_separator
        )

    print(content)
| airbnb/airflow | docs/exts/operators_and_hooks_ref.py | Python | apache-2.0 | 9,998 |
# -*- coding: utf-8 -*-
# Gitless - a version control system built on top of Git
# Licensed under MIT
"""End-to-end test."""
import logging
import os
import re
import time
from subprocess import CalledProcessError
import sys
from gitless.tests import utils
try:
text = unicode
except NameError:
text = str
class TestEndToEnd(utils.TestBase):
    """Base class for end-to-end tests: sets up a fresh gl repository."""

    def setUp(self):
        super(TestEndToEnd, self).setUp('gl-e2e-test')
        utils.gl('init')
        # Disable colored output so that we don't need to worry about ANSI escape
        # codes
        utils.git('config', 'color.ui', 'False')
        # Disable paging so that we don't have to use sh's _tty_out option, which is
        # not available on pbs
        if sys.platform != 'win32':
            utils.git('config', 'core.pager', 'cat')
        else:
            # On Windows, we need to call 'type' through cmd.exe (with 'cmd'). The /C
            # is so that the command window gets closed after 'type' finishes
            utils.git('config', 'core.pager', 'cmd /C type')
        utils.set_test_config()
class TestNotInRepo(utils.TestBase):
    """Every repo-scoped command must fail outside a Gitless repository."""

    def setUp(self):
        # Deliberately does NOT run `gl init`.
        super(TestNotInRepo, self).setUp('gl-e2e-test')

    def test_not_in_repo(self):
        def assert_not_in_repo(*cmds):
            for cmd in cmds:
                self.assertRaisesRegexp(
                    CalledProcessError, 'not in a Gitless\'s repository', utils.gl, cmd)

        assert_not_in_repo(
            'status', 'diff', 'commit', 'branch', 'merge', 'fuse', 'remote',
            'publish', 'history')
class TestBasic(TestEndToEnd):
    """Smoke test exercising track/untrack, commit, branch, fuse and merge."""

    def test_basic_functionality(self):
        utils.write_file('file1', 'Contents of file1')
        # Track
        utils.gl('track', 'file1')
        self.assertRaises(CalledProcessError, utils.gl, 'track', 'file1')
        self.assertRaises(CalledProcessError, utils.gl, 'track', 'non-existent')
        # Untrack
        utils.gl('untrack', 'file1')
        self.assertRaises(CalledProcessError, utils.gl, 'untrack', 'file1')
        self.assertRaises(CalledProcessError, utils.gl, 'untrack', 'non-existent')
        # Commit
        utils.gl('track', 'file1')
        utils.gl('commit', '-m', 'file1 commit')
        self.assertRaises(
            CalledProcessError, utils.gl, 'commit', '-m', 'nothing to commit')
        # History
        if 'file1 commit' not in utils.gl('history'):
            self.fail('Commit didn\'t appear in history')
        # Branch
        # Make some changes to file1 and branch out
        utils.write_file('file1', 'New contents of file1')
        utils.gl('branch', '-c', 'branch1')
        utils.gl('switch', 'branch1')
        if 'New' in utils.read_file('file1'):
            self.fail('Branch not independent!')
        # Switch back to master branch, check that contents are the same as before.
        utils.gl('switch', 'master')
        if 'New' not in utils.read_file('file1'):
            self.fail('Branch not independent!')
        out = utils.gl('branch')
        if '* master' not in out:
            self.fail('Branch status output wrong: {0}'.format(out))
        if 'branch1' not in out:
            self.fail('Branch status output wrong: {0}'.format(out))

        utils.gl('branch', '-c', 'branch2')
        utils.gl('branch', '-c', 'branch-conflict1')
        utils.gl('branch', '-c', 'branch-conflict2')
        utils.gl('commit', '-m', 'New contents commit')

        # Fuse
        utils.gl('switch', 'branch1')
        self.assertRaises(CalledProcessError, utils.gl, 'fuse')  # no upstream set
        try:
            utils.gl('fuse', 'master')
        except CalledProcessError as e:
            self.fail(utils.stderr(e))
        out = utils.gl('history')
        if 'file1 commit' not in out:
            self.fail(out)

        # Merge
        utils.gl('switch', 'branch2')
        self.assertRaises(CalledProcessError, utils.gl, 'merge')  # no upstream set
        utils.gl('merge', 'master')
        out = utils.gl('history')
        if 'file1 commit' not in out:
            self.fail(out)

        # Conflicting fuse
        utils.gl('switch', 'branch-conflict1')
        utils.write_file('file1', 'Conflicting changes to file1')
        utils.gl('commit', '-m', 'changes in branch-conflict1')
        try:
            utils.gl('fuse', 'master')
        except CalledProcessError as e:
            # expected
            # NOTE(review): accesses e.stderr directly here but uses
            # utils.stderr(e) above — presumably equivalent; confirm.
            err = e.stderr
            if 'conflict' not in err:
                self.fail(err)
        out = utils.gl('status')
        if 'file1 (with conflicts)' not in out:
            self.fail(out)

        # Try aborting
        utils.gl('fuse', '--abort')
        out = utils.gl('status')
        if 'file1' in out:
            self.fail(out)

        # Ok, now let's fix the conflicts
        try:
            utils.gl('fuse', 'master')
        except CalledProcessError as e:
            # expected
            err = e.stderr
            if 'conflict' not in err:
                self.fail(err)
        out = utils.gl('status')
        if 'file1 (with conflicts)' not in out:
            self.fail(out)

        utils.write_file('file1', 'Fixed conflicts!')
        self.assertRaises(
            CalledProcessError, utils.gl, 'commit', '-m', 'resolve not called')
        self.assertRaises(
            CalledProcessError, utils.gl, 'resolve', 'non-existent')
        utils.gl('resolve', 'file1')
        utils.gl('commit', '-m', 'fixed conflicts')
class TestCommit(TestEndToEnd):
    """Tests for `gl commit` with tracked/untracked files and path filters."""

    TRACKED_FP = 'file1'
    DIR_TRACKED_FP = 'dir/dir_file'
    UNTRACKED_FP = 'file2'
    FPS = [TRACKED_FP, DIR_TRACKED_FP, UNTRACKED_FP]
    DIR = 'dir'

    def setUp(self):
        super(TestCommit, self).setUp()
        utils.write_file(self.TRACKED_FP)
        utils.write_file(self.DIR_TRACKED_FP)
        utils.write_file(self.UNTRACKED_FP)
        utils.gl('track', self.TRACKED_FP, self.DIR_TRACKED_FP)

    def test_commit(self):
        """A plain commit includes all tracked files."""
        utils.gl('commit', '-m', 'msg')
        self.__assert_commit(self.TRACKED_FP, self.DIR_TRACKED_FP)

    def test_commit_relative(self):
        """Committing from a subdirectory behaves the same as from the root."""
        os.chdir(self.DIR)
        utils.gl('commit', '-m', 'msg')
        self.__assert_commit(self.TRACKED_FP, self.DIR_TRACKED_FP)

    def test_commit_only(self):
        """Passing a path commits only that file."""
        utils.gl('commit', self.TRACKED_FP, '-m', 'msg')
        self.__assert_commit(self.TRACKED_FP)

    def test_commit_only_relative(self):
        """Paths passed to commit are resolved relative to the cwd."""
        os.chdir(self.DIR)
        self.assertRaises(
            CalledProcessError, utils.gl, 'commit', self.TRACKED_FP, '-m', 'msg')
        utils.gl('commit', '../' + self.TRACKED_FP, '-m', 'msg')
        self.__assert_commit(self.TRACKED_FP)

    def test_commit_only_untrack(self):
        """An untracked file given explicitly gets committed."""
        utils.gl('commit', '-m', 'msg', self.UNTRACKED_FP)
        self.__assert_commit(self.UNTRACKED_FP)

    def test_commit_only_untrack_relative(self):
        """Untracked paths are also resolved relative to the cwd."""
        os.chdir(self.DIR)
        self.assertRaises(
            CalledProcessError, utils.gl, 'commit', self.UNTRACKED_FP, '-m', 'msg')
        utils.gl('commit', '../' + self.UNTRACKED_FP, '-m', 'msg')
        self.__assert_commit(self.UNTRACKED_FP)

    def test_commit_include(self):
        """--include adds an untracked file to the default commit set."""
        utils.gl('commit', '-m', 'msg', '--include', self.UNTRACKED_FP)
        self.__assert_commit(
            self.TRACKED_FP, self.DIR_TRACKED_FP, self.UNTRACKED_FP)

    def test_commit_exclude_include(self):
        """--include and --exclude can be combined."""
        utils.gl(
            'commit', '-m', 'msg',
            '--include', self.UNTRACKED_FP, '--exclude', self.TRACKED_FP)
        self.__assert_commit(self.UNTRACKED_FP, self.DIR_TRACKED_FP)

    def test_commit_no_files(self):
        """Commits with nothing to commit or bad paths must fail."""
        self.assertRaises(
            CalledProcessError, utils.gl, 'commit', '--exclude',
            self.TRACKED_FP, self.DIR_TRACKED_FP, '-m', 'msg')
        self.assertRaises(
            CalledProcessError, utils.gl, 'commit', 'non-existent', '-m', 'msg')
        self.assertRaises(
            CalledProcessError, utils.gl, 'commit', '-m', 'msg',
            '--exclude', 'non-existent')
        self.assertRaises(
            CalledProcessError, utils.gl, 'commit', '-m', 'msg',
            '--include', 'non-existent')

    def test_commit_dir(self):
        """A path inside a nested directory can be committed directly."""
        fp = 'dir/f'
        utils.write_file(fp)
        utils.gl('commit', fp, '-m', 'msg')
        self.__assert_commit('dir/f')

    def __assert_commit(self, *expected_committed):
        """Assert exactly the given paths appear in the verbose history."""
        h = utils.gl('history', '-v')
        for fp in expected_committed:
            if fp not in h:
                self.fail('{0} was apparently not committed!'.format(fp))
        expected_not_committed = [
            fp for fp in self.FPS if fp not in expected_committed]
        for fp in expected_not_committed:
            if fp in h:
                self.fail('{0} was apparently committed!'.format(fp))
class TestStatus(TestEndToEnd):
    """`gl status` shows paths relative to the current working directory."""

    DIR = 'dir'
    TRACKED_DIR_FP = os.path.join('dir', 'file1')
    UNTRACKED_DIR_FP = os.path.join('dir', 'file2')

    def setUp(self):
        super(TestStatus, self).setUp()
        utils.write_file(self.TRACKED_DIR_FP)
        utils.write_file(self.UNTRACKED_DIR_FP)
        utils.gl('commit', self.TRACKED_DIR_FP, '-m', 'commit')

    def test_status_relative(self):
        utils.write_file(self.TRACKED_DIR_FP, contents='some modifications')
        st = utils.gl('status')
        if self.TRACKED_DIR_FP not in st:
            self.fail()
        if self.UNTRACKED_DIR_FP not in st:
            self.fail()
        # From inside the directory only the relative names should appear.
        os.chdir(self.DIR)
        st = utils.gl('status')
        rel_tracked = os.path.relpath(self.TRACKED_DIR_FP, self.DIR)
        rel_untracked = os.path.relpath(self.UNTRACKED_DIR_FP, self.DIR)
        if (self.TRACKED_DIR_FP in st) or (rel_tracked not in st):
            self.fail()
        if (self.UNTRACKED_DIR_FP in st) or (rel_untracked not in st):
            self.fail()
class TestBranch(TestEndToEnd):
    """Tests for creating, removing and listing branches."""

    BRANCH_1 = 'branch1'
    BRANCH_2 = 'branch2'

    def setUp(self):
        super(TestBranch, self).setUp()
        utils.write_file('f')
        utils.gl('commit', 'f', '-m', 'commit')

    def test_create(self):
        """Duplicate and invalid branch names are rejected."""
        utils.gl('branch', '-c', self.BRANCH_1)
        self.assertRaises(
            CalledProcessError, utils.gl, 'branch', '-c', self.BRANCH_1)
        self.assertRaises(
            CalledProcessError, utils.gl, 'branch', '-c', 'evil*named*branch')
        if self.BRANCH_1 not in utils.gl('branch'):
            self.fail()

    def test_remove(self):
        """A branch can't delete itself and removal requires confirmation."""
        utils.gl('branch', '-c', self.BRANCH_1)
        utils.gl('switch', self.BRANCH_1)
        self.assertRaises(
            CalledProcessError, utils.gl, 'branch', '-d', self.BRANCH_1, _in='y')
        utils.gl('branch', '-c', self.BRANCH_2)
        utils.gl('switch', self.BRANCH_2)
        # Answering 'n' aborts the removal; 'y' confirms it.
        utils.gl('branch', '-d', self.BRANCH_1, _in='n')
        utils.gl('branch', '-d', self.BRANCH_1, _in='y')
        if self.BRANCH_1 in utils.gl('branch'):
            self.fail()

    def test_upstream(self):
        """Unsetting a missing upstream or setting a bogus one fails."""
        self.assertRaises(CalledProcessError, utils.gl, 'branch', '-uu')
        self.assertRaises(
            CalledProcessError, utils.gl, 'branch', '-su', 'non-existent')
        self.assertRaises(
            CalledProcessError, utils.gl, 'branch', '-su', 'non-existent/non-existent')

    def test_list(self):
        """Branches are listed in a stable (sorted) order."""
        utils.gl('branch', '-c', self.BRANCH_1)
        utils.gl('branch', '-c', self.BRANCH_2)
        branch_out = utils.gl('branch')
        self.assertTrue(
            branch_out.find(self.BRANCH_1) < branch_out.find(self.BRANCH_2))
class TestTag(TestEndToEnd):
    """Tests for creating, removing and listing tags."""

    TAG_1 = 'tag1'
    TAG_2 = 'tag2'

    def setUp(self):
        super(TestTag, self).setUp()
        utils.write_file('f')
        utils.gl('commit', 'f', '-m', 'commit')

    def test_create(self):
        """Duplicate and invalid tag names are rejected."""
        utils.gl('tag', '-c', self.TAG_1)
        self.assertRaises(CalledProcessError, utils.gl, 'tag', '-c', self.TAG_1)
        self.assertRaises(
            CalledProcessError, utils.gl, 'tag', '-c', 'evil*named*tag')
        if self.TAG_1 not in utils.gl('tag'):
            self.fail()

    def test_remove(self):
        """Tag removal asks for confirmation ('n' aborts, 'y' confirms)."""
        utils.gl('tag', '-c', self.TAG_1)
        utils.gl('tag', '-d', self.TAG_1, _in='n')
        utils.gl('tag', '-d', self.TAG_1, _in='y')
        if self.TAG_1 in utils.gl('tag'):
            self.fail()

    def test_list(self):
        """Tags are listed in a stable (sorted) order."""
        utils.gl('tag', '-c', self.TAG_1)
        utils.gl('tag', '-c', self.TAG_2)
        tag_out = utils.gl('tag')
        self.assertTrue(
            tag_out.find(self.TAG_1) < tag_out.find(self.TAG_2))
class TestDiffFile(TestEndToEnd):
    """Tests for `gl diff`, with and without explicit file arguments."""

    TRACKED_FP = 't_fp'
    DIR_TRACKED_FP = os.path.join('dir', 't_fp')
    UNTRACKED_FP = 'u_fp'
    DIR = 'dir'

    def setUp(self):
        super(TestDiffFile, self).setUp()
        utils.write_file(self.TRACKED_FP)
        utils.write_file(self.DIR_TRACKED_FP)
        utils.gl('commit', self.TRACKED_FP, self.DIR_TRACKED_FP, '-m', 'commit')
        utils.write_file(self.UNTRACKED_FP)

    def test_empty_diff(self):
        """With no modifications the diff reports nothing to diff."""
        if 'No files to diff' not in utils.gl('diff'):
            self.fail()

    def test_diff_nonexistent_fp(self):
        """Diffing a missing path produces a "doesn't exist" error."""
        try:
            utils.gl('diff', 'file')
        except CalledProcessError as e:
            # expected
            err = e.stderr
            if 'doesn\'t exist' not in err:
                self.fail()

    def test_basic_diff(self):
        """A modified tracked file shows its added lines."""
        utils.write_file(self.TRACKED_FP, contents='contents')
        out1 = utils.gl('diff')
        if '+contents' not in out1:
            self.fail()
        out2 = utils.gl('diff', self.TRACKED_FP)
        if '+contents' not in out2:
            self.fail()
        self.assertEqual(out1, out2)

    def test_basic_diff_relative(self):
        """Diff paths are resolved relative to the current directory."""
        utils.write_file(self.TRACKED_FP, contents='contents_tracked')
        utils.write_file(self.DIR_TRACKED_FP, contents='contents_dir_tracked')
        os.chdir(self.DIR)
        out1 = utils.gl('diff')
        if '+contents_tracked' not in out1:
            self.fail()
        if '+contents_dir_tracked' not in out1:
            self.fail()
        rel_dir_tracked_fp = os.path.relpath(self.DIR_TRACKED_FP, self.DIR)
        out2 = utils.gl('diff', rel_dir_tracked_fp)
        if '+contents_dir_tracked' not in out2:
            self.fail()

    def test_diff_dir(self):
        """Diff works for files in nested directories."""
        fp = 'dir/dir/f'
        utils.write_file(fp, contents='contents')
        out = utils.gl('diff', fp)
        if '+contents' not in out:
            self.fail()

    def test_diff_non_ascii(self):
        """Diff output handles non-ASCII file contents."""
        if sys.platform == 'win32':
            # Skip this test on Windows until we fix Unicode support
            return
        contents = '’◕‿◕’©Ä☺’ಠ_ಠ’'
        utils.write_file(self.TRACKED_FP, contents=contents)
        out1 = utils.gl('diff')
        if '+' + contents not in out1:
            self.fail('out is ' + out1)
        out2 = utils.gl('diff', self.TRACKED_FP)
        if '+' + contents not in out2:
            self.fail('out is ' + out2)
        self.assertEqual(out1, out2)
class TestOp(TestEndToEnd):
    """Base for fuse/merge tests: two branches with known commit histories.

    After setUp, ``self.commits`` maps a branch name to the ordered list of
    its commit ids, with COMMITS_NUMBER commits per branch.
    """

    COMMITS_NUMBER = 4
    OTHER = 'other'
    MASTER_FILE = 'master_file'
    OTHER_FILE = 'other_file'

    def setUp(self):
        super(TestOp, self).setUp()

        self.commits = {}

        def create_commits(branch_name, fp):
            self.commits[branch_name] = []
            utils.append_to_file(fp, contents='contents {0}\n'.format(0))
            out = utils.gl(
                'commit', '-m', 'ci 0 in {0}'.format(branch_name), '--include', fp)
            # Commit ids are scraped from the command's output.
            self.commits[branch_name].append(
                re.search(r'Commit Id: (\S*)', out, re.UNICODE).group(1))
            for i in range(1, self.COMMITS_NUMBER):
                utils.append_to_file(fp, contents='contents {0}\n'.format(i))
                out = utils.gl('commit', '-m', 'ci {0} in {1}'.format(i, branch_name))
                self.commits[branch_name].append(
                    re.search(r'Commit Id: (\S*)', out, re.UNICODE).group(1))

        # Branch out *before* committing so the histories diverge from the root.
        utils.gl('branch', '-c', self.OTHER)

        create_commits('master', self.MASTER_FILE)

        try:
            utils.gl('switch', self.OTHER)
        except CalledProcessError as e:
            raise Exception(e.stderr)
        create_commits(self.OTHER, self.OTHER_FILE)

        utils.gl('switch', 'master')
class TestFuse(TestOp):
    """End-to-end tests for `gl fuse` (rebase-like history combination)."""
    def __assert_history(self, expected):
        # Parses (commit number, branch) pairs from `gl history` output
        # (reversed so they read oldest-first) and compares them against
        # `expected`; also checks no fuse is reported as still in progress.
        out = utils.gl('history')
        cids = list(reversed(re.findall(r'ci (.*) in (\S*)', out, re.UNICODE)))
        self.assertCountEqual(
            cids, expected, 'cids is ' + text(cids) + ' exp ' + text(expected))
        st_out = utils.gl('status')
        self.assertFalse('fuse' in st_out)
    def __build(self, branch_name, cids=None):
        # Builds the expected (commit number, branch) pairs for the given
        # commit numbers (all COMMITS_NUMBER commits by default).
        if not cids:
            cids = range(self.COMMITS_NUMBER)
        return [(text(ci), branch_name) for ci in cids]
    def test_basic(self):
        utils.gl('fuse', self.OTHER)
        self.__assert_history(self.__build(self.OTHER) + self.__build('master'))
    def test_only_errors(self):
        # -o (only) must reject ids that don't exist or belong to the
        # current branch rather than the source branch.
        self.assertRaises(
            CalledProcessError, utils.gl, 'fuse', self.OTHER, '-o', 'non-existent-id')
        self.assertRaises(
            CalledProcessError, utils.gl, 'fuse', self.OTHER,
            '-o', self.commits['master'][1])
    def test_only_one(self):
        utils.gl('fuse', self.OTHER, '-o', self.commits[self.OTHER][0])
        self.__assert_history(
            self.__build(self.OTHER, cids=[0]) + self.__build('master'))
    def test_only_some(self):
        utils.gl('fuse', self.OTHER, '-o', *self.commits[self.OTHER][:2])
        self.__assert_history(
            self.__build(self.OTHER, [0, 1]) + self.__build('master'))
    def test_exclude_errors(self):
        self.assertRaises(
            CalledProcessError, utils.gl, 'fuse', self.OTHER, '-e', 'non-existent-id')
        self.assertRaises(
            CalledProcessError, utils.gl, 'fuse', self.OTHER,
            '-e', self.commits['master'][1])
    def test_exclude_one(self):
        last_ci = self.COMMITS_NUMBER - 1
        utils.gl('fuse', self.OTHER, '-e', self.commits[self.OTHER][last_ci])
        self.__assert_history(
            self.__build(self.OTHER, range(0, last_ci)) + self.__build('master'))
    def test_exclude_some(self):
        utils.gl('fuse', self.OTHER, '-e', *self.commits[self.OTHER][1:])
        self.__assert_history(
            self.__build(self.OTHER, cids=[0]) + self.__build('master'))
    def test_ip_dp(self):
        # 'dp' = divergent point; equivalent to the default insertion point.
        utils.gl('fuse', self.OTHER, '--insertion-point', 'dp')
        self.__assert_history(self.__build(self.OTHER) + self.__build('master'))
    def test_ip_head(self):
        # Inserting at HEAD puts the fused commits after master's commits.
        utils.gl('fuse', self.OTHER, '--insertion-point', 'HEAD')
        self.__assert_history(self.__build('master') + self.__build(self.OTHER))
    def test_ip_commit(self):
        # Inserting after master's second commit splits master's history.
        utils.gl('fuse', self.OTHER, '--insertion-point', self.commits['master'][1])
        self.__assert_history(
            self.__build('master', [0, 1]) + self.__build(self.OTHER) +
            self.__build('master', range(2, self.COMMITS_NUMBER)))
    def test_conflicts(self):
        def trigger_conflicts():
            # Excluding other's first commit makes the remaining commits
            # conflict with master's view of OTHER_FILE.
            self.assertRaisesRegexp(
                CalledProcessError, 'conflicts', utils.gl, 'fuse',
                self.OTHER, '-e', self.commits[self.OTHER][0])
        # Abort
        trigger_conflicts()
        utils.gl('fuse', '-a')
        self.__assert_history(self.__build('master'))
        # Fix conflicts
        trigger_conflicts()
        utils.gl('resolve', self.OTHER_FILE)
        utils.gl('commit', '-m', 'ci 1 in other')
        self.__assert_history(
            self.__build(self.OTHER, range(1, self.COMMITS_NUMBER)) +
            self.__build('master'))
    def test_conflicts_switch(self):
        # A mid-fuse conflict must be confined to the branch it occurred on:
        # switching away hides it, switching back restores it.
        utils.gl('switch', 'other')
        utils.write_file(self.OTHER_FILE, contents='uncommitted')
        utils.gl('switch', 'master')
        try:
            utils.gl('fuse', self.OTHER, '-e', self.commits[self.OTHER][0])
            self.fail()
        except CalledProcessError:
            pass
        # Switch
        utils.gl('switch', 'other')
        self.__assert_history(self.__build('other'))
        st_out = utils.gl('status')
        self.assertTrue('fuse' not in st_out)
        self.assertTrue('conflict' not in st_out)
        utils.gl('switch', 'master')
        st_out = utils.gl('status')
        self.assertTrue('fuse' in st_out)
        self.assertTrue('conflict' in st_out)
        # Check that we are able to complete the fuse after switch
        utils.gl('resolve', self.OTHER_FILE)
        utils.gl('commit', '-m', 'ci 1 in other')
        self.__assert_history(
            self.__build(self.OTHER, range(1, self.COMMITS_NUMBER)) +
            self.__build('master'))
        utils.gl('switch', 'other')
        self.assertEqual('uncommitted', utils.read_file(self.OTHER_FILE))
    def test_conflicts_multiple(self):
        # Two consecutive conflicting commits: each `gl commit` resolves one
        # conflict, and the last one finalizes the fuse.
        utils.gl('branch', '-c', 'tmp', '--divergent-point', 'HEAD~2')
        utils.gl('switch', 'tmp')
        utils.append_to_file(self.MASTER_FILE, contents='conflict')
        utils.gl('commit', '-m', 'will conflict 0')
        utils.append_to_file(self.MASTER_FILE, contents='conflict')
        utils.gl('commit', '-m', 'will conflict 1')
        self.assertRaisesRegexp(
            CalledProcessError, 'conflicts', utils.gl, 'fuse', 'master')
        utils.gl('resolve', self.MASTER_FILE)
        self.assertRaisesRegexp(
            CalledProcessError, 'conflicts', utils.gl, 'commit', '-m', 'ci 0 in tmp')
        utils.gl('resolve', self.MASTER_FILE)
        utils.gl('commit', '-m', 'ci 1 in tmp')  # this one should finalize the fuse
        self.__assert_history(
            self.__build('master') + self.__build('tmp', range(2)))
    def test_conflicts_multiple_uncommitted_changes(self):
        # Same as above, but with uncommitted changes present: restoring them
        # after the fuse fails, so they end up stashed in the file.
        utils.gl('branch', '-c', 'tmp', '--divergent-point', 'HEAD~2')
        utils.gl('switch', 'tmp')
        utils.append_to_file(self.MASTER_FILE, contents='conflict')
        utils.gl('commit', '-m', 'will conflict 0')
        utils.append_to_file(self.MASTER_FILE, contents='conflict')
        utils.gl('commit', '-m', 'will conflict 1')
        utils.write_file(self.MASTER_FILE, contents='uncommitted')
        self.assertRaisesRegexp(
            CalledProcessError, 'conflicts', utils.gl, 'fuse', 'master')
        utils.gl('resolve', self.MASTER_FILE)
        self.assertRaisesRegexp(
            CalledProcessError, 'conflicts', utils.gl, 'commit', '-m', 'ci 0 in tmp')
        utils.gl('resolve', self.MASTER_FILE)
        self.assertRaisesRegexp(
            CalledProcessError, 'failed to apply', utils.gl,
            'commit', '-m', 'ci 1 in tmp')
        self.__assert_history(
            self.__build('master') + self.__build('tmp', range(2)))
        self.assertTrue('Stashed' in utils.read_file(self.MASTER_FILE))
    def test_nothing_to_fuse(self):
        self.assertRaisesRegexp(
            CalledProcessError, 'No commits to fuse', utils.gl, 'fuse',
            self.OTHER, '-e', *self.commits[self.OTHER])
    def test_ff(self):
        # Fusing an ancestor-descendant pair fast-forwards.
        utils.gl('branch', '-c', 'tmp', '--divergent-point', 'HEAD~2')
        utils.gl('switch', 'tmp')
        utils.gl('fuse', 'master')
        self.__assert_history(self.__build('master'))
    def test_ff_ip_head(self):
        utils.gl('branch', '-c', 'tmp', '--divergent-point', 'HEAD~2')
        utils.gl('switch', 'tmp')
        utils.gl('fuse', 'master', '--insertion-point', 'HEAD')
        self.__assert_history(self.__build('master'))
    def test_uncommitted_changes(self):
        # Uncommitted tracked and untracked changes survive a clean fuse.
        utils.write_file(self.MASTER_FILE, contents='uncommitted')
        utils.write_file('master_untracked', contents='uncommitted')
        utils.gl('fuse', self.OTHER)
        self.assertEqual('uncommitted', utils.read_file(self.MASTER_FILE))
        self.assertEqual('uncommitted', utils.read_file('master_untracked'))
    def test_uncommitted_tracked_changes_that_conflict(self):
        utils.gl('branch', '-c', 'tmp', '--divergent-point', 'HEAD~1')
        utils.gl('switch', 'tmp')
        utils.write_file(self.MASTER_FILE, contents='uncommitted')
        self.assertRaisesRegexp(
            CalledProcessError, 'failed to apply', utils.gl, 'fuse',
            'master', '--insertion-point', 'HEAD')
        contents = utils.read_file(self.MASTER_FILE)
        self.assertTrue('uncommitted' in contents)
        self.assertTrue('contents 2' in contents)
    def test_uncommitted_tracked_changes_that_conflict_append(self):
        utils.gl('branch', '-c', 'tmp', '--divergent-point', 'HEAD~1')
        utils.gl('switch', 'tmp')
        utils.append_to_file(self.MASTER_FILE, contents='uncommitted')
        self.assertRaisesRegexp(
            CalledProcessError, 'failed to apply', utils.gl, 'fuse',
            'master', '--insertion-point', 'HEAD')
        contents = utils.read_file(self.MASTER_FILE)
        self.assertTrue('uncommitted' in contents)
        self.assertTrue('contents 2' in contents)
    # Disabled test documenting a known failure case; kept for reference.
    # def test_uncommitted_untracked_changes_that_conflict(self):
    #   utils.write_file(self.OTHER_FILE, contents='uncommitted in master')
    #   try:
    #     utils.gl('fuse', self.OTHER)
    #     self.fail()
    #   except CalledProcessError as e:
    #     self.assertTrue('failed to apply' in utils.stderr(e))
class TestMerge(TestOp):
    """End-to-end tests for `gl merge`."""
    def test_uncommitted_changes(self):
        # Uncommitted tracked and untracked changes survive a clean merge.
        utils.write_file(self.MASTER_FILE, contents='uncommitted')
        utils.write_file('master_untracked', contents='uncommitted')
        utils.gl('merge', self.OTHER)
        self.assertEqual('uncommitted', utils.read_file(self.MASTER_FILE))
        self.assertEqual('uncommitted', utils.read_file('master_untracked'))
    def test_uncommitted_tracked_changes_that_conflict(self):
        utils.gl('branch', '-c', 'tmp', '--divergent-point', 'HEAD~1')
        utils.gl('switch', 'tmp')
        utils.write_file(self.MASTER_FILE, contents='uncommitted')
        # NOTE(review): assertRaisesRegexp is the deprecated Python 3 alias
        # of assertRaisesRegex; kept for Python 2 compatibility.
        self.assertRaisesRegexp(
            CalledProcessError, 'failed to apply', utils.gl, 'merge', 'master')
        contents = utils.read_file(self.MASTER_FILE)
        self.assertTrue('uncommitted' in contents)
        self.assertTrue('contents 2' in contents)
    def test_uncommitted_tracked_changes_that_conflict_append(self):
        utils.gl('branch', '-c', 'tmp', '--divergent-point', 'HEAD~1')
        utils.gl('switch', 'tmp')
        utils.append_to_file(self.MASTER_FILE, contents='uncommitted')
        self.assertRaisesRegexp(
            CalledProcessError, 'failed to apply', utils.gl, 'merge', 'master')
        contents = utils.read_file(self.MASTER_FILE)
        self.assertTrue('uncommitted' in contents)
        self.assertTrue('contents 2' in contents)
class TestPerformance(TestEndToEnd):
    """Smoke tests asserting gl commands stay within 100x of raw git timings."""
    FPS_QTY = 10000
    def setUp(self):
        super(TestPerformance, self).setUp()
        # Create FPS_QTY small files so status/branch operations have a
        # large working tree to scan.
        for i in range(0, self.FPS_QTY):
            fp = 'f' + text(i)
            utils.write_file(fp, fp)
    def test_status_performance(self):
        def assert_status_performance():
            # The test fails if `gl status` takes more than 100 times
            # the time `git status` took.
            MAX_TOLERANCE = 100
            t = time.time()
            utils.gl('status')
            gl_t = time.time() - t
            t = time.time()
            utils.git('status')
            git_t = time.time() - t
            self.assertTrue(
                gl_t < git_t*MAX_TOLERANCE,
                msg='gl_t {0}, git_t {1}'.format(gl_t, git_t))
        # All files are untracked
        assert_status_performance()
        # Track all files, repeat
        logging.info('Doing a massive git add, this might take a while')
        utils.git('add', '.')
        logging.info('Done')
        assert_status_performance()
    def test_branch_switch_performance(self):
        MAX_TOLERANCE = 100
        utils.gl('commit', 'f1', '-m', 'commit')
        t = time.time()
        utils.gl('branch', '-c', 'develop')
        utils.gl('switch', 'develop')
        gl_t = time.time() - t
        # go back to previous state
        utils.gl('switch', 'master')
        # do the same for git (branch + stash + checkout approximates what
        # `gl switch` does under the hood)
        t = time.time()
        utils.git('branch', 'gitdev')
        utils.git('stash', 'save', '--all')
        utils.git('checkout', 'gitdev')
        git_t = time.time() - t
        self.assertTrue(
            gl_t < git_t*MAX_TOLERANCE,
            msg='gl_t {0}, git_t {1}'.format(gl_t, git_t))
| sdg-mit/gitless | gitless/tests/test_e2e.py | Python | mit | 25,793 |
# ztreamy: a framework for publishing semantic events on the Web
# Copyright (C) 2011-2015 Jesus Arias Fisteus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
"""Loggers used by the framework, mainly for performance evaluation.
In principle, they are intended for internal use only.
"""
import time
from socket import gethostname
class ZtreamyDefaultLogger(object):
    """No-op logger used when performance logging is disabled.

    Subclasses override the callbacks they care about and use the
    ``_open_file`` / ``_write_comments`` / ``_log`` helpers to append
    tab-separated records to a log file.
    """
    def __init__(self):
        self.log_file = None       # file-like object records are written to
        self.auto_flush = False    # flush after every record when True
    def close(self):
        pass
    def flush(self):
        pass
    def event_published(self, event):
        pass
    def event_dispatched(self, event):
        pass
    def event_delivered(self, event):
        pass
    def data_received(self, compressed, uncompressed):
        pass
    def manyc_event_finished(self, sequence_num, delays):
        pass
    def server_traffic_sent(self, timestamp, num_bytes):
        pass
    def server_closed(self, num_clients):
        pass
    def server_timing(self, cpu_time, real_time, init_time):
        pass
    def _open_file(self, node_id, filename):
        """Open `filename` for appending and write an identifying header."""
        self.log_file = open(filename, 'a')
        self.log_file.write('# Node: %s\n# Host: %s\n#\n'%(node_id,
                                                           gethostname()))
    def _write_comments(self, dict_data):
        """Write each key/value pair of `dict_data` as a '# key: value' line."""
        # BUG FIX: the original called dict.iteritems(), which only exists on
        # Python 2; items() behaves identically there and also works on
        # Python 3.
        for key, value in dict_data.items():
            self.log_file.write('# %s: %s\n'%(key, str(value)))
    def _log(self, parts):
        """Write `parts` as one tab-separated record, flushing if requested."""
        self.log_file.write('\t'.join(parts))
        self.log_file.write('\n')
        if self.auto_flush:
            self.log_file.flush()
class ZtreamyLogger(ZtreamyDefaultLogger):
    """File-backed logger that records every event lifecycle stage."""
    def __init__(self, node_id, filename):
        super(ZtreamyLogger, self).__init__()
        self._open_file(node_id, filename)
    def close(self):
        self.log_file.close()
    def flush(self):
        self.log_file.flush()
    def event_published(self, event):
        self._log(['event_publish', event.event_id, timestamp()])
    def event_dispatched(self, event):
        self._log(['event_dispatch', event.event_id, timestamp()])
    def event_delivered(self, event):
        self._log(['event_deliver', event.event_id, timestamp()])
    def data_received(self, compressed, uncompressed):
        self._log(['data_receive', str(compressed), str(uncompressed)])
    def manyc_event_finished(self, sequence_num, delays):
        record = ['manyc_event_finish', str(sequence_num)]
        record.extend(str(delay) for delay in delays)
        self._log(record)
class ZtreamyManycLogger(ZtreamyDefaultLogger):
    """Logger recording only traffic and many-clients benchmark records."""
    def __init__(self, node_id, filename):
        super(ZtreamyManycLogger, self).__init__()
        self._open_file(node_id, filename)
    def data_received(self, compressed, uncompressed):
        self._log(['data_receive', str(compressed), str(uncompressed)])
    def manyc_event_finished(self, sequence_num, delays):
        record = ['manyc_event_finish', str(sequence_num)]
        record.extend(str(delay) for delay in delays)
        self._log(record)
class CompactServerLogger(ZtreamyDefaultLogger):
    """Logger that records only aggregate server-side statistics."""
    def __init__(self, node_id, filename, comments):
        super(CompactServerLogger, self).__init__()
        self._open_file(node_id, filename)
        self._write_comments(comments)
    def server_closed(self, num_clients):
        self._log(['server_closed', str(time.time()), str(num_clients)])
    def server_traffic_sent(self, timestamp, num_bytes):
        # Note: the `timestamp` parameter shadows the module-level
        # timestamp() helper within this method.
        self._log(['server_traffic_sent', str(timestamp), str(num_bytes)])
    def server_timing(self, cpu_time, real_time, init_time):
        self._log(
            ['server_timing', str(cpu_time), str(real_time), str(init_time)])
def timestamp():
    """Return the current Unix time as a string with microsecond precision."""
    return format(time.time(), '.6f')
# Default logger: a no-op instance. Applications replace this module-level
# object with a real logger (e.g. ZtreamyLogger) to enable performance logging.
logger = ZtreamyDefaultLogger()
| jfisteus/ztreamy | ztreamy/logger.py | Python | gpl-3.0 | 4,535 |
# helper functions to add (generate) calendar days in data set
import calendar
import graphlab as gl
import numpy as np
def add_running_year(month_sf, start_year):
    """Derive a year column from a chronologically ordered month column.

    Starting at `start_year`, the year is incremented every time the month
    value decreases relative to the previous row (i.e. the sequence wrapped
    past December). Returns a gl.SArray of years, one per input row.
    """
    years = []
    year = start_year
    for idx in range(len(month_sf)):
        if idx > 0 and month_sf[idx] < month_sf[idx - 1]:
            # Month went backwards: we rolled over into the next year.
            year += 1
        years.append(year)
    return gl.SArray(years)
def add_month_running_date(data, year_column_name, month_column_name, wkday_column_name):
    """Map each row's (year, month, weekday) to its day of the month.

    Rows are assumed to be in chronological order within a single month. A
    new calendar week starts whenever the weekday value decreases relative
    to the previous row. Returns a gl.SArray of day-of-month integers.

    NOTE(review): if no calendar cell matches a row, nothing is appended for
    that row and `running_date` may be referenced before assignment --
    behavior inherited from the original; confirm callers always pass a
    single month's rows in order.
    """
    calendar.setfirstweekday(calendar.SUNDAY)
    year_sf = data[year_column_name].astype(int)
    month_sf = data[month_column_name].astype(int)
    wkday_sf = data[wkday_column_name].astype(int)
    monthcal_date_sf = []
    prev_running_date = 0
    for row_idx in range(len(data)):
        running_year = year_sf[row_idx]
        running_month = month_sf[row_idx]
        running_wkday = wkday_sf[row_idx]
        # monthcal is a list of weeks; each week is a 7-element list of
        # day-of-month values (0 for days outside this month).
        monthcal = calendar.monthcalendar(running_year, running_month)
        if row_idx == 0:
            prev_running_wkday = running_wkday
            month_week = 0
        else:
            prev_running_wkday = wkday_sf[row_idx - 1]
        # A smaller weekday than the previous row means we wrapped into a
        # new calendar week.
        if running_wkday < prev_running_wkday:
            month_week += 1
        for week in range(month_week, len(monthcal)):
            date = monthcal[week][running_wkday]
            if row_idx == 0:
                prev_running_date = date
            # BUG FIX: the original tested `date is not 0`, an identity
            # comparison with an int literal that only works because CPython
            # caches small ints (and raises SyntaxWarning on Python >= 3.8);
            # use a proper inequality. The bitwise `&` between booleans is
            # also replaced with short-circuiting `and` (same result here,
            # since the operands are side-effect-free bools).
            if date != 0 and running_wkday < prev_running_wkday and date > prev_running_date:
                running_date = date
                prev_running_date = date
                month_week = week
                monthcal_date_sf.append(running_date)
                break
            if date != 0 and running_wkday == prev_running_wkday and date == prev_running_date:
                running_date = date
                prev_running_date = date
                month_week = week
                monthcal_date_sf.append(running_date)
                break
            if date != 0 and running_wkday > prev_running_wkday and date > prev_running_date:
                running_date = date
                prev_running_date = date
                month_week = week
                monthcal_date_sf.append(running_date)
                break
    monthcal_date_sf = gl.SArray(monthcal_date_sf)
    return monthcal_date_sf
def add_running_date(data, year_column_name, month_column_name, wkday_column_name):
    """Compute a day-of-month column for a multi-month dataset.

    For each (year, month) boundary encountered while scanning the rows in
    order, filters the rows of that month out of `data` and delegates to
    add_month_running_date, concatenating the results. Returns a gl.SArray
    with one day-of-month value per input row (assuming every row is matched
    by its month's sub-computation).
    """
    year_sf = data[year_column_name]
    month_sf = data[month_column_name]
    data_sf = []
    for row_idx in range(len(data)):
        running_year = year_sf[row_idx]
        running_month = month_sf[row_idx]
        if(row_idx == 0):
            # First row: process its whole month.
            monthdata = data.filter_by(running_year, year_column_name).filter_by(running_month, month_column_name)
            monthcal_date_sf = add_month_running_date(monthdata, year_column_name, month_column_name, wkday_column_name)
            data_sf.extend(monthcal_date_sf)
        else:
            prev_running_year = year_sf[row_idx-1]
            prev_running_month = month_sf[row_idx-1]
            # Only process when a new (year, month) pair starts; rows within
            # an already-processed month are skipped.
            if((running_year != prev_running_year) | (running_month != prev_running_month)):
                monthdata = data.filter_by(running_year, year_column_name).filter_by(running_month, month_column_name)
                monthcal_date_sf = add_month_running_date(monthdata, year_column_name, month_column_name, wkday_column_name)
                data_sf.extend(monthcal_date_sf)
    data_sf = gl.SArray(data_sf)
    return data_sf
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-10 18:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Empty merge migration joining the 0005 and 0007 accounts branches.

    Contains no operations; it only records that both parent migrations
    have been applied so Django can linearize the history.
    """
    dependencies = [
        ('accounts', '0005_auto_20170615_1524'),
        ('accounts', '0007_auto_20170626_1811'),
    ]
    operations = [
    ]
| pattisdr/lookit-api | accounts/migrations/0008_merge_20170710_1803.py | Python | mit | 340 |
from socket import *
from xml.dom.minidom import parse
import xml.dom.minidom
# Server connection
host = "10.1.0.46"
print host
port=7777
s=socket(AF_INET, SOCK_STREAM)
print "socket made"
s.connect((host,port))
print "socket connected"
# Open XML document using minidom parser
DOMTree = xml.dom.minidom.parse("input.xml")
nodes = DOMTree.documentElement
n_node=nodes.getElementsByTagName("node")
for node in n_node:
if node.hasAttribute("number"):
number=node.getAttribute("number")
cameras = node.getElementsByTagName("camera")
for camera in cameras:
if camera.hasAttribute("id"):
cameraid=camera.getAttribute("id")
count = camera.getElementsByTagName('count')[0]
timestamp = camera.getElementsByTagName('timestamp')[0]
message='\nNode number: {0} \n Cameraid: {1} \n Count : {2} \n Timestamp : {3} \n' .format(number,cameraid,count.childNodes[0].data,timestamp.childNodes[0].data);
s.send(message);
#s.send("asgasg");
s.send("Exit");
| rokrapoorv/SMTS | RaspPI/client.py | Python | gpl-3.0 | 1,016 |
"""
Measure resonators, one at a time, with the readout tone centered in the filterbank bin.
"""
from __future__ import division
import time
import numpy as np
from kid_readout.roach import analog, calculate, hardware_tools
from kid_readout.measurement import acquire, basic
from kid_readout.equipment import hardware, agilent_33220
acquire.show_settings()
acquire.show_git_status()
logger = acquire.get_script_logger(__file__)
# Parameters
suffix = 'led'
attenuations = [40]
fft_gains = [5]
# Nominal resonator frequencies in Hz.
f_center_all = 1e6 * np.array([2522.24, 2605.96, 2723.65, 2787.96, 3851.13])
f_center = f_center_all[0:1] # select a subset of the frequencies
fractional_frequency_shift = 0
f_center *= (1 + fractional_frequency_shift)
df_baseband_target = 5e3
fine_sweep_num_linewidths = 5
Q_max_expected = 50e3
df_coarse_sweep = f_center.min() / Q_max_expected # A coarse sweep with a resolution of one linewidth should work
df_total = 4e6 # The total span of the baseband tones
df_lo = 2.5e3 # This is the smallest resolution available
f_baseband_minimum = 10e6 # Keep the tones away from the LO by at least this frequency.
sweep_length_seconds = 0 # Take the minimum amount of data, in this case one block
stream_length_seconds = 10
# Hardware
conditioner = analog.HeterodyneMarkII()
shield = hardware.Thing(name='magnetic_shield_pocket', state={'orientation': 'horizontal'})
# The generator state is updated in the acquisition loop from user input.
generator = hardware.Thing(name='generator', state={'waveform': 'square', 'V_pp': 0})
hw = hardware.Hardware(conditioner, shield, generator)
ri = hardware_tools.r1h11_with_mk2(initialize=True, use_config=False)
ri.adc_valon.set_ref_select(0) # internal
ri.lo_valon.set_ref_select(1) # external
# Calculate sweep parameters, LO and baseband sweep frequencies
ri_state = ri.state
tone_sample_exponent = int(np.round(np.log2(ri_state.adc_sample_rate / df_baseband_target)))
df_baseband = ri_state.adc_sample_rate / 2 ** tone_sample_exponent
logger.info("Baseband resolution is {:.0f} Hz using 2^{:d} samples".format(df_baseband, tone_sample_exponent))
num_sweep_tones = min(int(df_total / df_baseband), ri.max_num_waveforms(2 ** tone_sample_exponent))
logger.info("Using {:d} tones".format(num_sweep_tones))
f_baseband = f_baseband_minimum + ri.state.adc_sample_rate / 2**tone_sample_exponent * np.arange(num_sweep_tones)
logger.info("Coarse sweep span is {:.1f} MHz".format(1e-6 * f_baseband.ptp()))
coarse_stride = max(df_coarse_sweep // df_baseband, 1)
logger.info("Coarse sweep resolution is {:.0f} Hz".format(coarse_stride * df_baseband))
f_lo_center = df_lo * np.round((f_center - f_baseband.mean()) / df_lo)
# Run: one coarse sweep, then a fine sweep around the fitted resonance, then
# a timestream with the LED generator enabled, for each LO/attenuation pair.
npd = acquire.new_npy_directory(suffix=suffix)
tic = time.time()
try:
    ri.set_tone_baseband_freqs(freqs=1e-6 * f_baseband[:, np.newaxis], nsamp=2 ** tone_sample_exponent)
    for lo_index, f_lo in enumerate(f_lo_center):
        ri.set_lo(lomhz=1e-6 * f_lo, chan_spacing=1e-6 * df_lo)
        for attenuation_index, (attenuation, fft_gain) in enumerate(zip(attenuations, fft_gains)):
            ri.set_dac_attenuator(attenuation)
            ri.set_fft_gain(fft_gain)
            state = hw.state()
            state['lo_index'] = lo_index
            coarse_sweep = acquire.run_loaded_sweep(ri, length_seconds=sweep_length_seconds,
                                                    tone_bank_indices=np.arange(0, num_sweep_tones, coarse_stride))[0]
            npd.write(coarse_sweep)
            coarse_f_r = coarse_sweep.resonator.f_0
            coarse_Q = coarse_sweep.resonator.Q
            logger.info("Coarse sweep f_r = {:.3f} MHz +/- {:.0f} Hz".format(1e-6 * coarse_f_r,
                                                                            coarse_sweep.resonator.f_0_error))
            logger.info("Coarse sweep Q = {:.0f} +/- {:.0f}".format(coarse_Q, coarse_sweep.resonator.Q_error))
            # Re-center the LO so the resonance lands on a filterbank bin center.
            df_filterbank = calculate.stream_sample_rate(ri_state)
            f_baseband_bin_center = df_filterbank * np.round(f_baseband.mean() / df_filterbank)
            f_lo_fine = df_lo * np.round((coarse_f_r - f_baseband_bin_center) / df_lo)
            ri.set_lo(lomhz=1e-6 * f_lo_fine, chan_spacing=1e-6 * df_lo)
            # Fine sweep: only the tones within a few linewidths of the fit.
            fine_indices = np.where(np.abs(f_lo_fine + f_baseband - coarse_f_r) <=
                                    (fine_sweep_num_linewidths / 2) * (coarse_f_r / coarse_Q))[0]
            fine_sweep = acquire.run_loaded_sweep(ri, length_seconds=sweep_length_seconds,
                                                  tone_bank_indices=fine_indices)[0]
            ri.select_bank(np.argmin(np.abs(f_baseband_bin_center - f_baseband)))
            ri.select_fft_bins(np.array([0]))
            # NOTE(review): raw_input implies this script targets Python 2.
            vpp = float(raw_input("Enable the function generator output enter the p-p voltage: "))
            state['generator']['V_pp'] = vpp
            logger.info("Recording {:.1f} s stream".format(stream_length_seconds))
            stream = ri.get_measurement(num_seconds=stream_length_seconds, demod=True)[0]
            sweep_stream = basic.SingleSweepStream(sweep=fine_sweep, stream=stream, state=state)
            npd.write(sweep_stream)
            npd.write(ri.get_adc_measurement())
            raw_input("Disable the function generator output.")
            state['generator']['V_pp'] = 0
finally:
    # Always close the data directory, even on error or KeyboardInterrupt.
    npd.close()
print("Wrote {}".format(npd.root_path))
print("Elapsed time {:.0f} minutes.".format((time.time() - tic) / 60))
| ColumbiaCMB/kid_readout | apps/data_taking_scripts/cooldown/2017-02-10_hpd/r1h11_sweepstream_led.py | Python | bsd-2-clause | 5,355 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-07 02:42
from __future__ import unicode_literals
from django.db import migrations
# NOTE: The name of the constraint is generated by the Django ORM.
UNIQUE_INDEX_NAME = 'rest_framework_reactive_item_observer_id_order_9b8adde6_uniq'
class Migration(migrations.Migration):
    """Recreate the (observer_id, order) unique constraint as deferrable.

    Django cannot express DEFERRABLE constraints directly, so raw SQL drops
    the generated constraint, rebuilds the unique index, and re-attaches it
    as a constraint that can be deferred within a transaction.
    """
    dependencies = [('rest_framework_reactive', '0001_initial')]
    operations = [
        migrations.RunSQL(
            [
                # Drop old constraint.
                "ALTER TABLE rest_framework_reactive_item DROP CONSTRAINT {}".format(
                    UNIQUE_INDEX_NAME
                ),
                # Re-create the unique index.
                "CREATE UNIQUE INDEX {} ON rest_framework_reactive_item (observer_id, \"order\")".format(
                    UNIQUE_INDEX_NAME
                ),
                # Create new deferrable constraint.
                """ALTER TABLE rest_framework_reactive_item ADD CONSTRAINT
                {0} UNIQUE USING INDEX {0} DEFERRABLE INITIALLY IMMEDIATE""".format(
                    UNIQUE_INDEX_NAME
                ),
            ]
        )
    ]
| genialis/django-rest-framework-reactive | src/rest_framework_reactive/migrations/0002_defer_order_constraint.py | Python | apache-2.0 | 1,152 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.compat}.
"""
from __future__ import division, absolute_import
import socket, sys, traceback
from twisted.trial import unittest
from twisted.python.compat import set, frozenset, reduce, execfile, _PY3
from twisted.python.compat import comparable, cmp, nativeString, networkString
from twisted.python.compat import unicode as unicodeCompat, lazyByteSlice
from twisted.python.compat import reraise, NativeStringIO, iterbytes, intToBytes
from twisted.python.filepath import FilePath
class CompatTestCase(unittest.SynchronousTestCase):
    """
    Various utility functions in C{twisted.python.compat} provide same
    functionality as modern Python variants.
    """
    def test_set(self):
        """
        L{set} should behave like the expected set interface.
        """
        a = set()
        a.add('b')
        a.add('c')
        a.add('a')
        b = list(a)
        b.sort()
        self.assertEqual(b, ['a', 'b', 'c'])
        a.remove('b')
        b = list(a)
        b.sort()
        self.assertEqual(b, ['a', 'c'])
        # discard() of an absent element must be a no-op, not a KeyError.
        a.discard('d')
        b = set(['r', 's'])
        d = a.union(b)
        b = list(d)
        b.sort()
        self.assertEqual(b, ['a', 'c', 'r', 's'])
    def test_frozenset(self):
        """
        L{frozenset} should behave like the expected frozenset interface.
        """
        a = frozenset(['a', 'b'])
        # Immutability: frozenset must not expose mutators such as add().
        self.assertRaises(AttributeError, getattr, a, "add")
        self.assertEqual(sorted(a), ['a', 'b'])
        b = frozenset(['r', 's'])
        d = a.union(b)
        b = list(d)
        b.sort()
        self.assertEqual(b, ['a', 'b', 'r', 's'])
    def test_reduce(self):
        """
        L{reduce} should behave like the builtin reduce.
        """
        # Both the 2-argument form and the form with an initial value.
        self.assertEqual(15, reduce(lambda x, y: x + y, [1, 2, 3, 4, 5]))
        self.assertEqual(16, reduce(lambda x, y: x + y, [1, 2, 3, 4, 5], 1))
class IPv6Tests(unittest.SynchronousTestCase):
    """
    C{inet_pton} and C{inet_ntop} implementations support IPv6.
    """
    def testNToP(self):
        """
        The compat C{inet_ntop} converts packed (network-order) addresses to
        their presentation form for both AF_INET6 and AF_INET.

        Note: the packed addresses are Python 2 native (byte) strings; this
        whole class is skipped on Python 3 below.
        """
        from twisted.python.compat import inet_ntop
        f = lambda a: inet_ntop(socket.AF_INET6, a)
        g = lambda a: inet_ntop(socket.AF_INET, a)
        self.assertEqual('::', f('\x00' * 16))
        self.assertEqual('::1', f('\x00' * 15 + '\x01'))
        self.assertEqual(
            'aef:b01:506:1001:ffff:9997:55:170',
            f('\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70'))
        self.assertEqual('1.0.1.0', g('\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', g('\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', g('\xff\xff\xff\xff'))
        self.assertEqual('100::', f('\x01' + '\x00' * 15))
        self.assertEqual('100::1', f('\x01' + '\x00' * 14 + '\x01'))
    def testPToN(self):
        """
        The compat C{inet_pton} parses presentation-form addresses (including
        embedded IPv4 notation) into packed form, and rejects malformed input
        with L{ValueError}.
        """
        from twisted.python.compat import inet_pton
        f = lambda a: inet_pton(socket.AF_INET6, a)
        g = lambda a: inet_pton(socket.AF_INET, a)
        self.assertEqual('\x00\x00\x00\x00', g('0.0.0.0'))
        self.assertEqual('\xff\x00\xff\x00', g('255.0.255.0'))
        self.assertEqual('\xaa\xaa\xaa\xaa', g('170.170.170.170'))
        self.assertEqual('\x00' * 16, f('::'))
        self.assertEqual('\x00' * 16, f('0::0'))
        self.assertEqual('\x00\x01' + '\x00' * 14, f('1::'))
        self.assertEqual(
            '\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
            f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae'))
        self.assertEqual('\x00' * 14 + '\x00\x01', f('::1'))
        self.assertEqual('\x00' * 12 + '\x01\x02\x03\x04', f('::1.2.3.4'))
        self.assertEqual(
            '\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x01\x02\x03\xff',
            f('1:2:3:4:5:6:1.2.3.255'))
        # Malformed addresses: extra colons, double '::', bare IPv4, etc.
        for badaddr in ['1:2:3:4:5:6:7:8:', ':1:2:3:4:5:6:7:8', '1::2::3',
                        '1:::3', ':::', '1:2', '::1.2', '1.2.3.4::',
                        'abcd:1.2.3.4:abcd:abcd:abcd:abcd:abcd',
                        '1234:1.2.3.4:1234:1234:1234:1234:1234:1234',
                        '1.2.3.4']:
            self.assertRaises(ValueError, f, badaddr)
# These byte-string literals are only valid native strings on Python 2.
if _PY3:
    IPv6Tests.skip = "These tests are only relevant to old versions of Python"
class ExecfileCompatTestCase(unittest.SynchronousTestCase):
    """
    Tests for the Python 3-friendly L{execfile} implementation.
    """
    def writeScript(self, content):
        """
        Write L{content} to a new temporary file, returning the L{FilePath}
        for the new file.
        """
        path = self.mktemp()
        with open(path, "wb") as f:
            # Scripts are written as ASCII bytes; the FilePath wraps the
            # UTF-8 encoded (bytes) path.
            f.write(content.encode("ascii"))
        return FilePath(path.encode("utf-8"))
    def test_execfileGlobals(self):
        """
        L{execfile} executes the specified file in the given global namespace.
        """
        script = self.writeScript(u"foo += 1\n")
        globalNamespace = {"foo": 1}
        execfile(script.path, globalNamespace)
        self.assertEqual(2, globalNamespace["foo"])
    def test_execfileGlobalsAndLocals(self):
        """
        L{execfile} executes the specified file in the given global and local
        namespaces.
        """
        script = self.writeScript(u"foo += 1\n")
        globalNamespace = {"foo": 10}
        localNamespace = {"foo": 20}
        execfile(script.path, globalNamespace, localNamespace)
        # Only the local namespace is mutated; the global one is untouched.
        self.assertEqual(10, globalNamespace["foo"])
        self.assertEqual(21, localNamespace["foo"])
    def test_execfileUniversalNewlines(self):
        """
        L{execfile} reads in the specified file using universal newlines so
        that scripts written on one platform will work on another.
        """
        for lineEnding in u"\n", u"\r", u"\r\n":
            script = self.writeScript(u"foo = 'okay'" + lineEnding)
            globalNamespace = {"foo": None}
            execfile(script.path, globalNamespace)
            self.assertEqual("okay", globalNamespace["foo"])
class PY3Tests(unittest.SynchronousTestCase):
    """
    Identification of Python 2 vs. Python 3.
    """
    def test_python2(self):
        """
        On Python 2, C{_PY3} is False.
        """
        if not sys.version.startswith("2."):
            # Nothing to check on other versions.
            return
        self.assertFalse(_PY3)
    def test_python3(self):
        """
        On Python 3, C{_PY3} is True.
        """
        if not sys.version.startswith("3."):
            # Nothing to check on other versions.
            return
        self.assertTrue(_PY3)
@comparable
class Comparable(object):
    """
    Objects that can be compared to each other, but not others.
    """
    def __init__(self, value):
        # The value all comparisons delegate to.
        self.value = value
    def __cmp__(self, other):
        # Returning NotImplemented lets Python fall back to the other
        # operand's comparison methods for foreign types.
        if not isinstance(other, Comparable):
            return NotImplemented
        return cmp(self.value, other.value)
class ComparableTests(unittest.SynchronousTestCase):
    """
    L{comparable} decorated classes emulate Python 2's C{__cmp__} semantics.

    Each rich-comparison operator is exercised explicitly, since the
    decorator synthesizes all of them from the single C{__cmp__}.
    """
    def test_equality(self):
        """
        Instances of a class that is decorated by C{comparable} support
        equality comparisons.
        """
        # Make explicitly sure we're using ==:
        self.assertTrue(Comparable(1) == Comparable(1))
        self.assertFalse(Comparable(2) == Comparable(1))
    def test_nonEquality(self):
        """
        Instances of a class that is decorated by C{comparable} support
        inequality comparisons.
        """
        # Make explicitly sure we're using !=:
        self.assertFalse(Comparable(1) != Comparable(1))
        self.assertTrue(Comparable(2) != Comparable(1))
    def test_greaterThan(self):
        """
        Instances of a class that is decorated by C{comparable} support
        greater-than comparisons.
        """
        self.assertTrue(Comparable(2) > Comparable(1))
        self.assertFalse(Comparable(0) > Comparable(3))
    def test_greaterThanOrEqual(self):
        """
        Instances of a class that is decorated by C{comparable} support
        greater-than-or-equal comparisons.
        """
        self.assertTrue(Comparable(1) >= Comparable(1))
        self.assertTrue(Comparable(2) >= Comparable(1))
        self.assertFalse(Comparable(0) >= Comparable(3))
    def test_lessThan(self):
        """
        Instances of a class that is decorated by C{comparable} support
        less-than comparisons.
        """
        self.assertTrue(Comparable(0) < Comparable(3))
        self.assertFalse(Comparable(2) < Comparable(0))
    def test_lessThanOrEqual(self):
        """
        Instances of a class that is decorated by C{comparable} support
        less-than-or-equal comparisons.
        """
        self.assertTrue(Comparable(3) <= Comparable(3))
        self.assertTrue(Comparable(0) <= Comparable(3))
        self.assertFalse(Comparable(2) <= Comparable(0))
class Python3ComparableTests(unittest.SynchronousTestCase):
    """
    Python 3-specific functionality of C{comparable}: each generated rich
    comparison method propagates C{NotImplemented} from C{__cmp__}.
    """
    def _assertNotImplemented(self, methodName):
        """
        Assert that invoking the rich comparison method named C{methodName}
        on a L{Comparable} with an incomparable object returns
        C{NotImplemented}.
        """
        result = getattr(Comparable(1), methodName)(object())
        self.assertEqual(result, NotImplemented)
    def test_notImplementedEquals(self):
        """
        Instances of a class that is decorated by C{comparable} support
        returning C{NotImplemented} from C{__eq__} if it is returned by the
        underlying C{__cmp__} call.
        """
        self._assertNotImplemented('__eq__')
    def test_notImplementedNotEquals(self):
        """
        Instances of a class that is decorated by C{comparable} support
        returning C{NotImplemented} from C{__ne__} if it is returned by the
        underlying C{__cmp__} call.
        """
        self._assertNotImplemented('__ne__')
    def test_notImplementedGreaterThan(self):
        """
        Instances of a class that is decorated by C{comparable} support
        returning C{NotImplemented} from C{__gt__} if it is returned by the
        underlying C{__cmp__} call.
        """
        self._assertNotImplemented('__gt__')
    def test_notImplementedLessThan(self):
        """
        Instances of a class that is decorated by C{comparable} support
        returning C{NotImplemented} from C{__lt__} if it is returned by the
        underlying C{__cmp__} call.
        """
        self._assertNotImplemented('__lt__')
    def test_notImplementedGreaterThanEquals(self):
        """
        Instances of a class that is decorated by C{comparable} support
        returning C{NotImplemented} from C{__ge__} if it is returned by the
        underlying C{__cmp__} call.
        """
        self._assertNotImplemented('__ge__')
    def test_notImplementedLessThanEquals(self):
        """
        Instances of a class that is decorated by C{comparable} support
        returning C{NotImplemented} from C{__le__} if it is returned by the
        underlying C{__cmp__} call.
        """
        self._assertNotImplemented('__le__')
if not _PY3:
    # On Python 2, comparable() uses __cmp__ directly, so the rich
    # comparison methods checked above are never generated; skip the
    # whole suite there.
    Python3ComparableTests.skip = "Python 3 only."
class CmpTests(unittest.SynchronousTestCase):
    """
    L{cmp} should behave like the built-in Python 2 C{cmp}.
    """
    def test_equals(self):
        """
        L{cmp} returns 0 for equal objects.
        """
        for left, right in [(u"a", u"a"), (1, 1), ([1], [1])]:
            self.assertEqual(cmp(left, right), 0)
    def test_greaterThan(self):
        """
        L{cmp} returns 1 if its first argument is bigger than its second.
        """
        for left, right in [(4, 0), (b"z", b"a")]:
            self.assertEqual(cmp(left, right), 1)
    def test_lessThan(self):
        """
        L{cmp} returns -1 if its first argument is smaller than its second.
        """
        for left, right in [(0.1, 2.3), (b"a", b"d")]:
            self.assertEqual(cmp(left, right), -1)
class StringTests(unittest.SynchronousTestCase):
    """
    Compatibility functions and types for strings.
    """
    def assertNativeString(self, original, expected):
        """
        Raise an exception indicating a failed test if the output of
        C{nativeString(original)} is unequal to the expected string, or is
        not a native string.
        """
        converted = nativeString(original)
        self.assertEqual(converted, expected)
        self.assertIsInstance(converted, str)
    def test_nonASCIIBytesToString(self):
        """
        C{nativeString} raises a C{UnicodeError} if input bytes are not
        ASCII decodable.
        """
        self.assertRaises(UnicodeError, nativeString, b"\xFF")
    def test_nonASCIIUnicodeToString(self):
        """
        C{nativeString} raises a C{UnicodeError} if input Unicode is not
        ASCII encodable.
        """
        self.assertRaises(UnicodeError, nativeString, u"\u1234")
    def test_bytesToString(self):
        """
        C{nativeString} converts bytes to the native string format, assuming
        an ASCII encoding if applicable.
        """
        self.assertNativeString(b"hello", "hello")
    def test_unicodeToString(self):
        """
        C{nativeString} converts unicode to the native string format,
        assuming an ASCII encoding if applicable.
        """
        self.assertNativeString(u"Good day", "Good day")
    def test_stringToString(self):
        """
        C{nativeString} leaves native strings as native strings.
        """
        self.assertNativeString("Hello!", "Hello!")
    def test_unexpectedType(self):
        """
        C{nativeString} raises a C{TypeError} if given an object that is not
        a string of some sort.
        """
        self.assertRaises(TypeError, nativeString, 1)
    def test_unicode(self):
        """
        C{compat.unicode} is C{str} on Python 3, C{unicode} on Python 2.
        """
        if _PY3:
            expected = str
        else:
            expected = unicode
        self.assertIs(unicodeCompat, expected)
    def test_nativeStringIO(self):
        """
        L{NativeStringIO} is a file-like object that stores native strings
        in memory.
        """
        stream = NativeStringIO()
        stream.write("hello")
        stream.write(" there")
        self.assertEqual("hello there", stream.getvalue())
class NetworkStringTests(unittest.SynchronousTestCase):
    """
    Tests for L{networkString}.
    """
    def test_bytes(self):
        """
        L{networkString} returns a C{bytes} object passed to it unmodified.
        """
        self.assertEqual(networkString(b"foo"), b"foo")
    def test_bytesOutOfRange(self):
        """
        L{networkString} raises C{UnicodeError} if passed a C{bytes} instance
        containing bytes not used by ASCII.
        """
        nonAscii = u"\N{SNOWMAN}".encode('utf-8')
        self.assertRaises(UnicodeError, networkString, nonAscii)
    if _PY3:
        test_bytes.skip = test_bytesOutOfRange.skip = (
            "Bytes behavior of networkString only provided on Python 2.")
    def test_unicode(self):
        """
        L{networkString} returns a C{unicode} object passed to it encoded into a
        C{bytes} instance.
        """
        self.assertEqual(networkString(u"foo"), b"foo")
    def test_unicodeOutOfRange(self):
        """
        L{networkString} raises L{UnicodeError} if passed a C{unicode} instance
        containing characters not encodable in ASCII.
        """
        self.assertRaises(UnicodeError, networkString, u"\N{SNOWMAN}")
    if not _PY3:
        test_unicode.skip = test_unicodeOutOfRange.skip = (
            "Unicode behavior of networkString only provided on Python 3.")
    def test_nonString(self):
        """
        L{networkString} raises L{TypeError} if passed a non-string object or
        the wrong type of string object.
        """
        self.assertRaises(TypeError, networkString, object())
        # The "wrong" string type depends on which Python this is.
        wrongString = b"bytes" if _PY3 else u"text"
        self.assertRaises(TypeError, networkString, wrongString)
class ReraiseTests(unittest.SynchronousTestCase):
    """
    L{reraise} re-raises exceptions on both Python 2 and Python 3.
    """
    def test_reraiseWithNone(self):
        """
        Calling L{reraise} with an exception instance and a traceback of
        C{None} re-raises it with a new traceback.
        """
        # Manufacture a real exception (with a live traceback) to re-raise.
        try:
            1/0
        except:
            typ, value, tb = sys.exc_info()
        try:
            reraise(value, None)
        except:
            typ2, value2, tb2 = sys.exc_info()
            self.assertEqual(typ2, ZeroDivisionError)
            # The very same exception object is re-raised ...
            self.assertTrue(value is value2)
            # ... but since no traceback was supplied, the innermost frame
            # of the new traceback differs from the original one.
            self.assertNotEqual(traceback.format_tb(tb)[-1],
                                traceback.format_tb(tb2)[-1])
        else:
            self.fail("The exception was not raised.")
    def test_reraiseWithTraceback(self):
        """
        Calling L{reraise} with an exception instance and a traceback
        re-raises the exception with the given traceback.
        """
        # Manufacture a real exception (with a live traceback) to re-raise.
        try:
            1/0
        except:
            typ, value, tb = sys.exc_info()
        try:
            reraise(value, tb)
        except:
            typ2, value2, tb2 = sys.exc_info()
            self.assertEqual(typ2, ZeroDivisionError)
            self.assertTrue(value is value2)
            # Supplying the original traceback preserves it: the innermost
            # frame is identical in both tracebacks.
            self.assertEqual(traceback.format_tb(tb)[-1],
                             traceback.format_tb(tb2)[-1])
        else:
            self.fail("The exception was not raised.")
class Python3BytesTests(unittest.SynchronousTestCase):
    """
    Tests for L{iterbytes}, L{intToBytes}, L{lazyByteSlice}.
    """
    def test_iteration(self):
        """
        When L{iterbytes} is called with a bytestring, the returned object
        can be iterated over, resulting in the individual bytes of the
        bytestring.
        """
        self.assertEqual(list(iterbytes(b"abcd")),
                         [b'a', b'b', b'c', b'd'])
    def test_intToBytes(self):
        """
        When L{intToBytes} is called with an integer, the result is an
        ASCII-encoded string representation of the number.
        """
        self.assertEqual(b"213", intToBytes(213))
    def test_lazyByteSliceNoOffset(self):
        """
        L{lazyByteSlice} called with some bytes returns a semantically equal
        version of these bytes.
        """
        payload = b'123XYZ'
        self.assertEqual(bytes(lazyByteSlice(payload)), payload)
    def test_lazyByteSliceOffset(self):
        """
        L{lazyByteSlice} called with some bytes and an offset returns a
        semantically equal version of these bytes starting at the given
        offset.
        """
        payload = b'123XYZ'
        self.assertEqual(bytes(lazyByteSlice(payload, 2)), payload[2:])
    def test_lazyByteSliceOffsetAndLength(self):
        """
        L{lazyByteSlice} called with some bytes, an offset and a length
        returns a semantically equal version of these bytes starting at the
        given offset, up to the given length.
        """
        payload = b'123XYZ'
        self.assertEqual(bytes(lazyByteSlice(payload, 2, 3)), payload[2:5])
| biddisco/VTK | ThirdParty/Twisted/twisted/test/test_compat.py | Python | bsd-3-clause | 19,199 |
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-satellites
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
Utilities for extracting text from generated classes.
"""
def is_string(txt):
    """Return True if *txt* is a text (str) object, else False.

    The original implementation probed ``isinstance(txt, str)`` twice: the
    second probe was a Python 2 ``unicode`` check that 2to3 rewrote to
    ``str``, making it redundant.  A single isinstance check is equivalent.
    """
    return isinstance(txt, str)
def description(obj):
    """Return the whitespace-stripped text of *obj*, or None if absent."""
    # A missing description node maps to None rather than an empty string.
    return None if obj is None else description_bit(obj).strip()
def description_bit(obj):
    """
    Recursively flatten a generated-class node into plain text.

    Strings are returned as-is; other nodes are descended via their
    ``content``, ``content_`` or ``value`` attribute, in that order.
    Nodes named 'para' get a paragraph break appended.
    """
    if is_string(obj):
        return obj
    if hasattr(obj, 'content'):
        text = ''.join(description_bit(part) for part in obj.content)
    elif hasattr(obj, 'content_'):
        text = ''.join(description_bit(part) for part in obj.content_)
    elif hasattr(obj, 'value'):
        text = description_bit(obj.value)
    else:
        raise Exception(
            'Expecting a string or something with content, content_ or value attribute')
    # If this bit is a paragraph then add one some line breaks.
    if getattr(obj, 'name', None) == 'para':
        text += "\n\n"
    return text
| daniestevez/gr-satellites | docs/doxygen/doxyxml/text.py | Python | gpl-3.0 | 1,276 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Addon metadata consumed by the OpenERP module loader.
    'name': 'Passport Expiration',
    'version': '0.1',
    'author': 'Savoir-faire Linux',
    'maintainer': 'Savoir-faire Linux',
    'website': 'http://www.savoirfairelinux.com',
    'license': 'AGPL-3',
    'category': '',
    'summary': 'Adds an expiration date for passports',
    'description': """
Passport Expiration
============
This module adds the following to employees :
* passport_expiration_date
Contributors:
=============
* Sandy Carter (sandy.carter@savoirfairelinux.com)
""",
    # Requires the core HR module (employee model).
    'depends': ['hr', ],
    'external_dependencies': {
        'python': [],
    },
    # View definitions loaded at install/update time.
    'data': [
        'hr_view.xml',
    ],
    'demo': [],
    'test': [],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gurneyalex/vertical-travel | passport_expiration/__openerp__.py | Python | agpl-3.0 | 1,802 |
from enum import Enum
class Direction(Enum):
    """Unit direction vectors; 'up' maps to negative y (screen coordinates)."""
    invalid = (0.0, 0.0)
    up = (0.0, -1.0)
    down = (0.0, 1.0)
    left = (-1.0, 0.0)
    right = (1.0, 0.0)
    def x(self):
        """Horizontal component of this direction."""
        horizontal, _ = self.value
        return horizontal
    def y(self):
        """Vertical component of this direction."""
        _, vertical = self.value
        return vertical
    def __str__(self):
        return str(self.value)
| Daarknes/Gadakeco | src/util/directions.py | Python | gpl-3.0 | 328 |
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Base class for GTK unit test cases."""
import aeidon
import gaupol
__all__ = ("TestCase",)
class TestCase(aeidon.TestCase):
    """
    Base class for GTK unit test cases.
    Unit tests are designed to be run with ``py.test``, ``nose`` or something
    compatible. Tests should use plain ``assert`` statements to allow multiple
    different tools to be used to run the tests.
    """
    def new_application(self):
        """Return a new application with one open page."""
        app = gaupol.Application()
        app.add_page(self.new_page())
        app.window.show()
        return app
    def new_page(self):
        """Return a new page with two open documents."""
        page = gaupol.Page()
        project = page.project
        project.open_main(self.new_subrip_file(), "ascii")
        project.open_translation(self.new_microdvd_file(), "ascii")
        return page
    def teardown_method(self, method):
        """Remove state set for executing tests in `method`."""
        gaupol.util.iterate_main()
        # Destroy any top-level widgets the test created.
        for attribute in ("assistant", "dialog", "window"):
            if hasattr(self, attribute):
                getattr(self, attribute).destroy()
        if hasattr(self, "application"):
            window = self.application.window
            window.hide()
            window.destroy()
            gaupol.util.iterate_main()
        gaupol.conf.restore_defaults()
| otsaloma/gaupol | gaupol/unittest.py | Python | gpl-3.0 | 2,078 |
import base64
import datetime
import json
import os
import re
import unittest2 as unittest
from stacktester import openstack
from stacktester import exceptions
from stacktester.common import ssh
from stacktester.common import utils
class ServersTest(unittest.TestCase):
    """
    End-to-end tests of the OpenStack Compute (nova) /servers API:
    creation with personality files and passwords, metadata CRUD,
    address listing, changes-since filtering and deletion.
    """
    @classmethod
    def setUpClass(self):
        # Shared connection and config values for all tests in the class.
        self.os = openstack.Manager()
        self.image_ref = self.os.config.env.image_ref
        self.flavor_ref = self.os.config.env.flavor_ref
        self.ssh_timeout = self.os.config.nova.ssh_timeout
        self.build_timeout = self.os.config.nova.build_timeout
    def _assert_server_entity(self, server):
        """Assert `server` carries the expected keys and self/bookmark links."""
        actual_keys = set(server.keys())
        expected_keys = set((
            'id',
            'name',
            'hostId',
            'status',
            'metadata',
            'addresses',
            'links',
            'progress',
            'image',
            'flavor',
            'created',
            'updated',
            'accessIPv4',
            'accessIPv6',
            #KNOWN-ISSUE lp804093
            'uuid',
        ))
        self.assertTrue(expected_keys <= actual_keys)
        server_id = str(server['id'])
        mgmt_url = self.os.nova.management_url
        # The bookmark link omits the version segment of the management URL.
        bmk_url = re.sub(r'v1.1\/', r'', mgmt_url)
        self_link = os.path.join(mgmt_url, 'servers', server_id)
        bookmark_link = os.path.join(bmk_url, 'servers', server_id)
        expected_links = [
            {
                'rel': 'self',
                'href': self_link,
            },
            {
                'rel': 'bookmark',
                'href': bookmark_link,
            },
        ]
        self.assertEqual(server['links'], expected_links)
    def test_build_server_with_file(self):
        """Build a server with an injected file"""
        file_contents = 'testing'
        expected_server = {
            'name': 'stacktester1',
            'imageRef': self.image_ref,
            'flavorRef': self.flavor_ref,
            'personality': [
                {
                    'path': '/etc/test.txt',
                    'contents': base64.b64encode(file_contents),
                },
            ],
        }
        post_body = json.dumps({'server': expected_server})
        response, body = self.os.nova.request('POST',
                                              '/servers',
                                              body=post_body)
        # Verify returned server entity
        self.assertEqual(response.status, 202)
        _body = json.loads(body)
        self.assertEqual(_body.keys(), ['server'])
        created_server = _body['server']
        admin_pass = created_server.pop('adminPass', None)
        self._assert_server_entity(created_server)
        self.assertEqual(expected_server['name'], created_server['name'])
        # Wait for instance to boot
        self.os.nova.wait_for_server_status(created_server['id'],
                                            'ACTIVE',
                                            timeout=self.build_timeout)
        server = self.os.nova.get_server(created_server['id'])
        # Find IP of server
        try:
            (_, network) = server['addresses'].popitem()
            ip = network[0]['addr']
        except KeyError:
            self.fail("Failed to retrieve IP address from server entity")
        # Assert injected file is on instance, also verifying password works
        client = ssh.Client(ip, 'root', admin_pass, self.ssh_timeout)
        injected_file = client.exec_command('cat /etc/test.txt')
        self.assertEqual(injected_file, file_contents)
        # Clean up created server
        self.os.nova.delete_server(server['id'])
    def test_build_server_with_password(self):
        """Build a server with a password"""
        server_password = 'testpwd'
        expected_server = {
            'name': 'stacktester1',
            'imageRef': self.image_ref,
            'flavorRef': self.flavor_ref,
            'adminPass': server_password,
        }
        post_body = json.dumps({'server': expected_server})
        response, body = self.os.nova.request('POST',
                                              '/servers',
                                              body=post_body)
        # Check attributes on the returned entity
        self.assertEqual(response.status, 202)
        _body = json.loads(body)
        self.assertEqual(_body.keys(), ['server'])
        created_server = _body['server']
        admin_pass = created_server.pop('adminPass', None)
        self._assert_server_entity(created_server)
        self.assertEqual(expected_server['name'], created_server['name'])
        self.assertEqual(expected_server['adminPass'], admin_pass)
        # Wait for instance to boot
        self.os.nova.wait_for_server_status(created_server['id'],
                                            'ACTIVE',
                                            timeout=self.build_timeout)
        server = self.os.nova.get_server(created_server['id'])
        # Find IP of server
        try:
            (_, network) = server['addresses'].popitem()
            ip = network[0]['addr']
        except KeyError:
            self.fail("Failed to retrieve IP address from server entity")
        # Assert password was set to that in request
        client = ssh.Client(ip, 'root', server_password, self.ssh_timeout)
        self.assertTrue(client.test_connection_auth())
        # Clean up created server
        self.os.nova.delete_server(server['id'])
    def test_delete_server_building(self):
        """Delete a server while building"""
        # Make create server request
        server = {
            'name': 'stacktester1',
            'imageRef': self.image_ref,
            'flavorRef': self.flavor_ref,
        }
        created_server = self.os.nova.create_server(server)
        # Server should immediately be accessible, but in have building status
        server = self.os.nova.get_server(created_server['id'])
        self.assertEqual(server['status'], 'BUILD')
        self.os.nova.delete_server(created_server['id'])
        # Poll server until deleted
        try:
            url = '/servers/%s' % created_server['id']
            self.os.nova.poll_request_status('GET', url, 404)
        except exceptions.TimeoutException:
            self.fail("Server deletion timed out")
    def test_build_server(self):
        """Build and manipulate a server"""
        # Don't block for the server until later
        expected_server = {
            'name': 'stacktester1',
            'imageRef': self.image_ref,
            'flavorRef': self.flavor_ref,
            'metadata': {'testEntry': 'testValue'},
        }
        post_body = json.dumps({'server': expected_server})
        response, body = self.os.nova.request('POST',
                                              '/servers',
                                              body=post_body)
        # Ensure attributes were returned
        self.assertEqual(response.status, 202)
        _body = json.loads(body)
        self.assertEqual(_body.keys(), ['server'])
        created_server = _body['server']
        admin_pass = created_server.pop('adminPass')
        self._assert_server_entity(created_server)
        self.assertEqual(expected_server['name'], created_server['name'])
        self.assertEqual(created_server['accessIPv4'], '')
        self.assertEqual(created_server['accessIPv6'], '')
        self.assertEqual(expected_server['metadata'],
                         created_server['metadata'])
        server_id = created_server['id']
        # Get server again and ensure attributes stuck
        server = self.os.nova.get_server(server_id)
        self._assert_server_entity(server)
        self.assertEqual(server['name'], expected_server['name'])
        self.assertEqual(server['accessIPv4'], '')
        self.assertEqual(server['accessIPv6'], '')
        self.assertEqual(server['metadata'], created_server['metadata'])
        # Parse last-updated time
        update_time = utils.load_isotime(server['updated'])
        # Ensure server not returned with future changes-since.
        # BUGFIX: the query parameter is 'changes-since=<time>'; the previous
        # 'changes-since?<time>' produced a malformed query string.
        future_time = utils.dump_isotime(update_time + datetime.timedelta(1))
        params = 'changes-since=%s' % future_time
        response, body = self.os.nova.request('GET', '/servers?%s' % params)
        servers = json.loads(body)['servers']
        self.assertTrue(len(servers) == 0)
        # Ensure server is returned with past changes-since
        past_time = utils.dump_isotime(update_time - datetime.timedelta(1))
        params = 'changes-since=%s' % past_time
        response, body = self.os.nova.request('GET', '/servers?%s' % params)
        servers = json.loads(body)['servers']
        server_ids = map(lambda x: x['id'], servers)
        self.assertTrue(server_id in server_ids)
        # Update name
        new_server = {'name': 'stacktester2'}
        put_body = json.dumps({'server': new_server})
        url = '/servers/%s' % server_id
        resp, body = self.os.nova.request('PUT', url, body=put_body)
        # Output from update should be a full server
        self.assertEqual(resp.status, 200)
        data = json.loads(body)
        self.assertEqual(data.keys(), ['server'])
        self._assert_server_entity(data['server'])
        self.assertEqual('stacktester2', data['server']['name'])
        # Check that name was changed
        updated_server = self.os.nova.get_server(server_id)
        self._assert_server_entity(updated_server)
        self.assertEqual('stacktester2', updated_server['name'])
        # Update accessIPv4
        new_server = {'accessIPv4': '192.168.0.200'}
        put_body = json.dumps({'server': new_server})
        url = '/servers/%s' % server_id
        resp, body = self.os.nova.request('PUT', url, body=put_body)
        # Output from update should be a full server
        self.assertEqual(resp.status, 200)
        data = json.loads(body)
        self.assertEqual(data.keys(), ['server'])
        self._assert_server_entity(data['server'])
        self.assertEqual('192.168.0.200', data['server']['accessIPv4'])
        # Check that accessIPv4 was changed
        updated_server = self.os.nova.get_server(server_id)
        self._assert_server_entity(updated_server)
        self.assertEqual('192.168.0.200', updated_server['accessIPv4'])
        # Update accessIPv6
        new_server = {'accessIPv6': 'feed::beef'}
        put_body = json.dumps({'server': new_server})
        url = '/servers/%s' % server_id
        resp, body = self.os.nova.request('PUT', url, body=put_body)
        # Output from update should be a full server
        self.assertEqual(resp.status, 200)
        data = json.loads(body)
        self.assertEqual(data.keys(), ['server'])
        self._assert_server_entity(data['server'])
        self.assertEqual('feed::beef', data['server']['accessIPv6'])
        # Check that accessIPv6 was changed
        updated_server = self.os.nova.get_server(server_id)
        self._assert_server_entity(updated_server)
        self.assertEqual('feed::beef', updated_server['accessIPv6'])
        # Check metadata subresource
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        expected = {'metadata': {'testEntry': 'testValue'}}
        self.assertEqual(expected, result)
        # Ensure metadata container can be modified
        expected = {
            'metadata': {
                'new_meta1': 'new_value1',
                'new_meta2': 'new_value2',
            },
        }
        post_body = json.dumps(expected)
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('POST', url, body=post_body)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        # POST merges with the existing metadata rather than replacing it.
        expected['metadata']['testEntry'] = 'testValue'
        self.assertEqual(expected, result)
        # Ensure values stick
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertEqual(expected, result)
        # Ensure metadata container can be overwritten
        expected = {
            'metadata': {
                'new_meta3': 'new_value3',
                'new_meta4': 'new_value4',
            },
        }
        url = '/servers/%s/metadata' % server_id
        post_body = json.dumps(expected)
        response, body = self.os.nova.request('PUT', url, body=post_body)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertEqual(expected, result)
        # Ensure values stick
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertEqual(expected, result)
        # Set specific key
        expected_meta = {'meta': {'new_meta5': 'new_value5'}}
        put_body = json.dumps(expected_meta)
        url = '/servers/%s/metadata/new_meta5' % server_id
        response, body = self.os.nova.request('PUT', url, body=put_body)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertDictEqual(expected_meta, result)
        # Ensure value sticks
        expected_metadata = {
            'metadata': {
                'new_meta3': 'new_value3',
                'new_meta4': 'new_value4',
                'new_meta5': 'new_value5',
            },
        }
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        result = json.loads(body)
        self.assertDictEqual(expected_metadata, result)
        # Update existing key
        expected_meta = {'meta': {'new_meta4': 'new_value6'}}
        put_body = json.dumps(expected_meta)
        url = '/servers/%s/metadata/new_meta4' % server_id
        response, body = self.os.nova.request('PUT', url, body=put_body)
        self.assertEqual(200, response.status)
        result = json.loads(body)
        self.assertEqual(expected_meta, result)
        # Ensure value sticks
        expected_metadata = {
            'metadata': {
                'new_meta3': 'new_value3',
                'new_meta4': 'new_value6',
                'new_meta5': 'new_value5',
            },
        }
        url = '/servers/%s/metadata' % server_id
        response, body = self.os.nova.request('GET', url)
        result = json.loads(body)
        self.assertDictEqual(expected_metadata, result)
        # Delete a certain key
        url = '/servers/%s/metadata/new_meta3' % server_id
        response, body = self.os.nova.request('DELETE', url)
        self.assertEqual(204, response.status)
        # Make sure the key is gone
        url = '/servers/%s/metadata/new_meta3' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(404, response.status)
        # Delete a nonexistant key
        url = '/servers/%s/metadata/new_meta3' % server_id
        response, body = self.os.nova.request('DELETE', url)
        self.assertEqual(404, response.status)
        # Wait for instance to boot
        server_id = created_server['id']
        self.os.nova.wait_for_server_status(server_id,
                                            'ACTIVE',
                                            timeout=self.build_timeout)
        # Look for 'addresses' attribute on server
        url = '/servers/%s' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(response.status, 200)
        body = json.loads(body)
        self.assertTrue('addresses' in body['server'].keys())
        server_addresses = body['server']['addresses']
        # Addresses should be available from subresource
        url = '/servers/%s/ips' % server_id
        response, body = self.os.nova.request('GET', url)
        self.assertEqual(response.status, 200)
        body = json.loads(body)
        self.assertEqual(body.keys(), ['addresses'])
        ips_addresses = body['addresses']
        # Ensure both resources return identical information
        self.assertEqual(server_addresses, ips_addresses)
        # Validate entities within network containers
        for (network, network_data) in ips_addresses.items():
            url = '/servers/%s/ips/%s' % (server_id, network)
            response, body = self.os.nova.request('GET', url)
            self.assertEqual(response.status, 200)
            body = json.loads(body)
            self.assertEqual(body.keys(), [network])
            self.assertEqual(body[network], network_data)
            # Check each IP entity
            for ip_data in network_data:
                self.assertEqual(set(ip_data.keys()), set(['addr', 'version']))
        # Find IP of server
        try:
            (_, network) = server_addresses.items()[0]
            ip = network[0]['addr']
        except KeyError:
            self.fail("Failed to retrieve IP address from server entity")
        # Assert password works
        client = ssh.Client(ip, 'root', admin_pass, self.ssh_timeout)
        self.assertTrue(client.test_connection_auth())
        # Delete server
        url = '/servers/%s' % server_id
        response, body = self.os.nova.request('DELETE', url)
        self.assertEqual(response.status, 204)
        # Poll server until deleted
        try:
            url = '/servers/%s' % server_id
            self.os.nova.poll_request_status('GET', url, 404)
        except exceptions.TimeoutException:
            self.fail("Server deletion timed out")
    def test_create_server_invalid_image(self):
        """Create a server with an unknown image"""
        post_body = json.dumps({
            'server': {
                'name': 'stacktester1',
                'imageRef': -1,
                'flavorRef': self.flavor_ref,
            }
        })
        resp, body = self.os.nova.request('POST', '/servers', body=post_body)
        self.assertEqual(400, resp.status)
        fault = json.loads(body)
        expected_fault = {
            "badRequest": {
                "message": "Cannot find requested image",
                "code": 400,
            },
        }
        # KNOWN-ISSUE - The error message is confusing and should be improved
        #self.assertEqual(fault, expected_fault)
    def test_create_server_invalid_flavor(self):
        """Create a server with an unknown flavor"""
        post_body = json.dumps({
            'server': {
                'name': 'stacktester1',
                'imageRef': self.image_ref,
                'flavorRef': -1,
            }
        })
        resp, body = self.os.nova.request('POST', '/servers', body=post_body)
        self.assertEqual(400, resp.status)
        fault = json.loads(body)
        expected_fault = {
            "badRequest": {
                "message": "Cannot find requested flavor",
                "code": 400,
            },
        }
        # KNOWN-ISSUE lp804084
        #self.assertEqual(fault, expected_fault)
| rackspace-titan/stacktester | stacktester/tests/test_servers.py | Python | apache-2.0 | 19,355 |
#!/usr/bin/env python
import codecs
import json
import os
import time
from weibo import Client
import config
if __name__ == '__main__':
    # Fetch the latest public timeline from the Weibo API.
    c = Client(config.WEIBO_API_KEY, config.WEIBO_API_SECRET,
               config.REDIRECT_URI, token=config.TOKEN)
    timeline = c.get('statuses/public_timeline', count=200)
    gmt = time.gmtime()
    # Files are bucketed by UTC hour: DATA_DIR/YYYY/MM/DD/HH/.
    path = '%s/%s' % (config.DATA_DIR, time.strftime('%Y/%m/%d/%H', gmt))
    # Create that directory to hold these files if it doesn't yet exist.
    # An explicit isdir() check replaces the old bare try/except around
    # os.stat(), which silently swallowed unrelated errors.
    if not os.path.isdir(path):
        os.makedirs(path)
    fname_prefix = time.strftime('%Y%m%d-%H%M%SZ', gmt)
    fname_json = '%s/%s.json' % (path, fname_prefix)
    fname_txt = '%s/%s.txt' % (path, fname_prefix)
    # Write out the raw data, as utf8-encoded JSON.
    # (The 'encoding' keyword to json.dump is Python 2 only.)
    fp_json = codecs.open(fname_json, 'wb', encoding='utf-8')
    json.dump(timeline, fp_json, indent=2, ensure_ascii=False,
              encoding='utf-8')
    fp_json.close()
    os.system('gzip %s' % fname_json)
    # Write out the same data, but only needed values as delimited text.
    fp_txt = codecs.open(fname_txt, 'wb', encoding='utf-8')
    for status in timeline['statuses']:
        row = [status['mid'], status['created_at'],
               status['user']['city'], status['user']['province'],
               status['user']['location'],
               str(status['user']['followers_count']),
               str(status['user']['friends_count']),
               str(status['user']['bi_followers_count']),
               status['user']['gender'],
               str(status['user']['statuses_count']),
               status['text']]
        fp_txt.write(u'\t'.join(row) + '\n')
    fp_txt.close()
    os.system('gzip %s' % fname_txt)
| gwu-libraries/ywow | fetch.py | Python | mit | 1,713 |
"""
Load WKB into pysal shapes.
Where pysal shapes support multiple parts,
"MULTI"type shapes will be converted to a single multi-part shape:
MULTIPOLYGON -> Polygon
MULTILINESTRING -> Chain
Otherwise a list of shapes will be returned:
MULTIPOINT -> [pt0, ..., ptN]
Some concepts aren't well supported by pysal shapes.
For example:
wkt = 'MULTIPOLYGON EMPTY' -> '\x01 \x06\x00\x00\x00 \x00\x00\x00\x00'
| < | WKBMultiPolygon | 0 parts |
pysal.cg.Polygon does not support 0 part polygons.
None is returned in this case.
REFERENCE MATERIAL:
SOURCE: http://webhelp.esri.com/arcgisserver/9.3/dotNet/index.htm#geodatabases/the_ogc_103951442.htm
Basic Type definitions
byte : 1 byte
uint32 : 32 bit unsigned integer (4 bytes)
double : double precision number (8 bytes)
Building Blocks : Point, LinearRing
"""
from cStringIO import StringIO
from pysal import cg
import sys
import array
import struct
__author__ = 'Charles R Schmidt <schmidtc@gmail.com>'
__all__ = ['loads']
"""
enum wkbByteOrder {
wkbXDR = 0, Big Endian
wkbNDR = 1 Little Endian
};
"""
DEFAULT_ENDIAN = '<' if sys.byteorder == 'little' else '>'
ENDIAN = {'\x00': '>', '\x01': '<'}
def load_ring_little(dat):
    """
    Read one little-endian WKB LinearRing from the stream `dat`:
        LinearRing {
            uint32 numPoints;
            Point points[numPoints];
        }
    Returns the ring as a list of cg.Point instances.
    """
    count = struct.unpack('<I', dat.read(4))[0]
    # Each point is two doubles (16 bytes); unpack them all in one call.
    flat = struct.unpack('<%dd' % (count * 2), dat.read(count * 16))
    return [cg.Point(flat[j:j + 2]) for j in xrange(0, count * 2, 2)]
def load_ring_big(dat):
    """Big-endian counterpart of load_ring_little: read one WKB LinearRing."""
    count = struct.unpack('>I', dat.read(4))[0]
    # Each point is two doubles (16 bytes); unpack them all in one call.
    flat = struct.unpack('>%dd' % (count * 2), dat.read(count * 16))
    return [cg.Point(flat[j:j + 2]) for j in xrange(0, count * 2, 2)]
def loads(s):
    """Parse a WKB geometry into pysal ``cg`` shapes.

    WKBGeometry {
    union {
    WKBPoint point;
    WKBLineString linestring;
    WKBPolygon polygon;
    WKBGeometryCollection collection;
    WKBMultiPoint mpoint;
    WKBMultiLineString mlinestring;
    WKBMultiPolygon mpolygon;
    }
    };

    s -- a WKB byte string, or a file-like object positioned at the start
         of a geometry (file-like objects are used by the recursive calls
         for multi-part geometries).
    Returns a cg shape, a list of shapes (MULTIPOINT, GEOMETRYCOLLECTION),
    or None (MULTIPOLYGON EMPTY).  Raises TypeError for unsupported types.
    """
    # To allow recursive calls, read only the bytes we need.
    if hasattr(s, 'read'):
        dat = s
    else:
        dat = StringIO(s)
    endian = ENDIAN[dat.read(1)]
    # BUGFIX: the wkbType field is subject to the declared byte order as
    # well; a bare 'I' format used the machine's *native* order and would
    # misread big-endian (XDR) WKB on little-endian hosts (and vice versa).
    typ = struct.unpack(endian + 'I', dat.read(4))[0]
    if typ == 1:
        """
        WKBPoint {
        byte byteOrder;
        uint32 wkbType; 1
        Point point;
        }
        Point {
        double x;
        double y;
        };
        """
        x,y = struct.unpack(endian+'dd', dat.read(16))
        return cg.Point((x,y))
    elif typ == 2:
        """
        WKBLineString {
        byte byteOrder;
        uint32 wkbType; 2
        uint32 numPoints;
        Point points[numPoints];
        }
        """
        n = struct.unpack(endian+'I', dat.read(4))[0]
        xy = struct.unpack(endian+'%dd'%(n*2), dat.read(n*2*8))
        return cg.Chain([cg.Point(xy[i:i+2]) for i in xrange(0,n*2,2)])
    elif typ == 3:
        """
        WKBPolygon {
        byte byteOrder;
        uint32 wkbType; 3
        uint32 numRings;
        LinearRing rings[numRings];
        }
        WKBPolygon has exactly 1 outer ring and n holes.
        multipart Polygons are NOT support by WKBPolygon.
        """
        nrings = struct.unpack(endian+'I', dat.read(4))[0]
        load_ring = load_ring_little if endian == '<' else load_ring_big
        rings = [load_ring(dat) for _ in xrange(nrings)]
        return cg.Polygon(rings[0], rings[1:])
    elif typ == 4:
        """
        WKBMultiPoint {
        byte byteOrder;
        uint32 wkbType; 4
        uint32 num_wkbPoints;
        WKBPoint WKBPoints[num_wkbPoints];
        }
        """
        npts = struct.unpack(endian+'I', dat.read(4))[0]
        return [loads(dat) for _ in xrange(npts)]
    elif typ == 5:
        """
        WKBMultiLineString {
        byte byteOrder;
        uint32 wkbType; 5
        uint32 num_wkbLineStrings;
        WKBLineString WKBLineStrings[num_wkbLineStrings];
        }
        """
        nparts = struct.unpack(endian+'I', dat.read(4))[0]
        chains = [loads(dat) for _ in xrange(nparts)]
        # Merge the per-part chains into a single multi-part Chain.
        return cg.Chain(sum([c.parts for c in chains],[]))
    elif typ == 6:
        """
        wkbMultiPolygon {
        byte byteOrder;
        uint32 wkbType; 6
        uint32 num_wkbPolygons;
        WKBPolygon wkbPolygons[num_wkbPolygons];
        }
        """
        npolys = struct.unpack(endian+'I', dat.read(4))[0]
        polys = [loads(dat) for _ in xrange(npolys)]
        parts = sum([p.parts for p in polys], [])
        holes = sum([p.holes for p in polys if p.holes[0]], [])
        # MULTIPOLYGON EMPTY, isn't well supported by pysal shape types.
        if not parts:
            return None
        return cg.Polygon(parts, holes)
    elif typ == 7:
        """
        WKBGeometryCollection {
        byte byte_order;
        uint32 wkbType; 7
        uint32 num_wkbGeometries;
        WKBGeometry wkbGeometries[num_wkbGeometries]
        }
        """
        ngeoms = struct.unpack(endian+'I', dat.read(4))[0]
        return [loads(dat) for _ in xrange(ngeoms)]
    raise TypeError('Type (%d) is unknown or unsupported.'%typ)
if __name__ == '__main__':
    # TODO: Refactor below into Unit Tests
    # Round-trip check: WKT -> shapely -> WKB -> loads() -> shapely, then
    # compare the result against the original shapely geometry.
    wktExamples = ['POINT(6 10)',
                   'LINESTRING(3 4,10 50,20 25)',
                   'POLYGON((1 1,5 1,5 5,1 5,1 1),(2 2, 3 2, 3 3, 2 3,2 2))',
                   'MULTIPOINT(3.5 5.6,4.8 10.5)',
                   'MULTILINESTRING((3 4,10 50,20 25),(-5 -8,-10 -8,-15 -4))',
                   # This MULTIPOLYGON is not valid, the 2nd shell intersects the 1st.
                   #'MULTIPOLYGON(((1 1,5 1,5 5,1 5,1 1),(2 2, 3 2, 3 3, 2 3,2 2)),((3 3,6 2,6 4,3 3)))',
                   'MULTIPOLYGON(((1 1,5 1,5 5,1 5,1 1),(2 2, 3 2, 3 3, 2 3,2 2)),((5 3,6 2,6 4,5 3)))',
                   'GEOMETRYCOLLECTION(POINT(4 6),LINESTRING(4 6,7 10))',
                   #'POINT ZM (1 1 5 60)', <-- ZM is not supported by WKB ?
                   #'POINT M (1 1 80)', <-- M is not supported by WKB ?
                   #'POINT EMPTY', <-- NOT SUPPORT
                   'MULTIPOLYGON EMPTY']
    # shapely only used for testing.
    try:
        import shapely.wkt, shapely.geometry
        from pysal.contrib.shapely_ext import to_wkb
    except ImportError:
        print "shapely is used to test this module."
        raise
    for example in wktExamples:
        print example
        shape0= shapely.wkt.loads(example)
        shape1 = loads(shape0.to_wkb())
        # loads() returns lists for MULTIPOINT/GEOMETRYCOLLECTION, so those
        # need to be re-wrapped before shapely can compare them.
        if example.startswith('MULTIPOINT'):
            shape2 = shapely.geometry.asMultiPoint(shape1)
        elif example.startswith('GEOMETRYCOLLECTION'):
            shape2 = shapely.geometry.collection.GeometryCollection(map(shapely.geometry.asShape,shape1))
        elif example == 'MULTIPOLYGON EMPTY':
            #Skip Test
            shape2 = None
        else:
            shape2 = shapely.geometry.asShape(shape1)
        print shape1
        if shape2:
            assert shape0.equals(shape2)
            print shape0.equals(shape2)
        else:
            print "Skip"
        print ""
| sjsrey/pysal_core | pysal_core/io/util/wkb.py | Python | bsd-3-clause | 7,872 |
__author__ = 'megabytephreak'
from rdl_lexer import RdlLexer, RdlToken
from ply import yacc
from ply.lex import LexToken
import rdl_ast
from rdlcompiler.colorize import colorize, RED
from rdlcompiler.logger import logger
def make_list_prod(prod, tprod):
    """Build a PLY handler for the left-recursive list production
    ``prod : prod tprod | <empty>``; the result accumulates a Python list.
    """
    grammar = "{0} : {0} {1} \n |".format(prod, tprod)
    def rule(self, p):
        if len(p) != 3:
            # Empty alternative: start a fresh list.
            p[0] = []
        else:
            p[0] = p[1] + [p[2]]
    rule.__doc__ = grammar
    return rule
def make_oneof_prod(prod, prods):
    """Build a PLY handler for ``prod : prods[0] | prods[1] | ...`` that
    simply passes the single matched symbol through.
    """
    def rule(self, p):
        p[0] = p[1]
    alternatives = ['%s : %s' % (prod, prods[0])] + list(prods[1:])
    rule.__doc__ = '\n| '.join(alternatives)
    return rule
class RdlParser(RdlLexer):
    """PLY (yacc) parser for SystemRDL, layered on top of :class:`RdlLexer`.

    NOTE: the docstrings of the ``p_*`` methods below are PLY *grammar
    productions*, not documentation -- PLY reads them via introspection,
    so they must not be edited as prose.
    """
    start = 'root'
    p_root = make_list_prod('root', 'elem')
    p_elem = make_oneof_prod('elem', ['enum_def', 'comp_def',
                                      'anon_comp_inst', 'comp_inst',
                                      'default_prop_assign', 'prop_assign', 'dynamic_prop_assign'])
    def p_comp_def(self, p):
        """ comp_def : COMPTYPE id LBRACE comp_body RBRACE SEMI
        """
        p[0] = rdl_ast.CompDef(p[1].value, p[2].value, p[4])
    def p_anon_comp_inst(self, p):
        """ anon_comp_inst : int_ext COMPTYPE LBRACE comp_body RBRACE comp_inst_elems SEMI
                           | COMPTYPE LBRACE comp_body RBRACE comp_inst_elems SEMI
        """
        # len(p) distinguishes the alternative with the leading int_ext.
        if len(p) > 7:
            p[0] = rdl_ast.AnonCompInst(p[2].value, p[4], p[6], p[1])
        else:
            p[0] = rdl_ast.AnonCompInst(p[1].value, p[3], p[5], None)
    # def p_anon_comp_inst_error(self, p):
    #     """ anon_comp_inst : int_ext COMPTYPE LBRACE error RBRACE comp_inst_elems SEMI
    #                        | COMPTYPE LBRACE error RBRACE comp_inst_elems SEMI
    #     """
    #
    #     self.syntax_error("Syntax Error in component body, missing semicolon?", p[3])
    def p_comp_inst(self, p):
        """ comp_inst : int_ext id comp_inst_elems SEMI
                      | ID comp_inst_elems SEMI
        """
        # NOTE(review): `intext` is assigned but never used.
        intext = None
        if len(p) > 4:
            p[0] = rdl_ast.CompInst(p[2].value, p[3], p[1])
        else:
            p[0] = rdl_ast.CompInst(p[1].value, p[2])
    def p_comp_inst_error(self, p):
        """ comp_inst : int_ext error SEMI
                      | id error SEMI
        """
        self.syntax_error("Syntax Error in component instantiation", p[1])
    def p_int_ext(self, p):
        """ int_ext : INTEXT
        """
        p[0] = p[1].value
    p_comp_body = make_list_prod('comp_body', 'comp_elem')
    p_comp_elem = make_oneof_prod('comp_elem', ['enum_def', 'comp_def',
                                                'anon_comp_inst', 'comp_inst',
                                                'default_prop_assign', 'prop_assign', 'dynamic_prop_assign'])
    def p_comp_inst_elems(self, p):
        """ comp_inst_elems : comp_inst_elems COMMA comp_inst_elem
                            | comp_inst_elem
        """
        if len(p) > 2:
            p[0] = p[1] + [p[3]]
        else:
            p[0] = [p[1]]
    def p_comp_inst_elem(self, p):
        """ comp_inst_elem : id array_decl reset_value addr_alloc
        """
        p[0] = rdl_ast.InstParams(p[1].value, p[2], p[3], p[4])
    def p_array_decl(self, p):
        """ array_decl : LSQ numeric RSQ
                       | LSQ numeric COLON numeric RSQ
                       |
        """
        # Single size, a (left, right) range, or no array at all.
        if len(p) == 4:
            p[0] = p[2]
        elif len(p) == 6:
            p[0] = (p[2], p[4])
        else:
            p[0] = None
    def p_reset_value(self, p):
        """ reset_value : EQ sized_numeric
                        |
        """
        if len(p) > 1:
            p[0] = p[2]
        else:
            p[0] = None
    def p_addr_alloc(self, p):
        """ addr_alloc : alloc_pos alloc_inc
        """
        p[0] = (p[1], p[2])
    def p_alloc_pos(self, p):
        """ alloc_pos : AT numeric
                      | MOD numeric
                      |
        """
        # Keep the operator ('@' or '%') so the AST knows which form it was.
        if len(p) > 1:
            p[0] = (p[1].value, p[2])
        else:
            p[0] = None
    def p_alloc_inc(self, p):
        """ alloc_inc : INC numeric
                      |
        """
        if len(p) > 1:
            p[0] = p[2]
        else:
            p[0] = None
    def p_propname(self, p):
        """ propname : PROPNAME
                     | PRECEDENCETYPE
        """
        p[0] = p[1].value
    def p_default_prop_assign(self, p):
        """ default_prop_assign : DEFAULT prop_assign
        """
        # Reuse the inner assignment node, just flag it as a default.
        p[2].set_default = True
        p[0] = p[2]
    def p_prop_assign_0(self, p):
        """ prop_assign : propname maybe_value SEMI
        """
        p[0] = rdl_ast.PropAssign(p[1], p[2])
    def p_pop_assign_1(self, p):
        """ prop_assign : NONSTICKY INTRMOD propname maybe_value SEMI
                        | INTRMOD propname maybe_value SEMI
                        | NONSTICKY propname maybe_value SEMI
        """
        # NOTE(review): method name has a typo ("pop" vs "prop"); harmless
        # since PLY only requires the ``p_`` prefix, but worth renaming.
        if len(p) == 6:
            p[0] = rdl_ast.IntrPropAssign(p[3], p[4], (p[1].value, p[2].value))
        else:
            p[0] = rdl_ast.IntrPropAssign(p[2], p[3], (p[1].value,))
    def p_dynamic_prop_assign(self, p):
        """ dynamic_prop_assign : instance_ref maybe_value SEMI
        """
        p[0] = rdl_ast.PropAssign(p[1], p[2])
    def p_maybe_value(self, p):
        """ maybe_value : EQ value
                        |
        """
        # A bare property name (no '= value') means boolean True.
        if len(p) == 1:
            p[0] = True
        else:
            p[0] = p[2]
    def p_enum_def(self, p):
        """ enum_def : ENUM ID LBRACE enum_body RBRACE SEMI
        """
        p[0] = rdl_ast.EnumDef(p[2].value, p[4])
    p_enum_body = make_list_prod('enum_body', 'encoding')
    def p_encoding(self, p):
        """ encoding : ID EQ sized_numeric SEMI
                     | ID EQ sized_numeric LBRACE enum_props RBRACE SEMI
        """
        if len(p) == 5:
            p[0] = rdl_ast.EnumEncoding(p[1].value, p[3], [])
        else:
            p[0] = rdl_ast.EnumEncoding(p[1].value, p[3], p[5])
    p_enum_props = make_list_prod('enum_props', 'prop_assign')
    p_value = make_oneof_prod('value', ['instance_ref', 'literal'])
    def p_instance_ref(self, p):
        """ instance_ref : instance_ref_path
                         | instance_ref_path DEREF propname
        """
        p[0] = rdl_ast.InstanceRef(p[1])
        if len(p) > 2:
            p[0].set_prop(p[3])
    def p_instance_ref_path(self, p):
        """ instance_ref_path : instance_ref_elem
                              | instance_ref_path DOT instance_ref_elem
        """
        if len(p) == 2:
            p[0] = [p[1]]
        else:
            p[0] = p[1] + [p[3]]
    def p_instance_ref_elem(self, p):
        """ instance_ref_elem : ID
                              | ID LSQ numeric RSQ
        """
        if len(p) == 2:
            p[0] = p[1].value
        else:
            p[0] = rdl_ast.Subscript(p[1].value, p[3])
    def p_literal_1(self, p):
        """ literal : STRING
        """
        p[0] = p[1].value
    def p_literal_2(self, p):
        """ literal : TF
        """
        if p[1].value == 'true':
            p[0] = True
        else:
            p[0] = False
    def p_literal_3(self, p):
        """ literal : ACCESSTYPE
        """
        p[0] = rdl_ast.AccessType(p[1].value)
    def p_literal_4(self, p):
        """ literal : ADDRESSTYPE
        """
        p[0] = rdl_ast.AddressingType(p[1].value)
    def p_literal_5(self, p):
        """ literal : PRECEDENCETYPE
        """
        p[0] = rdl_ast.PrecedenceType(p[1].value)
    def p_literal_6(self, p):
        """ literal : numeric
        """
        p[0] = p[1]
    def p_numeric_0(self, p):
        """ numeric : NUM
        """
        p[0] = p[1].value
    def p_numeric_1(self, p):
        """ numeric : VNUM"""
        p[0] = p[1].value
    def p_sized_numeric_1(self, p):
        """ sized_numeric : VNUM"""
        p[0] = p[1].value
    def p_sized_numeric_2(self, p):
        """ sized_numeric : NUM
        """
        # Only 0 is acceptable as an unsized literal here.
        if p[1].value != 0:
            self.syntax_error("Expected sized_numeric (Verilog-style numeric literal or 0)", p[1])
        # -1 marks "size unknown" to match the (size, value) VNUM shape.
        p[0] = (-1, p[1].value)
    def p_id(self, p):
        """ id : ID
        """
        p[0] = p[1];
    def p_id_error(self, p):
        """ id : PROPNAME
               | INTRMOD
        """
        self.syntax_error(
            "Identifier '%s' is a keyword name. If you need to use this name, please use the escaped form '\\%s'."
            % (p[1].value, p[1].value), p[1])
        raise SyntaxError
    def p_error(self, t):
        # PLY error hook: report, then resynchronize at the next semicolon.
        if t:
            prev = self._parser.symstack[-1]
            if type(prev) == LexToken:
                msg = "Unexpected %s after %s." % (t.type, prev.type)
            else:
                print prev
                msg = "Unexpected %s." % t.type
            self.syntax_error(msg, t)
            while 1:
                tok = self.token() # Get the next token
                if not tok or tok.type == 'SEMI': break
            self._parser.errok()
            return tok
        else:
            msg = "Unexpected end of file"
            self._parser.restart()
            self.syntax_error(msg, t)
    def syntax_error(self, message, token):
        # Log a formatted error (with source position when a token is given)
        # and bump the error counter; parsing continues where possible.
        if token is not None:
            if not isinstance(token, RdlToken):
                token = RdlToken(token)
            logger.log_line(self.format_line_message(token, colorize(RED, "Error: ") + message))
        else:
            logger.log_line("Error: " + message)
        self.syntax_errors += 1
    def __init__(self, debug=False):
        RdlLexer.__init__(self)
        self.syntax_errors = 0
        self._parser = yacc.yacc(module=self,
                                 debug=debug,
                                 tabmodule=None, # Don't try to precomputed parse table from a file
                                 write_tables=False, # No point in saving the parse tables then either
                                 optimize=False, # Yet another part of avoiding the parse table loading
                                 )
    def parse(self, string):
        # Normalize tabs so reported column numbers are consistent.
        string = string.expandtabs(4)
        return self._parser.parse(string, lexer=self, debug=self.debug)
| MegabytePhreak/rdl | rdlcompiler/systemrdl/rdl_parser.py | Python | mit | 10,300 |
from enum import IntEnum
from django.contrib.auth.models import AbstractUser
from django.db import models
class Role(IntEnum):
    """Access level for a user; stored in the DB as a small integer."""
    Player = 0
    Contributor = 1
    Master = 2
# Django ``choices`` pairs for ``User.role``, derived from the ``Role``
# enum so the numeric codes and labels can never drift out of sync with it.
ROLE_CHOICES = tuple((role.value, role.name) for role in Role)
class User(AbstractUser):
    """Site user extending Django's default user with a role and an image."""
    # Access level; values/labels come from ROLE_CHOICES, defaults to Player (0).
    role = models.PositiveSmallIntegerField(choices=ROLE_CHOICES, default=0)
    # Optional profile picture.
    image = models.ImageField(null=True, blank=True)
| pennomi/brimstone-website | apps/accounts/models.py | Python | agpl-3.0 | 421 |
from . import define
# Option definitions shared by every service; ``define`` registers each one
# with its default, type and help text.
# Internal
define("api_version",
       default="0.2",
       help="Service API version to return to the users in header X-API-Version",
       type=str)
define("internal_restrict",
       default=["127.0.0.1/24", "::1/128"],
       help="An addresses considered internal (can be multiple). Requests from those are allowed to do everything, "
            "so adding public address is dangerous.",
       group="internal",
       multiple=True,
       type=str)
define("internal_broker",
       default="amqp://guest:guest@127.0.0.1:5672/",
       help="RabbitMQ messages broker location (amqp).",
       group="internal",
       type=str)
define("internal_max_connections",
       default=1,
       help="Maximum connections for internal broker (connection pool).",
       group="internal",
       type=int)
define("internal_channel_prefetch_count",
       default=1024,
       help="Channel prefetch for internal broker (how many a consumer may prefetch for processing).",
       group="internal",
       type=int)
# Token cache
define("token_cache_host",
       default="127.0.0.1",
       help="Location of access token cache (redis).",
       group="token_cache",
       type=str)
define("token_cache_port",
       default=6379,
       help="Port of access token cache (redis).",
       group="token_cache",
       type=int)
define("token_cache_db",
       default=9,
       help="Database of access token cache (redis).",
       group="token_cache",
       type=int)
define("token_cache_max_connections",
       default=500,
       help="Maximum connections to the token cache (connection pool).",
       group="token_cache",
       type=int)
# Discovery
define("discovery_service",
       default="http://localhost:9502",
       help="Discovery service location (if applicable).",
       group="discovery",
       type=str)
# Pub/sub
define("pubsub",
       default="amqp://guest:guest@127.0.0.1:5672/",
       help="Location of rabbitmq server for pub/sub operations.",
       type=str)
# Keys
# NOTE(review): relative default path — resolved against the process CWD;
# confirm services are always started from the expected directory.
define("auth_key_public",
       default="../.anthill-keys/anthill.pub",
       help="Location of public key required for access token verification.",
       type=str)
# Monitoring
define("enable_monitoring",
       default=False,
       help="Use monitoring or not.",
       group="monitoring",
       type=bool)
define("monitoring_host",
       default="127.0.0.1",
       help="Monitoring server location (InfluxDB).",
       group="monitoring",
       type=str)
define("monitoring_port",
       default=8086,
       help="Monitoring server port (InfluxDB).",
       group="monitoring",
       type=int)
define("monitoring_db",
       default="dev",
       help="Monitoring server database name (InfluxDB).",
       group="monitoring",
       type=str)
define("monitoring_username",
       default="",
       help="Monitoring server username name (InfluxDB).",
       group="monitoring",
       type=str)
define("monitoring_password",
       default="",
       help="Monitoring server password name (InfluxDB).",
       group="monitoring",
       type=str)
# Static content
define("serve_static",
       default=True,
       help="Should service serve /static files or should it be done by reverse proxy",
       type=bool)
# Other
define("debug",
       default=False,
       help="Is debug mode enabled (includes full stack trace)",
       type=bool)
| anthill-services/anthill-common | anthill/common/options/default.py | Python | mit | 3,407 |
# -*- coding: utf-8 -*-
"""
babel.messages.frontend
~~~~~~~~~~~~~~~~~~~~~~~
Frontends for the message extraction functionality.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import logging
import optparse
import os
import re
import shutil
import sys
import tempfile
from datetime import datetime
from locale import getpreferredencoding
from babel import __version__ as VERSION
from babel import Locale, localedata
from babel._compat import StringIO, string_types
from babel.core import UnknownLocaleError
from babel.messages.catalog import Catalog
from babel.messages.extract import DEFAULT_KEYWORDS, DEFAULT_MAPPING, check_and_call_extract_file, extract_from_dir
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po, write_po
from babel.util import LOCALTZ, odict
from distutils import log as distutils_log
from distutils.cmd import Command as _Command
from distutils.errors import DistutilsOptionError, DistutilsSetupError
try:
from ConfigParser import RawConfigParser
except ImportError:
from configparser import RawConfigParser
class Command(_Command):
    """Base class for the babel frontend commands.

    This class is a small shim between Distutils commands and
    optparse option parsing in the frontend command line.
    """
    #: Option name to be input as `args` on the script command line.
    as_args = None
    #: Options which allow multiple values.
    multiple_value_options = ()
    #: Log object. To allow replacement in the script command line runner.
    log = distutils_log
    def __init__(self, dist=None):
        # A less strict version of distutils' `__init__`: `dist` is optional
        # and the attributes distutils normally sets are given fixed defaults.
        self.distribution = dist
        self.initialize_options()
        self._dry_run = None
        self.verbose = False
        self.force = None
        self.help = 0
        self.finalized = 0
class compile_catalog(Command):
    """Catalog compilation command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import compile_catalog
        setup(
            ...
            cmdclass = {'compile_catalog': compile_catalog}
        )
    .. versionadded:: 0.9
    """
    description = 'compile message catalogs to binary MO files'
    user_options = [
        ('domain=', 'D',
         "domains of PO files (space separated list, default 'messages')"),
        ('directory=', 'd',
         'path to base directory containing the catalogs'),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.mo')"),
        ('locale=', 'l',
         'locale of the catalog to compile'),
        ('use-fuzzy', 'f',
         'also include fuzzy translations'),
        ('statistics', None,
         'print statistics about translations')
    ]
    boolean_options = ['use-fuzzy', 'statistics']
    def initialize_options(self):
        # Defaults for all supported options; validated in finalize_options.
        self.domain = 'messages'
        self.directory = None
        self.input_file = None
        self.output_file = None
        self.locale = None
        self.use_fuzzy = False
        self.statistics = False
    def finalize_options(self):
        if not self.input_file and not self.directory:
            raise DistutilsOptionError('you must specify either the input file '
                                       'or the base directory')
        if not self.output_file and not self.directory:
            raise DistutilsOptionError('you must specify either the output file '
                                       'or the base directory')
    def run(self):
        # The --domain option may list several space-separated domains.
        domains = self.domain.split()
        for domain in domains:
            self._run_domain(domain)
    def _run_domain(self, domain):
        # Collect (locale, po_path) pairs and the parallel mo output paths,
        # either from explicit options or by scanning the base directory.
        po_files = []
        mo_files = []
        if not self.input_file:
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.directory, self.locale,
                                              'LC_MESSAGES',
                                              domain + '.po')))
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             domain + '.mo'))
            else:
                for locale in os.listdir(self.directory):
                    po_file = os.path.join(self.directory, locale,
                                           'LC_MESSAGES', domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
                        mo_files.append(os.path.join(self.directory, locale,
                                                     'LC_MESSAGES',
                                                     domain + '.mo'))
        else:
            po_files.append((self.locale, self.input_file))
            if self.output_file:
                mo_files.append(self.output_file)
            else:
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             domain + '.mo'))
        if not po_files:
            raise DistutilsOptionError('no message catalogs found')
        for idx, (locale, po_file) in enumerate(po_files):
            mo_file = mo_files[idx]
            infile = open(po_file, 'rb')
            try:
                catalog = read_po(infile, locale)
            finally:
                infile.close()
            if self.statistics:
                translated = 0
                # Skip the header entry (index 0) when counting messages.
                for message in list(catalog)[1:]:
                    if message.string:
                        translated += 1
                percentage = 0
                if len(catalog):
                    percentage = translated * 100 // len(catalog)
                self.log.info(
                    '%d of %d messages (%d%%) translated in %r',
                    translated, len(catalog), percentage, po_file
                )
            if catalog.fuzzy and not self.use_fuzzy:
                self.log.info('catalog %r is marked as fuzzy, skipping', po_file)
                continue
            for message, errors in catalog.check():
                for error in errors:
                    self.log.error(
                        'error: %s:%d: %s', po_file, message.lineno, error
                    )
            self.log.info('compiling catalog %r to %r', po_file, mo_file)
            outfile = open(mo_file, 'wb')
            try:
                write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
            finally:
                outfile.close()
class extract_messages(Command):
    """Message extraction command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import extract_messages
        setup(
            ...
            cmdclass = {'extract_messages': extract_messages}
        )
    """
    description = 'extract localizable strings from the project code'
    user_options = [
        ('charset=', None,
         'charset to use in the output file (default "utf-8")'),
        ('keywords=', 'k',
         'space-separated list of keywords to look for in addition to the '
         'defaults'),
        ('no-default-keywords', None,
         'do not include the default keywords'),
        ('mapping-file=', 'F',
         'path to the mapping configuration file'),
        ('no-location', None,
         'do not include location comments with filename and line number'),
        ('omit-header', None,
         'do not include msgid "" entry in header'),
        ('output-file=', 'o',
         'name of the output file'),
        ('width=', 'w',
         'set output line width (default 76)'),
        ('no-wrap', None,
         'do not break long message lines, longer than the output line width, '
         'into several lines'),
        ('sort-output', None,
         'generate sorted output (default False)'),
        ('sort-by-file', None,
         'sort output by file location (default False)'),
        ('msgid-bugs-address=', None,
         'set report address for msgid'),
        ('copyright-holder=', None,
         'set copyright holder in output'),
        ('project=', None,
         'set project name in output'),
        ('version=', None,
         'set project version in output'),
        ('add-comments=', 'c',
         'place comment block with TAG (or those preceding keyword lines) in '
         'output file. Separate multiple TAGs with commas(,)'),
        ('strip-comments', None,
         'strip the comment TAGs from the comments.'),
        ('input-paths=', None,
         'files or directories that should be scanned for messages. Separate multiple '
         'files or directories with commas(,)'),
        ('input-dirs=', None, # TODO (3.x): Remove me.
         'alias for input-paths (does allow files as well as directories).'),
    ]
    boolean_options = [
        'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
        'sort-output', 'sort-by-file', 'strip-comments'
    ]
    as_args = 'input-paths'
    multiple_value_options = ('add-comments',)
    def initialize_options(self):
        self.charset = 'utf-8'
        # `keywords` is the raw user string; `_keywords` is the parsed dict
        # actually passed to the extractor.
        self.keywords = ''
        self._keywords = DEFAULT_KEYWORDS.copy()
        self.no_default_keywords = False
        self.mapping_file = None
        self.no_location = False
        self.omit_header = False
        self.output_file = None
        self.input_dirs = None
        self.input_paths = None
        self.width = None
        self.no_wrap = False
        self.sort_output = False
        self.sort_by_file = False
        self.msgid_bugs_address = None
        self.copyright_holder = None
        self.project = None
        self.version = None
        self.add_comments = None
        self.strip_comments = False
    def finalize_options(self):
        # `input-dirs` is a deprecated alias for `input-paths`.
        if self.input_dirs:
            if not self.input_paths:
                self.input_paths = self.input_dirs
            else:
                raise DistutilsOptionError(
                    'input-dirs and input-paths are mutually exclusive'
                )
        if self.no_default_keywords and not self.keywords:
            raise DistutilsOptionError('you must specify new keywords if you '
                                       'disable the default ones')
        if self.no_default_keywords:
            self._keywords = {}
        if self.keywords:
            self._keywords.update(parse_keywords(self.keywords.split()))
        if not self.output_file:
            raise DistutilsOptionError('no output file specified')
        if self.no_wrap and self.width:
            raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
                                       "exclusive")
        if not self.no_wrap and not self.width:
            self.width = 76
        elif self.width is not None:
            self.width = int(self.width)
        if self.sort_output and self.sort_by_file:
            raise DistutilsOptionError("'--sort-output' and '--sort-by-file' "
                                       "are mutually exclusive")
        if self.input_paths:
            if isinstance(self.input_paths, string_types):
                self.input_paths = re.split(',\s*', self.input_paths)
        else:
            # Fall back to the distribution's top-level packages.
            self.input_paths = dict.fromkeys([
                k.split('.', 1)[0]
                for k in (self.distribution.packages or ())
            ]).keys()
        if not self.input_paths:
            raise DistutilsOptionError("no input files or directories specified")
        for path in self.input_paths:
            if not os.path.exists(path):
                raise DistutilsOptionError("Input path: %s does not exist" % path)
        if self.add_comments:
            if isinstance(self.add_comments, string_types):
                self.add_comments = self.add_comments.split(',')
        else:
            self.add_comments = []
        if self.distribution:
            if not self.project:
                self.project = self.distribution.get_name()
            if not self.version:
                self.version = self.distribution.get_version()
    def run(self):
        mappings = self._get_mappings()
        with open(self.output_file, 'wb') as outfile:
            catalog = Catalog(project=self.project,
                              version=self.version,
                              msgid_bugs_address=self.msgid_bugs_address,
                              copyright_holder=self.copyright_holder,
                              charset=self.charset)
            for path, (method_map, options_map) in mappings.items():
                # Progress callback invoked by the extractor per input file.
                def callback(filename, method, options):
                    if method == 'ignore':
                        return
                    # If we explicitly provide a full filepath, just use that.
                    # Otherwise, path will be the directory path and filename
                    # is the relative path from that dir to the file.
                    # So we can join those to get the full filepath.
                    if os.path.isfile(path):
                        filepath = path
                    else:
                        filepath = os.path.normpath(os.path.join(path, filename))
                    optstr = ''
                    if options:
                        optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
                                                      k, v in options.items()])
                    self.log.info('extracting messages from %s%s', filepath, optstr)
                if os.path.isfile(path):
                    current_dir = os.getcwd()
                    extracted = check_and_call_extract_file(
                        path, method_map, options_map,
                        callback, self._keywords, self.add_comments,
                        self.strip_comments, current_dir
                    )
                else:
                    extracted = extract_from_dir(
                        path, method_map, options_map,
                        keywords=self._keywords,
                        comment_tags=self.add_comments,
                        callback=callback,
                        strip_comment_tags=self.strip_comments
                    )
                for filename, lineno, message, comments, context in extracted:
                    if os.path.isfile(path):
                        filepath = filename  # already normalized
                    else:
                        filepath = os.path.normpath(os.path.join(path, filename))
                    catalog.add(message, None, [(filepath, lineno)],
                                auto_comments=comments, context=context)
            self.log.info('writing PO template file to %s' % self.output_file)
            write_po(outfile, catalog, width=self.width,
                     no_location=self.no_location,
                     omit_header=self.omit_header,
                     sort_output=self.sort_output,
                     sort_by_file=self.sort_by_file)
    def _get_mappings(self):
        # Returns {path: (method_map, options_map)} from, in priority order:
        # an explicit mapping file, the distribution's `message_extractors`
        # setup keyword, or the built-in default mapping.
        mappings = {}
        if self.mapping_file:
            fileobj = open(self.mapping_file, 'U')
            try:
                method_map, options_map = parse_mapping(fileobj)
                for path in self.input_paths:
                    mappings[path] = method_map, options_map
            finally:
                fileobj.close()
        elif getattr(self.distribution, 'message_extractors', None):
            message_extractors = self.distribution.message_extractors
            for path, mapping in message_extractors.items():
                if isinstance(mapping, string_types):
                    method_map, options_map = parse_mapping(StringIO(mapping))
                else:
                    method_map, options_map = [], {}
                    for pattern, method, options in mapping:
                        method_map.append((pattern, method))
                        options_map[pattern] = options or {}
                mappings[path] = method_map, options_map
        else:
            for path in self.input_paths:
                mappings[path] = DEFAULT_MAPPING, {}
        return mappings
def check_message_extractors(dist, name, value):
    """Validate the ``message_extractors`` keyword argument to ``setup()``.
    :param dist: the distutils/setuptools ``Distribution`` object
    :param name: the name of the keyword argument (should always be
                 "message_extractors")
    :param value: the value of the keyword argument
    :raise `DistutilsSetupError`: if the value is not valid
    """
    assert name == 'message_extractors'
    if isinstance(value, dict):
        return
    raise DistutilsSetupError('the value of the "message_extractors" '
                              'parameter must be a dictionary')
class init_catalog(Command):
    """New catalog initialization command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import init_catalog
        setup(
            ...
            cmdclass = {'init_catalog': init_catalog}
        )
    """
    description = 'create a new catalog based on a POT file'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-dir=', 'd',
         'path to output directory'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale for the new localized catalog'),
        ('width=', 'w',
         'set output line width (default 76)'),
        ('no-wrap', None,
         'do not break long message lines, longer than the output line width, '
         'into several lines'),
    ]
    boolean_options = ['no-wrap']
    def initialize_options(self):
        self.output_dir = None
        self.output_file = None
        self.input_file = None
        self.locale = None
        self.domain = 'messages'
        self.no_wrap = False
        self.width = None
    def finalize_options(self):
        # Validate inputs, resolve the default output path and create the
        # target directory if necessary.
        if not self.input_file:
            raise DistutilsOptionError('you must specify the input file')
        if not self.locale:
            raise DistutilsOptionError('you must provide a locale for the '
                                       'new catalog')
        try:
            self._locale = Locale.parse(self.locale)
        except UnknownLocaleError as e:
            raise DistutilsOptionError(e)
        if not self.output_file and not self.output_dir:
            raise DistutilsOptionError('you must specify the output directory')
        if not self.output_file:
            self.output_file = os.path.join(self.output_dir, self.locale,
                                            'LC_MESSAGES', self.domain + '.po')
        if not os.path.exists(os.path.dirname(self.output_file)):
            os.makedirs(os.path.dirname(self.output_file))
        if self.no_wrap and self.width:
            raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
                                       "exclusive")
        if not self.no_wrap and not self.width:
            self.width = 76
        elif self.width is not None:
            self.width = int(self.width)
    def run(self):
        self.log.info(
            'creating catalog %r based on %r', self.output_file, self.input_file
        )
        infile = open(self.input_file, 'rb')
        try:
            # Although reading from the catalog template, read_po must be fed
            # the locale in order to correctly calculate plurals
            catalog = read_po(infile, locale=self.locale)
        finally:
            infile.close()
        catalog.locale = self._locale
        catalog.revision_date = datetime.now(LOCALTZ)
        # A freshly initialized catalog is never fuzzy.
        catalog.fuzzy = False
        outfile = open(self.output_file, 'wb')
        try:
            write_po(outfile, catalog, width=self.width)
        finally:
            outfile.close()
class update_catalog(Command):
"""Catalog merging command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import update_catalog
setup(
...
cmdclass = {'update_catalog': update_catalog}
)
.. versionadded:: 0.9
"""
description = 'update message catalogs from a POT file'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-file=', 'i',
'name of the input file'),
('output-dir=', 'd',
'path to base directory containing the catalogs'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
('locale=', 'l',
'locale of the catalog to compile'),
('width=', 'w',
'set output line width (default 76)'),
('no-wrap', None,
'do not break long message lines, longer than the output line width, '
'into several lines'),
('ignore-obsolete=', None,
'whether to omit obsolete messages from the output'),
('no-fuzzy-matching', 'N',
'do not use fuzzy matching'),
('update-header-comment', None,
'update target header comment'),
('previous', None,
'keep previous msgids of translated messages')
]
boolean_options = ['ignore_obsolete', 'no_fuzzy_matching', 'previous', 'update_header_comment']
def initialize_options(self):
self.domain = 'messages'
self.input_file = None
self.output_dir = None
self.output_file = None
self.locale = None
self.width = None
self.no_wrap = False
self.ignore_obsolete = False
self.no_fuzzy_matching = False
self.update_header_comment = False
self.previous = False
def finalize_options(self):
if not self.input_file:
raise DistutilsOptionError('you must specify the input file')
if not self.output_file and not self.output_dir:
raise DistutilsOptionError('you must specify the output file or '
'directory')
if self.output_file and not self.locale:
raise DistutilsOptionError('you must specify the locale')
if self.no_wrap and self.width:
raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
"exclusive")
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
if self.no_fuzzy_matching and self.previous:
self.previous = False
    def run(self):
        """Merge the POT template into every target PO catalog.

        The target list is either the explicit output file, the single
        locale under output_dir, or every locale directory found under
        output_dir.  Each catalog is rewritten through a temporary file so
        a failed write never corrupts the existing catalog.
        """
        po_files = []
        if not self.output_file:
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.output_dir, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.po')))
            else:
                # No locale given: pick up every locale under output_dir
                # that already has a catalog for this domain.
                for locale in os.listdir(self.output_dir):
                    po_file = os.path.join(self.output_dir, locale,
                                           'LC_MESSAGES',
                                           self.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
        else:
            po_files.append((self.locale, self.output_file))
        domain = self.domain
        if not domain:
            # Fall back to the template's base name as the domain.
            domain = os.path.splitext(os.path.basename(self.input_file))[0]
        infile = open(self.input_file, 'rb')
        try:
            template = read_po(infile)
        finally:
            infile.close()
        if not po_files:
            raise DistutilsOptionError('no message catalogs found')
        for locale, filename in po_files:
            self.log.info('updating catalog %r based on %r', filename, self.input_file)
            infile = open(filename, 'rb')
            try:
                catalog = read_po(infile, locale=locale, domain=domain)
            finally:
                infile.close()
            catalog.update(
                template, self.no_fuzzy_matching,
                update_header_comment=self.update_header_comment
            )
            # Write to a sibling temp file first; the real catalog is only
            # replaced once the write has fully succeeded.
            tmpname = os.path.join(os.path.dirname(filename),
                                   tempfile.gettempprefix() +
                                   os.path.basename(filename))
            tmpfile = open(tmpname, 'wb')
            try:
                try:
                    write_po(tmpfile, catalog,
                             ignore_obsolete=self.ignore_obsolete,
                             include_previous=self.previous, width=self.width)
                finally:
                    tmpfile.close()
            except:
                # Clean up the partial temp file before propagating.
                os.remove(tmpname)
                raise
            try:
                os.rename(tmpname, filename)
            except OSError:
                # We're probably on Windows, which doesn't support atomic
                # renames, at least not through Python
                # If the error is in fact due to a permissions problem, that
                # same error is going to be raised from one of the following
                # operations
                os.remove(filename)
                shutil.copy(tmpname, filename)
                os.remove(tmpname)
class CommandLineInterface(object):
    """Command-line interface.
    This class provides a simple command-line interface to the message
    extraction and PO file generation functionality.
    """
    # Filled in per invocation as '%prog <command> [args]'.
    usage = '%%prog %s [options] %s'
    version = '%%prog %s' % VERSION
    # command name -> one-line description (shown by the help output)
    commands = {
        'compile': 'compile message catalogs to MO files',
        'extract': 'extract messages from source files and generate a POT file',
        'init': 'create new message catalogs from a POT file',
        'update': 'update existing message catalogs from a POT file'
    }
    # command name -> distutils command class implementing it
    command_classes = {
        'compile': compile_catalog,
        'extract': extract_messages,
        'init': init_catalog,
        'update': update_catalog,
    }
    def run(self, argv=None):
        """Main entry point of the command-line interface.
        :param argv: list of arguments passed on the command-line
        """
        if argv is None:
            argv = sys.argv
        self.parser = optparse.OptionParser(usage=self.usage % ('command', '[args]'),
                                            version=self.version)
        # Stop at the first positional argument so the sub-command's own
        # parser handles everything that follows it.
        self.parser.disable_interspersed_args()
        self.parser.print_help = self._help
        self.parser.add_option('--list-locales', dest='list_locales',
                               action='store_true',
                               help="print all known locales and exit")
        self.parser.add_option('-v', '--verbose', action='store_const',
                               dest='loglevel', const=logging.DEBUG,
                               help='print as much as possible')
        self.parser.add_option('-q', '--quiet', action='store_const',
                               dest='loglevel', const=logging.ERROR,
                               help='print as little as possible')
        self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
        options, args = self.parser.parse_args(argv[1:])
        self._configure_logging(options.loglevel)
        if options.list_locales:
            identifiers = localedata.locale_identifiers()
            longest = max([len(identifier) for identifier in identifiers])
            identifiers.sort()
            # Left-align the identifier column to fit the longest id.
            format = u'%%-%ds %%s' % (longest + 1)
            for identifier in identifiers:
                locale = Locale.parse(identifier)
                output = format % (identifier, locale.english_name)
                print(output.encode(sys.stdout.encoding or
                                    getpreferredencoding() or
                                    'ascii', 'replace'))
            return 0
        if not args:
            self.parser.error('no valid command or option passed. '
                              'Try the -h/--help option for more information.')
        cmdname = args[0]
        if cmdname not in self.commands:
            self.parser.error('unknown command "%s"' % cmdname)
        return self._dispatch(cmdname, args[1:])
    def _configure_logging(self, loglevel):
        """Attach (or reuse) a stream handler on the 'babel' logger."""
        self.log = logging.getLogger('babel')
        self.log.setLevel(loglevel)
        # Don't add a new handler for every instance initialization (#227), this
        # would cause duplicated output when the CommandLineInterface as an
        # normal Python class.
        if self.log.handlers:
            handler = self.log.handlers[0]
        else:
            handler = logging.StreamHandler()
            self.log.addHandler(handler)
        handler.setLevel(loglevel)
        formatter = logging.Formatter('%(message)s')
        handler.setFormatter(formatter)
    def _help(self):
        """Print global usage plus the table of available sub-commands."""
        print(self.parser.format_help())
        print("commands:")
        longest = max([len(command) for command in self.commands])
        format = " %%-%ds %%s" % max(8, longest + 1)
        commands = sorted(self.commands.items())
        for name, description in commands:
            print(format % (name, description))
    def _dispatch(self, cmdname, argv):
        """Instantiate the command class for *cmdname*, parse *argv* with a
        per-command option parser built from its user_options, and run it.
        :type cmdname: str
        :type argv: list[str]
        """
        cmdclass = self.command_classes[cmdname]
        cmdinst = cmdclass()
        cmdinst.log = self.log # Use our logger, not distutils'.
        assert isinstance(cmdinst, Command)
        cmdinst.initialize_options()
        parser = optparse.OptionParser(
            usage=self.usage % (cmdname, ''),
            description=self.commands[cmdname]
        )
        as_args = getattr(cmdclass, "as_args", ())
        for long, short, help in cmdclass.user_options:
            name = long.strip("=")
            default = getattr(cmdinst, name.replace('-', '_'))
            strs = ["--%s" % name]
            if short:
                strs.append("-%s" % short)
            if name == as_args:
                parser.usage += "<%s>" % name
            elif name in cmdclass.boolean_options:
                parser.add_option(*strs, action="store_true", help=help)
            elif name in cmdclass.multiple_value_options:
                parser.add_option(*strs, action="append", help=help)
            else:
                parser.add_option(*strs, help=help, default=default)
        options, args = parser.parse_args(argv)
        if as_args:
            # Leftover positional args become the value of the as_args option.
            setattr(options, as_args.replace('-', '_'), args)
        for key, value in vars(options).items():
            setattr(cmdinst, key, value)
        try:
            cmdinst.ensure_finalized()
        except DistutilsOptionError as err:
            parser.error(str(err))
        cmdinst.run()
def main():
    """Console-script entry point: run the CLI against ``sys.argv``."""
    cli = CommandLineInterface()
    return cli.run(sys.argv)
def parse_mapping(fileobj, filename=None):
    """Parse an extraction method mapping from a file-like object.
    >>> buf = StringIO('''
    ... [extractors]
    ... custom = mypackage.module:myfunc
    ...
    ... # Python source files
    ... [python: **.py]
    ...
    ... # Genshi templates
    ... [genshi: **/templates/**.html]
    ... include_attrs =
    ... [genshi: **/templates/**.txt]
    ... template_class = genshi.template:TextTemplate
    ... encoding = latin-1
    ...
    ... # Some custom extractor
    ... [custom: **/custom/*.*]
    ... ''')
    >>> method_map, options_map = parse_mapping(buf)
    >>> len(method_map)
    4
    >>> method_map[0]
    ('**.py', 'python')
    >>> options_map['**.py']
    {}
    >>> method_map[1]
    ('**/templates/**.html', 'genshi')
    >>> options_map['**/templates/**.html']['include_attrs']
    ''
    >>> method_map[2]
    ('**/templates/**.txt', 'genshi')
    >>> options_map['**/templates/**.txt']['template_class']
    'genshi.template:TextTemplate'
    >>> options_map['**/templates/**.txt']['encoding']
    'latin-1'
    >>> method_map[3]
    ('**/custom/*.*', 'mypackage.module:myfunc')
    >>> options_map['**/custom/*.*']
    {}
    :param fileobj: a readable file-like object containing the configuration
                    text to parse
    :param filename: optional file name, used by the parser in error messages
    :see: `extract_from_directory`
    """
    extractors = {}
    method_map = []
    options_map = {}
    parser = RawConfigParser()
    # HACK: relies on the parser's private _sections attribute to keep the
    # section order of the input file.
    parser._sections = odict(parser._sections) # We need ordered sections
    parser.readfp(fileobj, filename)
    for section in parser.sections():
        if section == 'extractors':
            # [extractors] declares aliases, not patterns.
            extractors = dict(parser.items(section))
        else:
            # Section headers look like "method: pattern".
            method, pattern = [part.strip() for part in section.split(':', 1)]
            method_map.append((pattern, method))
            options_map[pattern] = dict(parser.items(section))
    if extractors:
        # Resolve extractor aliases to their full "module:function" spec.
        for idx, (pattern, method) in enumerate(method_map):
            if method in extractors:
                method = extractors[method]
            method_map[idx] = (pattern, method)
    return (method_map, options_map)
def parse_keywords(strings=()):
    """Parse keywords specifications from the given list of strings.

    Each spec is either a bare function name (all arguments are messages)
    or ``name:indices`` where *indices* is a comma-separated list of
    1-based argument positions; an index suffixed with ``c`` marks the
    message context argument.  The first spec for a name wins; later
    duplicates are ignored.

    >>> kw = sorted(parse_keywords(['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2']).items())
    >>> for keyword, indices in kw:
    ...     print((keyword, indices))
    ('_', None)
    ('dgettext', (2,))
    ('dngettext', (2, 3))
    ('pgettext', ((1, 'c'), 2))

    :param strings: iterable of keyword specification strings
    :return: mapping of function name to index tuple (or None)
    """
    # Fixed: the default used to be a mutable list literal (shared between
    # calls); an immutable empty tuple is equivalent and safe.
    keywords = {}
    for string in strings:
        if ':' in string:
            funcname, indices = string.split(':')
        else:
            funcname, indices = string, None
        if funcname not in keywords:
            if indices:
                inds = []
                for x in indices.split(','):
                    if x[-1] == 'c':
                        # context argument: store as (index, 'c')
                        inds.append((int(x[:-1]), 'c'))
                    else:
                        inds.append(int(x))
                indices = tuple(inds)
            keywords[funcname] = indices
    return keywords
# Allow running this module directly: dispatch to the CLI entry point.
if __name__ == '__main__':
    main()
| iamshubh22/babel | babel/messages/frontend.py | Python | bsd-3-clause | 35,276 |
"""
Celery task management.
http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html#using-celery-with-django
https://realpython.com/blog/python/asynchronous-tasks-with-django-and-celery/
"""
from __future__ import absolute_import, unicode_literals
import os
# from . import settings
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teweb.settings')
app = Celery('teweb')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY') # celery 4
# app.config_from_object('django.conf:settings')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks() # celery 4
# app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    """Print the Celery request context; useful to verify worker wiring."""
    message = 'Request: {0!r}'.format(self.request)
    print(message)
| matthiaskoenig/tellurium-web | teweb/teweb/celery.py | Python | lgpl-3.0 | 1,083 |
#!/usr/bin/env python
# encoding: utf-8
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os, sys
import logging
import subprocess
import shlex
logger = logging.getLogger(__name__)
def run_cmd(cmd):
    """Log *cmd* and execute it through the shell.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    message = "Running: " + cmd
    logger.info(message)
    subprocess.check_call(cmd, shell=True)
class Pipeliner(object):
    """Runs a queue of Command objects, skipping those already checkpointed.

    After each successful command an empty checkpoint file is written to
    *checkpoint_dir*; on a re-run, commands whose checkpoint file exists
    are skipped.
    """

    def __init__(self, checkpoint_dir):
        """
        :param checkpoint_dir: directory for checkpoint files (created if missing)
        """
        checkpoint_dir = os.path.abspath(checkpoint_dir)
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        self._checkpoint_dir = checkpoint_dir
        # Bug fix: _cmds_list used to be a mutable *class* attribute, so every
        # Pipeliner instance shared (and accumulated into) one queue.
        self._cmds_list = []

    def add_commands(self, cmds_list):
        """Append Command objects to this pipeline's queue."""
        for cmd in cmds_list:
            self._cmds_list.append(cmd)

    def num_cmds(self):
        """Return the number of queued (not yet executed) commands."""
        return len(self._cmds_list)

    def run(self):
        """Execute queued commands in order, checkpointing each success.

        Commands whose checkpoint file already exists are skipped.  The
        queue is cleared only after every command has succeeded.
        """
        for cmd in self._cmds_list:
            checkpoint_file = os.path.sep.join([self._checkpoint_dir, cmd.get_checkpoint()])
            if os.path.exists(checkpoint_file):
                logger.info("CMD: " + cmd.get_cmd() + " already processed. Skipping.")
            else:
                # execute it. If it succeeds, make the checkpoint file
                run_cmd(cmd.get_cmd())
                run_cmd("touch {}".format(checkpoint_file))

        # since all commands executed successfully, remove them from the current cmds list
        self._cmds_list = list()
class Command(object):
    """Value object pairing a shell command string with its checkpoint name."""

    def __init__(self, cmd, checkpoint):
        self._command = cmd
        self._checkpoint_name = checkpoint

    def get_cmd(self):
        """Return the shell command string."""
        return self._command

    def get_checkpoint(self):
        """Return the checkpoint file name recorded for this command."""
        return self._checkpoint_name
| DISCASM/DISCASM | PyLib/Pipeliner.py | Python | bsd-3-clause | 1,704 |
import glob
import json
import csv
from models.company import company
from models.policies import policies_model
from sim.train_sklearn import train_sklearn
from sim.model_sklearn import regression_sklearn
from sim.model_sklearn import classifier_sklearn
class TestGeneration:
    """Tests and utilities for generating training/testing data for the
    implicit risk model (Python 2 code: uses print statements)."""
    all_risks = ["bruteforce", "stolen"] # this needs to be derived from actual incident json files
    # Context used when an incident does not restrict employees/locations/devices.
    default_context = {
        'employees': company.employee_types,
        'locations': company.location_types,
        'devices': company.device_types}
    # Sample policy data points used by test_training_data.
    entries = [{
        "pdata": 0,
        "bdata": 0,
        "plen": 8,
        "psets": 1,
        "pdict": 1,
        "phist": 2,
        "prenew": 2,
        "pattempts": 0,
        "precovery": 0
    }]
    def setup_method(self, method):
        # Fresh model instances for every test method.
        self.trainer = train_sklearn()
        self.classifier = classifier_sklearn()
        self.regression = regression_sklearn()
    # NOTE(review): the bare string below is a stray expression, not a
    # docstring of test_enumerations; consider moving it inside the method.
    """
    Tests the creation of permutations for the needs of training of implicit model
    """
    def test_enumerations(self):
        assert len(self.trainer.enum_policy_contexts()) == 27
        # The sample enumeration must cover the full cartesian product of
        # all policy bounds.
        product = 1
        for next in policies_model.get_bounds():
            product = product * len(policies_model.get_bounds()[next])
        assert len(self.trainer.enum_samples({})) == product
    def test_training_data(self):
        for next in self.entries:
            print next
            assert self.classifier.predict_data(next) == 'classifier'
            print self.classifier.predict_data(next)
    def generate_testing_set(self):
        """
        Generalizes the incidents into a training set to be used by the implicit model.
        This assumes the types of risks
        """
        result = []
        # read incidents and generate training sets
        for ref in glob.glob('static/incidents/*.json'):
            incident_file = open(ref)
            incident = json.load(incident_file)
            incident_file.close()
            # A 'general' incident applies to every known risk type.
            risks = self.all_risks if incident["type"] == 'general' else [incident["type"]]
            policy = incident["policy"]
            cls = incident["id"]
            value = incident["risk"]
            # fill missing part of context, assuming that omitting something means all possible values
            if "context" in incident:
                context = incident["context"]
                for key in self.default_context:
                    if key not in context:
                        context[key] = self.default_context[key]
            else:
                context = self.default_context
            # complete policy with neutral values
            sample = policies_model.get_neutral() # no need to copy, because call already returns a new instance
            sample.update(policy)
            # add classification last column
            data = policies_model.policy2datapoint(sample)
            self.trainer = train_sklearn()
            self.classifier = classifier_sklearn()
            self.regression = regression_sklearn()
            # One CSV row per incident: name, type, class, risk, then a
            # (class, probability) pair per risk/employee/location/device combo.
            line = []
            line.append(incident["name"])
            line.append(incident["type"])
            line.append(cls)
            line.append(value)
            print str(incident["name"]) + " policy: " + str(data) + " context: " + str(context) + " risks: " + str(risks)
            for risk in self.all_risks:
                for employee in context['employees']:
                    for location in context['locations']:
                        for device in context['devices']:
                            if risk in risks and employee in self.default_context['employees'] and location in self.default_context['locations'] and device in self.default_context['devices']:
                                # print risk + " " + employee + " " + location + " " + device
                                # print "classification: (" + str(cls) + "," + str(self.classifier.get_prediction(data, employee, location, device, risk)["id"]) + ")"
                                # print "regression: (" + str(value) + "," + str(self.regression.get_prediction(data, employee, location, device, risk)) + ")"
                                line.append(self.classifier.get_prediction(data, employee, location, device, risk)["id"])
                                line.append(self.regression.get_prediction(data, employee, location, device, risk))
                            else:
                                # Combination not applicable: pad with zeros.
                                line.append(0)
                                line.append(0)
            result.append(line)
        # NOTE(review): the header loops reuse `context` leaked from the last
        # loop iteration above — confirm all incidents share the same context
        # shape, otherwise the header may not match the data columns.
        writer = csv.writer(open('result.csv', 'wb'))
        label = ['name', 'risk', 'cls', 'prob']
        for risk in self.all_risks:
            for employee in context['employees']:
                for location in context['locations']:
                    for device in context['devices']:
                        label.append(risk + '-' + employee + '-' + location + '-' + device)
                        label.append(risk + '-' + employee + '-' + location + '-' + device)
        writer.writerow(label)
        writer.writerows(result)
# Script entry point: regenerate result.csv from the incident fixtures.
if __name__ == "__main__":
    TestGeneration().generate_testing_set()
import logging
import sdk_cmd
from tests import auth
LOG = logging.getLogger(__name__)
def add_acls(user: str, marathon_task: str, topic: str, zookeeper_endpoint: str, env_str=None):
    """
    Grant Producer and Consumer (any group) ACLs on *topic* to *user*.
    """
    producer_roles = ["--producer"]
    consumer_roles = ["--consumer", "--group=*"]
    _add_role_acls(producer_roles, user, marathon_task, topic, zookeeper_endpoint, env_str)
    _add_role_acls(consumer_roles, user, marathon_task, topic, zookeeper_endpoint, env_str)
def remove_acls(user: str, marathon_task: str, topic: str, zookeeper_endpoint: str, env_str=None):
    """
    Revoke the Producer and Consumer (any group) ACLs on *topic* for *user*.
    """
    producer_roles = ["--producer"]
    consumer_roles = ["--consumer", "--group=*"]
    _remove_role_acls(producer_roles, user, marathon_task, topic, zookeeper_endpoint, env_str)
    _remove_role_acls(consumer_roles, user, marathon_task, topic, zookeeper_endpoint, env_str)
def _modify_role_acls(
    action: str,
    roles: list,
    user: str,
    marathon_task: str,
    topic: str,
    zookeeper_endpoint: str,
    env_str: str = None,
) -> tuple:
    """Run ``kafka-acls`` inside *marathon_task* to add/remove ACLs.

    *action* is "add" or "remove" (a leading "--" is added if missing);
    *roles* are extra role flags appended to the command line.
    """
    if not action.startswith("--"):
        action = "--{}".format(action)
    arguments = [
        "kafka-acls",
        "--topic",
        topic,
        "--authorizer-properties",
        "zookeeper.connect={}".format(zookeeper_endpoint),
        action,
        "--force",
        "--allow-principal",
        "User:{}".format(user),
    ] + list(roles)
    cmd = auth.get_bash_command(" ".join(arguments), env_str)
    LOG.info("Running: %s", cmd)
    output = sdk_cmd.marathon_task_exec(marathon_task, cmd)
    LOG.info(output)
    return output
def _add_role_acls(
    roles: list,
    user: str,
    marathon_task: str,
    topic: str,
    zookeeper_endpoint: str,
    env_str: str = None,
) -> tuple:
    """Thin wrapper: add ACLs for *roles* via _modify_role_acls."""
    return _modify_role_acls(
        "add", roles, user, marathon_task, topic, zookeeper_endpoint, env_str
    )
def _remove_role_acls(
    roles: list,
    user: str,
    marathon_task: str,
    topic: str,
    zookeeper_endpoint: str,
    env_str: str = None,
) -> tuple:
    """Thin wrapper: remove ACLs for *roles* via _modify_role_acls."""
    action = "remove"
    return _modify_role_acls(action, roles, user, marathon_task, topic, zookeeper_endpoint, env_str)
def filter_empty_offsets(offsets: list, additional: list = None) -> list:
    """
    Return the offsets that carry real data.

    Entries equal to ``None``, ``{}`` or ``{"0": ""}`` are dropped, as is
    anything listed in *additional*.

    :param offsets: candidate offset entries
    :param additional: extra entries to treat as empty (default: none)
    :return: surviving offset entries, original order preserved
    """
    # Fixed: *additional* used to default to a mutable list literal, which is
    # shared between calls; None is the safe sentinel for "no extras".
    ignored_offsets = [None, {}, {"0": ""}]
    if additional:
        ignored_offsets.extend(additional)
    LOG.info("Filtering %s from %s", ignored_offsets, offsets)
    remaining = [o for o in offsets if o not in ignored_offsets]
    LOG.info("Remaining offsets: %s", remaining)
    return remaining
| mesosphere/dcos-kafka-service | frameworks/kafka/tests/topics.py | Python | apache-2.0 | 2,534 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
class Config:
    """Layered configuration container.

    Values can be merged in from keyword arguments, plain dicts, JSON files
    and argparse namespaces; later additions override earlier ones.  Key
    lookup falls back from ``a_b`` to ``a-b`` so dashed config-file keys can
    be read with attribute syntax.
    """

    def __init__(self, **kwargs):
        self.config = dict()
        self.config.update(kwargs)

    @classmethod
    def from_dict(cls, defaults):
        """Build a Config pre-populated from *defaults*."""
        config = Config()
        config.add_from_dict(defaults)
        return config

    @classmethod
    def from_json(cls, path):
        """Build a Config pre-populated from the JSON file at *path*."""
        config = Config()
        config.add_from_json(path)
        return config

    def add_from_default_locations(self):
        """Merge settings from /etc/zookerrc and ~/.zookerrc (if present)."""
        self.add_from_json('/etc/zookerrc')
        self.add_from_json(os.path.join(os.path.expanduser('~'), '.zookerrc'))
        return self

    def add_from_dict(self, data):
        """Merge a non-empty dict into the configuration."""
        # isinstance (not `type(...) ==`) so dict subclasses are accepted too.
        if data and isinstance(data, dict):
            self.config.update(data)
        return self

    def add_from_json(self, path):
        """Merge the JSON object stored at *path*, if the file exists."""
        if os.path.exists(path):
            with open(path, 'r') as f:
                from_json = json.load(f)
            # Bug fix: `dict(a.items() + b.items())` is Python-2-only (raises
            # TypeError on Python 3); update() merges with the same override
            # semantics (file values win).
            self.config.update(from_json)
        return self

    def add_from_args(self, args):
        """Merge a parsed argparse.Namespace (or similar) into the config."""
        if args:
            self.config.update(vars(args))
        return self

    def get(self, key, default=None):
        """Look up *key*, falling back to its dashed variant, then *default*."""
        return self.config.get(key, self.config.get(key.replace('_', '-'), default))

    def __getattr__(self, item):
        # Attribute access delegates to get(); missing keys yield None.
        return self.get(item)

    def __getitem__(self, item):
        return self.get(item)

    def __contains__(self, item):
        return item in self.config

    def __len__(self):
        return len(self.config)
| peletomi/zooker | src/zooker/config.py | Python | bsd-3-clause | 1,510 |
#!/bin/usr/env python
# coding:utf-8
__author__ = 'Samuel Chen <samuel.net@gmail.com>'
import sys
import time
x = ['\\', '/', '-']  # spinner glyphs, cycled one per frame
for i in range(50):
    j = i % 3  # pick this frame's spinner glyph
    sys.stdout.write('File x is downloading .. %s [' % x[j])
    sys.stdout.write('=' * i)  # completed portion of the bar
    sys.stdout.write('-')  # leading edge of the bar
    sys.stdout.write(' ' * (49-i))  # remaining portion
    sys.stdout.flush()
    sys.stdout.write('] ')
    time.sleep(0.1)
    # Carriage return (no newline) so the next frame redraws the same line.
    sys.stdout.write('\r')
    sys.stdout.flush()
# Python 2 print statement: emit the final newline after the bar completes.
print
| samuelchen/code-snippets | python/cli-progress-bar.py | Python | gpl-2.0 | 487 |
#
# livef1
#
# f1item.py - Storage class for the drivers information
#
# Copyright (c) 2014 Marc Bertens <marc.bertens@pe2mbs.nl>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Special thanks to the live-f1 project 'https://launchpad.net/live-f1'
# * Scott James Remnant
# * Dave Pusey
#
# For showing the way of program logic.
#
import logging
class F1Item( object ):
    """A single data item on the live timing board.

    Holds a field name (used as the CSS class), a value and an integer
    code (``data``).  When the current value is an int, assigning a new
    value coerces digit strings to int and resets anything non-numeric
    to 0; otherwise the value is stored as given.
    """

    def __init__( self, _name, _value = '', _data = 0 ):
        self.__data = _data     # integer code associated with the item
        self.__value = _value   # displayed value (str or int)
        self.__name = _name     # field name, used as the HTML/CSS class
        self.log = logging.getLogger('live-f1')
        return
    # end def

    def __getData( self ):
        return self.__data
    # end def

    def __setData( self, d ):
        self.__data = d
        return
    # end def

    def __getValue( self ):
        return self.__value
    # end def

    def __setValue( self, d ):
        # Bug fix: the original compared type(...) against the *string*
        # "<type 'int'>", which is always False, so the int-coercion branch
        # could never run.  isinstance() implements the intended check.
        if isinstance( self.__value, int ):
            if isinstance( d, int ):
                self.__value = d
            elif d.isdigit():
                self.__value = int( d )
            else:
                self.__value = 0
            # end if
        else:
            self.__value = d
        return
    # end def

    data = property( __getData, __setData )
    value = property( __getValue, __setValue )

    def getHtml( self ):
        """Render the item as an HTML table cell (data code in hex id)."""
        if isinstance( self.__value, int ):
            return "<td class='%s' id='status_data_%02X'> %i </td>" % ( self.__name, self.__data, self.__value )
        # end if
        return "<td class='%s' id='status_data_%02X'> %s </td>" % ( self.__name, self.__data, self.__value )
| livef1/Livef1-web | src/item.py | Python | gpl-2.0 | 2,348 |
# Public page modules re-exported by scrapenhl2.plot.app.
__all__ = ['game_page',
           'player_page',
           'team_page']
import numpy as np
from .base import AnalyticalPropagator
from ..constants import Earth
from ..dates import timedelta
class J2(AnalyticalPropagator):
    """Analytical propagator taking only the Earth-J2 effect into account"""
    @property
    def orbit(self):
        # The propagator may not have been initialised with an orbit yet.
        return self._orbit if hasattr(self, "_orbit") else None
    @orbit.setter
    def orbit(self, orbit):
        # Internally work in mean Keplerian elements, where J2 secular
        # drift is a simple linear rate on the angles.
        self._orbit = orbit.copy(form="keplerian_mean")
    def propagate(self, date):
        """Propagate the stored orbit to *date*.

        :param date: absolute date, or a timedelta relative to the orbit's date
        :return: the propagated orbit in cartesian form
        """
        if type(date) is timedelta:  # pragma: no cover
            date = self.orbit.date + date
        delta_t = (date - self.orbit.date).total_seconds()
        mu = Earth.mu
        r = self.orbit.infos.r
        re = Earth.r
        n = self.orbit.infos.n
        a, e, i = self.orbit[:3]
        # Common J2 secular factor; the three rates below are the classic
        # first-order secular drifts: node regression (dΩ), argument-of-
        # perigee rotation (dω) and mean-anomaly drift (dM).
        com = n * re ** 2 * Earth.J2 / (a ** 2 * (1 - e ** 2) ** 2)
        dΩ = -3 / 2 * com * np.cos(i)
        dω = 3 / 4 * com * (4 - 5 * np.sin(i) ** 2)
        dM = 3 / 4 * com * np.sqrt(1 - e ** 2) * (2 - 3 * np.sin(i) ** 2)
        # a, e, i are secularly unchanged; angles advance linearly in time
        # (mean motion n is folded into the mean-anomaly rate).
        delta = np.array([0.0, 0.0, 0.0, dΩ, dω, dM + n]) * delta_t
        new = self.orbit[:] + delta
        new[3:] = new[3:] % (2 * np.pi)
        new.date = date
        return new.copy(form="cartesian")
| galactics/space-api | beyond/propagators/j2.py | Python | gpl-3.0 | 1,215 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Donors: Hasa Sàrl, Open Net Sàrl and Prisme Solutions Informatique SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class PaymentOrder(orm.Model):
    # Extend payment.order with a shortcut to its generated DTA attachments.
    _inherit = 'payment.order'
    _columns = {
        # Attachments on this payment order whose name matches 'DTA'
        # (the generated DTA files).
        'dta_ids': fields.one2many(
            'ir.attachment',
            'res_id',
            domain=[('res_model', '=', 'payment.order'),
                    ('name', 'like', 'DTA')]
        )
    }
| cgaspoz/l10n-switzerland | __unported__/l10n_ch_dta/payment.py | Python | agpl-3.0 | 1,325 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2016 Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from syntaq import Lexer
class PartitionerTestCase(TestCase):
    """Exercise Lexer.tokens() splitting text on the '**' marker, with '~'
    acting as the escape character."""
    def test_can_partition_with_marker(self):
        t = Lexer("~", "**")
        tokens = list(t.tokens("foo**bar"))
        assert tokens == ["foo", "**", "bar"]
    def test_can_partition_without_marker(self):
        t = Lexer("~", "**")
        tokens = list(t.tokens("foo bar"))
        assert tokens == ["foo bar"]
    def test_can_partition_with_marker_at_start(self):
        t = Lexer("~", "**")
        tokens = list(t.tokens("**foo bar"))
        assert tokens == ["**", "foo bar"]
    def test_can_partition_with_marker_at_end(self):
        t = Lexer("~", "**")
        tokens = list(t.tokens("foo bar**"))
        assert tokens == ["foo bar", "**"]
    def test_can_partition_with_escaped_marker(self):
        # An escaped marker is kept with its escape and does not split text.
        t = Lexer("~", "**")
        tokens = list(t.tokens("foo~**bar"))
        assert tokens == ["foo", "~**", "bar"]
    def test_can_partition_with_escaped_other(self):
        # Escaping a non-marker character leaves the text untokenised.
        t = Lexer("~", "**")
        tokens = list(t.tokens("foo~bar"))
        assert tokens == ["foo~bar"]
| nigelsmall/nige.tech | test/test_partitioner.py | Python | apache-2.0 | 1,730 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Provide API-callable functions for knowledge base management (using kb's).
"""
from invenio import bibknowledge_dblayer
from invenio.bibformat_config import CFG_BIBFORMAT_ELEMENTS_PATH
from invenio.config import CFG_WEBDIR
import os
import re
def get_kb_mappings(kb_name="", key="", value="", match_type="s"):
    """Get mappings from kb kb_name. If key given, give only those with
    left side (mapFrom) = key. If value given, give only those with
    right side (mapTo) = value.
    @param kb_name: the name of the kb
    @param key: include only lines matching this on left side in the results
    @param value: include only lines matching this on right side in the results
    @param match_type: s = substring match, e = exact match
    @return a list of mappings
    """
    # Thin wrapper: all filtering is delegated to the DB layer.
    return bibknowledge_dblayer.get_kb_mappings(kb_name,
                                                keylike=key, valuelike=value,
                                                match_type=match_type)
def get_kb_mapping(kb_name="", key="", value="", match_type="e", default=""):
    """Return the first mapping that matches the search, or *default*.

    @param kb_name: the name of the kb
    @param key: match against the left side (mapFrom)
    @param value: match against the right side (mapTo)
    @param match_type: s = substring match, e = exact match
    @param default: value returned when nothing matches
    @return a single mapping, or *default*
    """
    matches = bibknowledge_dblayer.get_kb_mappings(kb_name,
                                                   keylike=key,
                                                   valuelike=value,
                                                   match_type=match_type)
    return matches[0] if matches else default
def add_kb_mapping(kb_name, key, value=""):
    """
    Adds a new mapping to given kb
    @param kb_name: the name of the kb where to insert the new value
    @param key: the key of the mapping
    @param value: the value of the mapping (defaults to empty)
    """
    # Delegates straight to the DB layer; no duplicate check here.
    bibknowledge_dblayer.add_kb_mapping(kb_name, key, value)
def remove_kb_mapping(kb_name, key):
    """
    Delete an existing kb mapping in kb
    @param kb_name: the name of the kb from which to remove the mapping
    @param key: the key (left side) of the mapping to remove
    """
    bibknowledge_dblayer.remove_kb_mapping(kb_name, key)
def update_kb_mapping(kb_name, old_key, key, value):
    """Update the mapping currently keyed by *old_key*.

    A value-only change is always applied.  A key change is applied only
    when no mapping with the new key already exists, so an existing entry
    is never silently clobbered.
    @param kb_name: the name of the kb holding the mapping
    @param old_key: the current key of the mapping in the kb
    @param key: the new key of the mapping
    @param value: the new value of the mapping
    """
    if old_key == key or not kb_mapping_exists(kb_name, key):
        bibknowledge_dblayer.update_kb_mapping(kb_name, old_key, key, value)
def kb_exists(kb_name):
    """Returns True if a kb with the given name exists
    @param kb_name: the name of the knowledge base
    @return True or False
    """
    return bibknowledge_dblayer.kb_exists(kb_name)
def get_kb_name(kb_id):
    """
    Returns the name of the kb given by id
    @param kb_id: the id of the knowledge base
    @return the kb name
    """
    return bibknowledge_dblayer.get_kb_name(kb_id)
def update_kb_attributes(kb_name, new_name, new_description):
    """
    Updates given kb_name with a new name and new description
    @param kb_name: the current name of the kb to update
    @param new_name: the new name for the kb
    @param new_description: the new description for the kb
    """
    bibknowledge_dblayer.update_kb(kb_name, new_name, new_description)
def add_kb(kb_name="Untitled", kb_type=None):
    """Create a new kb in the database and return its id.

    If *kb_name* is already taken, a numeric suffix is appended
    ("name 1", "name 2", ...) until an unused name is found.
    @param kb_name: the requested name of the kb
    @param kb_type: the type of the kb, incl 'taxonomy' and 'dynamic'.
                    None for typical (leftside-rightside).
    @return the id of the newly created kb
    """
    candidate = kb_name
    suffix = 1
    while bibknowledge_dblayer.kb_exists(candidate):
        candidate = kb_name + " " + str(suffix)
        suffix += 1
    return bibknowledge_dblayer.add_kb(candidate, "", kb_type)
def kb_mapping_exists(kb_name, key):
    """
    Returns the information if a mapping exists.
    @param kb_name: knowledge base name
    @param key: left side (mapFrom)
    @return True if a mapping with this key exists in the kb
    """
    return bibknowledge_dblayer.kb_mapping_exists(kb_name, key)
def delete_kb(kb_name):
    """
    Deletes given kb from database
    @param kb_name: knowledge base name
    """
    bibknowledge_dblayer.delete_kb(kb_name)
def get_kb_id(kb_name):
    """
    Gets the id by name
    @param kb_name knowledge base name
    @return the kb id
    """
    return bibknowledge_dblayer.get_kb_id(kb_name)
# Knowledge Bases Dependencies
##
def get_elements_that_use_kb(name):
    """
    This routine is obsolete.
    Returns a list of elements that call given kb
    [ {'filename':"filename_1.py"
       'name': "a name"
      },
      ...
    ]
    Returns elements sorted by name
    @param name: the name of the knowledge base to search for
    """
    format_elements = {}
    #Retrieve all elements in files
    files = os.listdir(CFG_BIBFORMAT_ELEMENTS_PATH)
    for filename in files:
        if filename.endswith(".py"):
            path = CFG_BIBFORMAT_ELEMENTS_PATH + os.sep + filename
            formatf = open(path, 'r')
            code = formatf.read()
            formatf.close()
            # Search for use of kb inside code
            kb_pattern = re.compile('''
            (bfo.kb)\s*               #Function call
            \(\s*                     #Opening parenthesis
            [\'"]+                    #Single or double quote
            (?P<kb>%s)                #kb
            [\'"]+\s*                 #Single or double quote
            ,                         #comma
            ''' % name, re.VERBOSE | re.MULTILINE | re.IGNORECASE)
            result = kb_pattern.search(code)
            if result is not None:
                # Bug fix: the original reused the variable *name* here,
                # clobbering the kb-name parameter, so every file processed
                # after the first match was searched for the wrong kb.
                element_name = ("".join(filename.split(".")[:-1])).lower()
                if element_name.startswith("bfe_"):
                    element_name = element_name[4:]
                format_elements[element_name] = {'filename': filename,
                                                 'name': element_name}
    # Sort by element name (Python 2 idiom, as used in the rest of the file).
    keys = format_elements.keys()
    keys.sort()
    return map(format_elements.get, keys)
###kb functions for export
def get_kbs_info(kbtype="", searchkbname=""):
    """A convenience method that calls dblayer
    @param kbtype: type of kb -- get only kb's of this type
    @param searchkbname: get only kb's where this sting appears in the name
    @return list of kb info records from the DB layer
    """
    return bibknowledge_dblayer.get_kbs_info(kbtype, searchkbname)
def get_kba_values(kb_name, searchname="", searchtype="s"):
    """
    Returns an array of values "authority file" type = just values.
    @param kb_name: name of kb
    @param searchname: get these values, according to searchtype
    @param searchtype: s=substring, e=exact
    @return list of matching values
    """
    return bibknowledge_dblayer.get_kba_values(kb_name, searchname, searchtype)
def get_kbr_keys(kb_name, searchkey="", searchvalue="", searchtype='s'):
    """Return the keys (left-hand sides) of a mapping-type knowledge base.

    @param kb_name: the name of the knowledge base
    @param searchkey: restrict to keys matching this term
    @param searchvalue: restrict to entries whose value matches this term
    @param searchtype: 's' = substring match, 'e' = exact match
    """
    keys = bibknowledge_dblayer.get_kbr_keys(kb_name, searchkey,
                                             searchvalue, searchtype)
    return keys
def get_kbr_values(kb_name, searchkey="", searchvalue="", searchtype='s'):
    """
    Returns an array of values (right-hand sides) of a mapping-type kb.
    @param kb_name: the name of the knowledge base
    @param searchkey: search using this key
    @param searchvalue: search using this value
    @param searchtype: s = substring, e=exact
    """
    # NOTE: the original docstring said "array of keys", copy-pasted from
    # get_kbr_keys; the dblayer call below returns the mapped values.
    return bibknowledge_dblayer.get_kbr_values(kb_name, searchkey,
                                               searchvalue, searchtype)
def get_kbr_items(kb_name, searchkey="", searchvalue="", searchtype='s'):
    """Return the matching mappings as a list of dictionaries.

    @param kb_name: the name of the knowledge base
    @param searchkey: restrict by key
    @param searchvalue: restrict by value
    @param searchtype: 's' = substring match, 'e' = exact match
    @return: a list of dictionaries of the form [{'key': x, 'value': y}, ...]
    """
    items = bibknowledge_dblayer.get_kbr_items(kb_name, searchkey,
                                               searchvalue, searchtype)
    return items
def get_kbd_values(kbname, searchwith=""):
    """
    To be used by bibedit. Returns a list of values based on a dynamic kb.

    @param kbname: name of the knowledge base (must be of dynamic type 'd')
    @param searchwith: a term to search with; it is wrapped in single quotes
                       before being combined with the kb's search expression
    @return: a list of matching field values, or [] on any failure
    """
    import search_engine
    # first check that the kb in question exists and is dynamic
    kbid = bibknowledge_dblayer.get_kb_id(kbname)
    if not kbid:
        return []
    kbtype = bibknowledge_dblayer.get_kb_type(kbid)
    if not kbtype:
        return []
    if kbtype != 'd':
        return []
    # get the configuration so that we see what the field is
    confdict = bibknowledge_dblayer.get_kb_dyn_config(kbid)
    if not confdict:
        return []
    if 'field' not in confdict:
        return []
    field = confdict['field']
    # BUGFIX: the original read confdict['expression'] unconditionally and
    # raised KeyError for configurations without an expression.
    expression = confdict.get('expression')
    collection = confdict.get('collection', "")
    reclist = []  # return records from this
    # make sure searchwith is a quoted expression
    if searchwith:
        if not searchwith.startswith("'"):
            searchwith = "'" + searchwith
        if not searchwith.endswith("'"):
            searchwith = searchwith + "'"
    if searchwith and expression:
        if (expression.count('%') > 0) or (expression.endswith(":*")):
            # substitute the search term into the expression's placeholder
            expression = expression.replace("%", searchwith)
            expression = expression.replace(":*", ':' + searchwith)
        else:
            # no placeholder: just make a boolean combination.
            # BUGFIX: the original concatenated "and " without a leading
            # space, producing queries like "<expr>and '<term>'".
            expression = expression + " and " + searchwith
        reclist = search_engine.perform_request_search(p=expression,
                                                       cc=collection)
    else:  # either no expr or no searchwith.. but never mind about searchwith
        if expression:
            reclist = search_engine.perform_request_search(p=expression,
                                                           cc=collection)
        else:
            # make a fake expression so that only records that have this
            # field will be returned
            fake_exp = "/.*/"
            if searchwith:
                fake_exp = searchwith
            reclist = search_engine.perform_request_search(f=field,
                                                           p=fake_exp,
                                                           cc=collection)
    if reclist:
        fieldvaluelist = search_engine.get_most_popular_field_values(reclist,
                                                                     field)
        val_list = []
        for (val, dummy) in fieldvaluelist:
            # support "starts with", indicated by a '*' just before the
            # closing quote of the (quoted) search string
            if searchwith and (len(searchwith) > 2) and (searchwith[-2] == '*'):
                if val.startswith(searchwith[1:-3]):
                    val_list.append(val)
            else:
                val_list.append(val)
        return val_list
    return []  # in case nothing worked
def get_kbd_values_for_bibedit(tag, collection="", searchwith=""):
    """Fetch values for *tag*/*collection* via a throw-away dynamic kb.

    A convenience method for bibedit: creates a temporary dynamic knowledge
    base, reads its values and deletes it again.  The overhead versus a
    plain perform_request_search / get_most_popular_field_values pair is
    about 5% with large record sets.

    @param tag: the tag, like 100__a
    @param collection: collection id
    @param searchwith: the string to search; if empty, match all
    """
    temp_kb_id = add_kb(kb_name="tmp_dynamic", kb_type='dynamic')
    # add_kb may have suffixed the name with a number to avoid clashes
    # between concurrent calls, so read the actual name back
    temp_kb_name = get_kb_name(temp_kb_id)
    bibknowledge_dblayer.save_kb_dyn_config(temp_kb_id, tag, collection,
                                            searchwith)
    values = get_kbd_values(temp_kb_name, searchwith)
    # the temporary dynamic kb has served its purpose; drop it
    delete_kb(temp_kb_name)
    return values
def get_kbt_items(taxonomyfilename, templatefilename, searchwith=""):
    """Extract items from a taxonomy file via an XSLT template.

    If *searchwith* is given, only items containing it are returned.

    @param taxonomyfilename: full path+name of the RDF file
    @param templatefilename: full path+name of the XSLT file
    @param searchwith: a term to search with
    """
    import libxml2
    import libxslt
    # apply the stylesheet to the taxonomy and serialise the result
    stylesheet = libxslt.parseStylesheetDoc(libxml2.parseFile(templatefilename))
    taxonomy = libxml2.parseFile(taxonomyfilename)
    transformed = stylesheet.applyStylesheet(taxonomy, None)
    output = stylesheet.saveResultToString(transformed)
    # free the libxml2/libxslt documents explicitly
    stylesheet.freeStylesheet()
    taxonomy.freeDoc()
    transformed.freeDoc()
    if len(output) == 0:
        return []
    matches = []
    for line in output.split("\n"):
        if searchwith:
            if searchwith in line:
                matches.append(line)
        elif len(line) > 0:
            matches.append(line)
    return matches
def get_kbt_items_for_bibedit(kbtname, tag="", searchwith=""):
    """
    A simplified, customized version of the function get_kbt_items.
    Traverses an RDF document. By default returns all leaves. If
    tag defined returns the content of that tag.
    If searchwith defined, returns leaves that match it.
    Warning! In order to make this faster, the matching field values
    cannot be multi-line!
    @param kbtname: name of the taxonomy kb
    @param tag: name of tag whose content to return
    @param searchwith: a term to search with
    """
    import libxml2
    import libxslt
    # get the actual file based on the kbt name
    kb_id = get_kb_id(kbtname)
    if not kb_id:
        return []
    # the RDF file is stored under CFG_WEBDIR keyed by kb id
    rdfname = CFG_WEBDIR+"/kbfiles/"+str(kb_id)+".rdf"
    if not os.path.exists(rdfname):
        return []
    # parse the doc with a static, inline XSLT: it wraps the output in <foo>
    # and emits a <myout> element for every node whose local name equals
    # the requested tag, one per line
    styledoc = libxml2.parseDoc("""
    <xsl:stylesheet version="1.0"
    xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
    xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
    <xsl:output method="xml" standalone="yes" omit-xml-declaration="yes" indent="no"/>
    <xsl:template match="rdf:RDF">
    <foo><!--just having some tag here speeds up output by 10x-->
    <xsl:apply-templates />
    </foo>
    </xsl:template>
    <xsl:template match="*">
    <!--hi><xsl:value-of select="local-name()"/></hi-->
    <xsl:if test="local-name()='"""+tag+"""'">
    <myout><xsl:value-of select="normalize-space(.)"/></myout>
    </xsl:if>
    <!--traverse down in tree!-->
    <xsl:text>
    </xsl:text>
    <xsl:apply-templates />
    </xsl:template>
    </xsl:stylesheet>
    """)
    style = libxslt.parseStylesheetDoc(styledoc)
    doc = libxml2.parseFile(rdfname)
    result = style.applyStylesheet(doc, None)
    strres = style.saveResultToString(result)
    # release the native libxml2/libxslt objects
    style.freeStylesheet()
    doc.freeDoc()
    result.freeDoc()
    ritems = []
    if len(strres) == 0:
        return []
    else:
        lines = strres.split("\n")
        for line in lines:
            # take only the lines carrying a <myout> element
            if line.count("<myout>") > 0:
                # strip the surrounding <myout>...</myout> markup.
                # NOTE(review): offsets 9 and -8 exceed the 7/8 characters of
                # the literal tags, so they presumably also absorb serializer
                # whitespace — confirm against actual XSLT output.
                line = line[9:]
                line = line[:-8]
                if searchwith:
                    if line.count(searchwith) > 0:
                        ritems.append(line)
                else:
                    ritems.append(line)
    return ritems
if __name__ == "__main__":
    # No command-line interface; this module is only meant to be imported.
    pass
| kaplun/Invenio-OpenAIRE | modules/bibknowledge/lib/bibknowledge.py | Python | gpl-2.0 | 16,708 |
from ..broker import Broker
class DevicePolicyBroker(Broker):
controller = "device_policies"
def show(self, **kwargs):
"""Shows the details for the specified device policy.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DevicePolicyID: The internal NetMRI identifier for this device policy status record.
:type DevicePolicyID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device policy methods. The listed methods will be called on each device policy returned and included in the output. Available methods are: policy_name, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_policy: The device policy identified by the specified DevicePolicyID.
:rtype device_policy: DevicePolicy
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available device policies. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device whose policy status this record represents.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device whose policy status this record represents.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyID: The internal NetMRI identifier for this device policy status record.
:type DevicePolicyID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyID: The internal NetMRI identifier for this device policy status record.
:type DevicePolicyID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyID: The internal NetMRI identifier for the policy whose status this record represents.
:type PolicyID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyID: The internal NetMRI identifier for the policy whose status this record represents.
:type PolicyID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device policies as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device policy methods. The listed methods will be called on each device policy returned and included in the output. Available methods are: policy_name, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DevicePolicyID
:param sort: The data field(s) to use for sorting the output. Default is DevicePolicyID. Valid values are DevicePolicyID, DeviceID, PolicyID, DevicePolicyStartTime, DevicePolicyEndTime, DevicePolicyChangedCols, DevicePolicyTimestamp, DataSourceID, PolicyStatus, PolicyRulesTotal, PolicyRulesValid, PolicyRulesChecked, PolicyRulesInvalid, PolicyRulesPassed, PolicyRulesFailed, PolicyRulesError, PolicyRulesWarning, PolicyRulesInfo, PolicyRulesSkipped, PolicyRulesUnknown.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DevicePolicy. Valid values are DevicePolicyID, DeviceID, PolicyID, DevicePolicyStartTime, DevicePolicyEndTime, DevicePolicyChangedCols, DevicePolicyTimestamp, DataSourceID, PolicyStatus, PolicyRulesTotal, PolicyRulesValid, PolicyRulesChecked, PolicyRulesInvalid, PolicyRulesPassed, PolicyRulesFailed, PolicyRulesError, PolicyRulesWarning, PolicyRulesInfo, PolicyRulesSkipped, PolicyRulesUnknown. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_policies: An array of the DevicePolicy objects that match the specified input criteria.
:rtype device_policies: Array of DevicePolicy
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available device policies matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device whose policy status this record represents.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device whose policy status this record represents.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyChangedCols: The fields that changed between this revision of the record and the previous revision.
:type DevicePolicyChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyChangedCols: The fields that changed between this revision of the record and the previous revision.
:type DevicePolicyChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type DevicePolicyEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type DevicePolicyEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyID: The internal NetMRI identifier for this device policy status record.
:type DevicePolicyID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyID: The internal NetMRI identifier for this device policy status record.
:type DevicePolicyID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyStartTime: The starting effective time of this revision of the record.
:type DevicePolicyStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyStartTime: The starting effective time of this revision of the record.
:type DevicePolicyStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyTimestamp: The date and time this record was collected or calculated.
:type DevicePolicyTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyTimestamp: The date and time this record was collected or calculated.
:type DevicePolicyTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyID: The internal NetMRI identifier for the policy whose status this record represents.
:type PolicyID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyID: The internal NetMRI identifier for the policy whose status this record represents.
:type PolicyID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesChecked: The total number of rules that were checked against this device for this policy. Invalid rules and rules that are skipped due to the device not matching the rule filter are not counted as 'checked' rules.
:type PolicyRulesChecked: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesChecked: The total number of rules that were checked against this device for this policy. Invalid rules and rules that are skipped due to the device not matching the rule filter are not counted as 'checked' rules.
:type PolicyRulesChecked: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesError: The total number of rules in this policy that the device failed with error status.
:type PolicyRulesError: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesError: The total number of rules in this policy that the device failed with error status.
:type PolicyRulesError: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesFailed: The total number of rules in this policy that the device failed with info, warning, or error status.
:type PolicyRulesFailed: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesFailed: The total number of rules in this policy that the device failed with info, warning, or error status.
:type PolicyRulesFailed: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesInfo: The total number of rules in this policy that the device failed with info status.
:type PolicyRulesInfo: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesInfo: The total number of rules in this policy that the device failed with info status.
:type PolicyRulesInfo: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesInvalid: The total number of invalid rules that were in this policy at the time the policy was executed against this device.
:type PolicyRulesInvalid: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesInvalid: The total number of invalid rules that were in this policy at the time the policy was executed against this device.
:type PolicyRulesInvalid: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesPassed: The total number of rules in this policy that the device passed successfully.
:type PolicyRulesPassed: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesPassed: The total number of rules in this policy that the device passed successfully.
:type PolicyRulesPassed: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesSkipped: The total number of rules in this policy that were skipped due to the device not matching the rule filters.
:type PolicyRulesSkipped: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesSkipped: The total number of rules in this policy that were skipped due to the device not matching the rule filters.
:type PolicyRulesSkipped: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesTotal: The total number of rules that in this policy at the time the policy was executed against this device.
:type PolicyRulesTotal: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesTotal: The total number of rules that in this policy at the time the policy was executed against this device.
:type PolicyRulesTotal: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesUnknown: The total number of rules that could not be fully evaluated because information needed for the rule was not available (for example, the configuration file has not been collected for the device).
:type PolicyRulesUnknown: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesUnknown: The total number of rules that could not be fully evaluated because information needed for the rule was not available (for example, the configuration file has not been collected for the device).
:type PolicyRulesUnknown: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesValid: The total number of valid rules that were in this policy at the time the policy was executed against this device. An invalid rule generally only occurs if the XML rule build has been used and an improper XML format has been specified.
:type PolicyRulesValid: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesValid: The total number of valid rules that were in this policy at the time the policy was executed against this device. An invalid rule generally only occurs if the XML rule build has been used and an improper XML format has been specified.
:type PolicyRulesValid: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesWarning: The total number of rules in this policy that the device failed with warning status.
:type PolicyRulesWarning: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesWarning: The total number of rules in this policy that the device failed with warning status.
:type PolicyRulesWarning: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyStatus: The current status of this policy for this device.
:type PolicyStatus: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyStatus: The current status of this policy for this device.
:type PolicyStatus: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device policies as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device policy methods. The listed methods will be called on each device policy returned and included in the output. Available methods are: policy_name, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DevicePolicyID
:param sort: The data field(s) to use for sorting the output. Default is DevicePolicyID. Valid values are DevicePolicyID, DeviceID, PolicyID, DevicePolicyStartTime, DevicePolicyEndTime, DevicePolicyChangedCols, DevicePolicyTimestamp, DataSourceID, PolicyStatus, PolicyRulesTotal, PolicyRulesValid, PolicyRulesChecked, PolicyRulesInvalid, PolicyRulesPassed, PolicyRulesFailed, PolicyRulesError, PolicyRulesWarning, PolicyRulesInfo, PolicyRulesSkipped, PolicyRulesUnknown.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DevicePolicy. Valid values are DevicePolicyID, DeviceID, PolicyID, DevicePolicyStartTime, DevicePolicyEndTime, DevicePolicyChangedCols, DevicePolicyTimestamp, DataSourceID, PolicyStatus, PolicyRulesTotal, PolicyRulesValid, PolicyRulesChecked, PolicyRulesInvalid, PolicyRulesPassed, PolicyRulesFailed, PolicyRulesError, PolicyRulesWarning, PolicyRulesInfo, PolicyRulesSkipped, PolicyRulesUnknown. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device policies, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, DevicePolicyChangedCols, DevicePolicyEndTime, DevicePolicyID, DevicePolicyStartTime, DevicePolicyTimestamp, PolicyID, PolicyRulesChecked, PolicyRulesError, PolicyRulesFailed, PolicyRulesInfo, PolicyRulesInvalid, PolicyRulesPassed, PolicyRulesSkipped, PolicyRulesTotal, PolicyRulesUnknown, PolicyRulesValid, PolicyRulesWarning, PolicyStatus.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_policies: An array of the DevicePolicy objects that match the specified input criteria.
:rtype device_policies: Array of DevicePolicy
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available device policies matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, DevicePolicyChangedCols, DevicePolicyEndTime, DevicePolicyID, DevicePolicyStartTime, DevicePolicyTimestamp, PolicyID, PolicyRulesChecked, PolicyRulesError, PolicyRulesFailed, PolicyRulesInfo, PolicyRulesInvalid, PolicyRulesPassed, PolicyRulesSkipped, PolicyRulesTotal, PolicyRulesUnknown, PolicyRulesValid, PolicyRulesWarning, PolicyStatus.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device whose policy status this record represents. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevicePolicyChangedCols: The operator to apply to the field DevicePolicyChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevicePolicyChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevicePolicyChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevicePolicyChangedCols: If op_DevicePolicyChangedCols is specified, the field named in this input will be compared to the value in DevicePolicyChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevicePolicyChangedCols must be specified if op_DevicePolicyChangedCols is specified.
:type val_f_DevicePolicyChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevicePolicyChangedCols: If op_DevicePolicyChangedCols is specified, this value will be compared to the value in DevicePolicyChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevicePolicyChangedCols must be specified if op_DevicePolicyChangedCols is specified.
:type val_c_DevicePolicyChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevicePolicyEndTime: The operator to apply to the field DevicePolicyEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevicePolicyEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevicePolicyEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevicePolicyEndTime: If op_DevicePolicyEndTime is specified, the field named in this input will be compared to the value in DevicePolicyEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevicePolicyEndTime must be specified if op_DevicePolicyEndTime is specified.
:type val_f_DevicePolicyEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevicePolicyEndTime: If op_DevicePolicyEndTime is specified, this value will be compared to the value in DevicePolicyEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevicePolicyEndTime must be specified if op_DevicePolicyEndTime is specified.
:type val_c_DevicePolicyEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevicePolicyID: The operator to apply to the field DevicePolicyID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevicePolicyID: The internal NetMRI identifier for this device policy status record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevicePolicyID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevicePolicyID: If op_DevicePolicyID is specified, the field named in this input will be compared to the value in DevicePolicyID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevicePolicyID must be specified if op_DevicePolicyID is specified.
:type val_f_DevicePolicyID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevicePolicyID: If op_DevicePolicyID is specified, this value will be compared to the value in DevicePolicyID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevicePolicyID must be specified if op_DevicePolicyID is specified.
:type val_c_DevicePolicyID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevicePolicyStartTime: The operator to apply to the field DevicePolicyStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevicePolicyStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevicePolicyStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevicePolicyStartTime: If op_DevicePolicyStartTime is specified, the field named in this input will be compared to the value in DevicePolicyStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevicePolicyStartTime must be specified if op_DevicePolicyStartTime is specified.
:type val_f_DevicePolicyStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevicePolicyStartTime: If op_DevicePolicyStartTime is specified, this value will be compared to the value in DevicePolicyStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevicePolicyStartTime must be specified if op_DevicePolicyStartTime is specified.
:type val_c_DevicePolicyStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevicePolicyTimestamp: The operator to apply to the field DevicePolicyTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevicePolicyTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevicePolicyTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevicePolicyTimestamp: If op_DevicePolicyTimestamp is specified, the field named in this input will be compared to the value in DevicePolicyTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevicePolicyTimestamp must be specified if op_DevicePolicyTimestamp is specified.
:type val_f_DevicePolicyTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevicePolicyTimestamp: If op_DevicePolicyTimestamp is specified, this value will be compared to the value in DevicePolicyTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevicePolicyTimestamp must be specified if op_DevicePolicyTimestamp is specified.
:type val_c_DevicePolicyTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyID: The operator to apply to the field PolicyID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyID: The internal NetMRI identifier for the policy whose status this record represents. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyID: If op_PolicyID is specified, the field named in this input will be compared to the value in PolicyID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyID must be specified if op_PolicyID is specified.
:type val_f_PolicyID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyID: If op_PolicyID is specified, this value will be compared to the value in PolicyID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyID must be specified if op_PolicyID is specified.
:type val_c_PolicyID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesChecked: The operator to apply to the field PolicyRulesChecked. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesChecked: The total number of rules that were checked against this device for this policy. Invalid rules and rules that are skipped due to the device not matching the rule filter are not counted as 'checked' rules. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesChecked: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesChecked: If op_PolicyRulesChecked is specified, the field named in this input will be compared to the value in PolicyRulesChecked using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesChecked must be specified if op_PolicyRulesChecked is specified.
:type val_f_PolicyRulesChecked: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesChecked: If op_PolicyRulesChecked is specified, this value will be compared to the value in PolicyRulesChecked using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesChecked must be specified if op_PolicyRulesChecked is specified.
:type val_c_PolicyRulesChecked: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesError: The operator to apply to the field PolicyRulesError. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesError: The total number of rules in this policy that the device failed with error status. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesError: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesError: If op_PolicyRulesError is specified, the field named in this input will be compared to the value in PolicyRulesError using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesError must be specified if op_PolicyRulesError is specified.
:type val_f_PolicyRulesError: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesError: If op_PolicyRulesError is specified, this value will be compared to the value in PolicyRulesError using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesError must be specified if op_PolicyRulesError is specified.
:type val_c_PolicyRulesError: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesFailed: The operator to apply to the field PolicyRulesFailed. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesFailed: The total number of rules in this policy that the device failed with info, warning, or error status. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesFailed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesFailed: If op_PolicyRulesFailed is specified, the field named in this input will be compared to the value in PolicyRulesFailed using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesFailed must be specified if op_PolicyRulesFailed is specified.
:type val_f_PolicyRulesFailed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesFailed: If op_PolicyRulesFailed is specified, this value will be compared to the value in PolicyRulesFailed using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesFailed must be specified if op_PolicyRulesFailed is specified.
:type val_c_PolicyRulesFailed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesInfo: The operator to apply to the field PolicyRulesInfo. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesInfo: The total number of rules in this policy that the device failed with info status. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesInfo: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesInfo: If op_PolicyRulesInfo is specified, the field named in this input will be compared to the value in PolicyRulesInfo using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesInfo must be specified if op_PolicyRulesInfo is specified.
:type val_f_PolicyRulesInfo: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesInfo: If op_PolicyRulesInfo is specified, this value will be compared to the value in PolicyRulesInfo using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesInfo must be specified if op_PolicyRulesInfo is specified.
:type val_c_PolicyRulesInfo: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesInvalid: The operator to apply to the field PolicyRulesInvalid. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesInvalid: The total number of invalid rules that were in this policy at the time the policy was executed against this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesInvalid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesInvalid: If op_PolicyRulesInvalid is specified, the field named in this input will be compared to the value in PolicyRulesInvalid using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesInvalid must be specified if op_PolicyRulesInvalid is specified.
:type val_f_PolicyRulesInvalid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesInvalid: If op_PolicyRulesInvalid is specified, this value will be compared to the value in PolicyRulesInvalid using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesInvalid must be specified if op_PolicyRulesInvalid is specified.
:type val_c_PolicyRulesInvalid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesPassed: The operator to apply to the field PolicyRulesPassed. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesPassed: The total number of rules in this policy that the device passed successfully. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesPassed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesPassed: If op_PolicyRulesPassed is specified, the field named in this input will be compared to the value in PolicyRulesPassed using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesPassed must be specified if op_PolicyRulesPassed is specified.
:type val_f_PolicyRulesPassed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesPassed: If op_PolicyRulesPassed is specified, this value will be compared to the value in PolicyRulesPassed using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesPassed must be specified if op_PolicyRulesPassed is specified.
:type val_c_PolicyRulesPassed: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesSkipped: The operator to apply to the field PolicyRulesSkipped. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesSkipped: The total number of rules in this policy that were skipped due to the device not matching the rule filters. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesSkipped: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesSkipped: If op_PolicyRulesSkipped is specified, the field named in this input will be compared to the value in PolicyRulesSkipped using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesSkipped must be specified if op_PolicyRulesSkipped is specified.
:type val_f_PolicyRulesSkipped: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesSkipped: If op_PolicyRulesSkipped is specified, this value will be compared to the value in PolicyRulesSkipped using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesSkipped must be specified if op_PolicyRulesSkipped is specified.
:type val_c_PolicyRulesSkipped: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param op_PolicyRulesTotal: The operator to apply to the field PolicyRulesTotal. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesTotal: The total number of rules that were in this policy at the time the policy was executed against this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesTotal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesTotal: If op_PolicyRulesTotal is specified, the field named in this input will be compared to the value in PolicyRulesTotal using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesTotal must be specified if op_PolicyRulesTotal is specified.
:type val_f_PolicyRulesTotal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesTotal: If op_PolicyRulesTotal is specified, this value will be compared to the value in PolicyRulesTotal using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesTotal must be specified if op_PolicyRulesTotal is specified.
:type val_c_PolicyRulesTotal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesUnknown: The operator to apply to the field PolicyRulesUnknown. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesUnknown: The total number of rules that could not be fully evaluated because information needed for the rule was not available (for example, the configuration file has not been collected for the device). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesUnknown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesUnknown: If op_PolicyRulesUnknown is specified, the field named in this input will be compared to the value in PolicyRulesUnknown using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesUnknown must be specified if op_PolicyRulesUnknown is specified.
:type val_f_PolicyRulesUnknown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesUnknown: If op_PolicyRulesUnknown is specified, this value will be compared to the value in PolicyRulesUnknown using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesUnknown must be specified if op_PolicyRulesUnknown is specified.
:type val_c_PolicyRulesUnknown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param op_PolicyRulesValid: The operator to apply to the field PolicyRulesValid. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesValid: The total number of valid rules that were in this policy at the time the policy was executed against this device. An invalid rule generally only occurs if the XML rule builder has been used and an improper XML format has been specified. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesValid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesValid: If op_PolicyRulesValid is specified, the field named in this input will be compared to the value in PolicyRulesValid using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesValid must be specified if op_PolicyRulesValid is specified.
:type val_f_PolicyRulesValid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesValid: If op_PolicyRulesValid is specified, this value will be compared to the value in PolicyRulesValid using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesValid must be specified if op_PolicyRulesValid is specified.
:type val_c_PolicyRulesValid: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyRulesWarning: The operator to apply to the field PolicyRulesWarning. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyRulesWarning: The total number of rules in this policy that the device failed with warning status. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyRulesWarning: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyRulesWarning: If op_PolicyRulesWarning is specified, the field named in this input will be compared to the value in PolicyRulesWarning using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyRulesWarning must be specified if op_PolicyRulesWarning is specified.
:type val_f_PolicyRulesWarning: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyRulesWarning: If op_PolicyRulesWarning is specified, this value will be compared to the value in PolicyRulesWarning using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyRulesWarning must be specified if op_PolicyRulesWarning is specified.
:type val_c_PolicyRulesWarning: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_PolicyStatus: The operator to apply to the field PolicyStatus. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PolicyStatus: The current status of this policy for this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_PolicyStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_PolicyStatus: If op_PolicyStatus is specified, the field named in this input will be compared to the value in PolicyStatus using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PolicyStatus must be specified if op_PolicyStatus is specified.
:type val_f_PolicyStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_PolicyStatus: If op_PolicyStatus is specified, this value will be compared to the value in PolicyStatus using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PolicyStatus must be specified if op_PolicyStatus is specified.
:type val_c_PolicyStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device policies as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device policy methods. The listed methods will be called on each device policy returned and included in the output. Available methods are: policy_name, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DevicePolicyID
:param sort: The data field(s) to use for sorting the output. Default is DevicePolicyID. Valid values are DevicePolicyID, DeviceID, PolicyID, DevicePolicyStartTime, DevicePolicyEndTime, DevicePolicyChangedCols, DevicePolicyTimestamp, DataSourceID, PolicyStatus, PolicyRulesTotal, PolicyRulesValid, PolicyRulesChecked, PolicyRulesInvalid, PolicyRulesPassed, PolicyRulesFailed, PolicyRulesError, PolicyRulesWarning, PolicyRulesInfo, PolicyRulesSkipped, PolicyRulesUnknown.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DevicePolicy. Valid values are DevicePolicyID, DeviceID, PolicyID, DevicePolicyStartTime, DevicePolicyEndTime, DevicePolicyChangedCols, DevicePolicyTimestamp, DataSourceID, PolicyStatus, PolicyRulesTotal, PolicyRulesValid, PolicyRulesChecked, PolicyRulesInvalid, PolicyRulesPassed, PolicyRulesFailed, PolicyRulesError, PolicyRulesWarning, PolicyRulesInfo, PolicyRulesSkipped, PolicyRulesUnknown. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_policies: An array of the DevicePolicy objects that match the specified input criteria.
:rtype device_policies: Array of DevicePolicy
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def data_source(self, **kwargs):
    """Fetch the NetMRI collector (DataSource) that gathered this record.

    **Inputs**

    :param DevicePolicyID: The internal NetMRI identifier for this device
        policy status record. Required.
    :type DevicePolicyID: Integer

    **Outputs**

    :return: The NetMRI device that collected this record.
    :rtype: DataSource
    """
    method_name = self._get_method_fullname("data_source")
    return self.api_request(method_name, kwargs)
def infradevice(self, **kwargs):
    """Fetch the device whose policy status this record represents.

    **Inputs**

    :param DevicePolicyID: The internal NetMRI identifier for this device
        policy status record. Required.
    :type DevicePolicyID: Integer

    **Outputs**

    :return: The device whose policy status this record represents.
    :rtype: InfraDevice
    """
    method_name = self._get_method_fullname("infradevice")
    return self.api_request(method_name, kwargs)
def policy_name(self, **kwargs):
    """Fetch the name of the policy referenced by this record.

    **Inputs**

    :param DevicePolicyID: The internal NetMRI identifier for this device
        policy status record. Required.
    :type DevicePolicyID: Integer

    **Outputs**

    :return: The policy name.
    :rtype: String
    """
    method_name = self._get_method_fullname("policy_name")
    return self.api_request(method_name, kwargs)
def device(self, **kwargs):
    """Fetch the device whose policy status this record represents.

    **Inputs**

    :param DevicePolicyID: The internal NetMRI identifier for this device
        policy status record. Required.
    :type DevicePolicyID: Integer

    **Outputs**

    :return: The device whose policy status this record represents.
    :rtype: Device
    """
    method_name = self._get_method_fullname("device")
    return self.api_request(method_name, kwargs)
def summary(self, **kwargs):
    """Retrieve policies, devices, and per-device, per-policy status summaries.

    The start/limit parameters apply to the devices, not to the policies:
    if a device is returned, all of its policy status summaries are
    returned regardless of start/limit.

    **Inputs** (all optional)

    :param GroupID: Device group or list of device groups for which to
        obtain status.
    :type GroupID: Array of Integer
    :param show_skipped_ind: If true, include devices with policy status
        'Skipped'.
    :type show_skipped_ind: Boolean
    :param timestamp: Date and time for which to retrieve the policy
        status; the current status is returned if omitted.
    :type timestamp: DateTime
    :param start: The record number to return in the selected page of data.
    :type start: Integer
    :param limit: The maximum number of records returned per page.
    :type limit: Integer

    **Outputs**

    :return effective_policies: Policies as defined at the requested time.
    :rtype effective_policies: Array of EffectivePolicy
    :return devices: Device information as of the requested time.
    :rtype devices: Array of Device
    :return device_policies: The per-device, per-policy status.
    :rtype device_policies: Array of DevicePolicy
    """
    method_name = self._get_method_fullname("summary")
    return self.api_list_request(method_name, kwargs)
| infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_4_0/device_policy_broker.py | Python | apache-2.0 | 79,094 |
"""
@author: Geir Sporsheim
@license: see LICENCE for details
"""
from twisted.internet import defer
from twisted.conch.ssh.userauth import SSHUserAuthClient
class AutomaticUserAuthClient(SSHUserAuthClient):
    """User auth client that authenticates automatically from stored credentials.

    Supplies a password and/or key pair without prompting the user.
    Returning ``None`` from the ``get*`` callbacks tells Twisted not to
    retry that authentication method.
    """

    def __init__(self, user, connection,
                 password=None, privateKey=None, publicKey=None):
        SSHUserAuthClient.__init__(self, user, connection)
        self.password = password
        self.privateKey = privateKey
        self.publicKey = publicKey

    def getGenericAnswers(self, name, instruction, prompts):
        """Called when the server requests keyboard interactive authentication.

        Twisted expects the returned Deferred to fire with a list of answer
        *strings*, one per prompt.  The previous implementation appended the
        result of ``self.getPassword()`` (a Deferred, or ``None``) to the
        list, which the SSH layer cannot encode; append the stored password
        itself instead.
        """
        responses = []
        for _prompt, _echo in prompts:
            # Fall back to an empty answer when no password is stored.
            responses.append(self.password if self.password is not None else "")
        return defer.succeed(responses)

    def getPassword(self, prompt=None):
        if not self.password:
            return None  # None indicates we do not want to retry password auth.
        return defer.succeed(self.password)

    def getPrivateKey(self):
        if self.privateKey:
            return defer.succeed(self.privateKey)
        # defer.fail(None) would try to capture the "current" exception and
        # blow up when none is active; fail with an explicit error instead.
        return defer.fail(Exception("no private key available"))

    def getPublicKey(self):
        if not self.publicKey:
            return None  # None indicates public-key auth is unavailable.
        return defer.succeed(self.publicKey)
| sporsh/carnifex | carnifex/ssh/userauth.py | Python | mit | 1,372 |
# IDE inspection test fixture: the inline <warning ...> markers are expected
# highlighting ranges for PyCompatibilityInspection (Python 2.7 argument
# unpacking rules, PEP 448 generalizations). This is test data, not
# executable Python.
def foo(*args, **kwargs):
    print(args, kwargs)
foo(0,
    *[1],
    <warning descr="Python version 2.7 does not allow positional arguments after *expression">2</warning>,
    <warning descr="Python version 2.7 does not allow duplicate *expressions">*[3]</warning>,
    <warning descr="Python version 2.7 does not allow positional arguments after *expression">4</warning>,
    a='a',
    <warning descr="Python version 2.7 does not allow duplicate *expressions">*[6]</warning>,
    b='b',
    <warning descr="Python version 2.7 does not allow duplicate *expressions">*[7]</warning>,
    c='c',
    **{'d': 'd'},
    <warning descr="Python version 2.7 does not allow keyword arguments after **expression">e='e'</warning>,
    <warning descr="Python version 2.7 does not allow duplicate **expressions">**{'f': 'f'}</warning>)
| siosio/intellij-community | python/testData/inspections/PyCompatibilityInspection/argumentsUnpackingGeneralizations.py | Python | apache-2.0 | 828 |
import time
from machine import I2C
# Measurement modes for the MPL3115A2 driver below: the sensor is configured
# at init time to report either altitude or barometric pressure.
ALTITUDE = const(0)
PRESSURE = const(1)
class MPL3115A2exception(Exception):
    """Raised on MPL3115A2 configuration or communication errors."""
    pass
class MPL3115A2:
    """Driver for the NXP MPL3115A2 I2C pressure/altitude/temperature sensor.

    The register address constants below mirror the MPL3115A2 register map
    (names kept as-is, including the original spelling 'ARLARM'/'SORCE').
    """
    MPL3115_I2CADDR = const(0x60)
    MPL3115_STATUS = const(0x00)
    MPL3115_PRESSURE_DATA_MSB = const(0x01)
    MPL3115_PRESSURE_DATA_CSB = const(0x02)
    MPL3115_PRESSURE_DATA_LSB = const(0x03)
    MPL3115_TEMP_DATA_MSB = const(0x04)
    MPL3115_TEMP_DATA_LSB = const(0x05)
    MPL3115_DR_STATUS = const(0x06)
    MPL3115_DELTA_DATA = const(0x07)
    MPL3115_WHO_AM_I = const(0x0c)
    MPL3115_FIFO_STATUS = const(0x0d)
    MPL3115_FIFO_DATA = const(0x0e)
    # NOTE(review): FIFO_SETUP shares 0x0e with FIFO_DATA in this driver —
    # confirm against the datasheet before relying on it.
    MPL3115_FIFO_SETUP = const(0x0e)
    MPL3115_TIME_DELAY = const(0x10)
    MPL3115_SYS_MODE = const(0x11)
    MPL3115_INT_SORCE = const(0x12)
    MPL3115_PT_DATA_CFG = const(0x13)
    MPL3115_BAR_IN_MSB = const(0x14)
    MPL3115_P_ARLARM_MSB = const(0x16)
    MPL3115_T_ARLARM = const(0x18)
    MPL3115_P_ARLARM_WND_MSB = const(0x19)
    MPL3115_T_ARLARM_WND = const(0x1b)
    MPL3115_P_MIN_DATA = const(0x1c)
    MPL3115_T_MIN_DATA = const(0x1f)
    MPL3115_P_MAX_DATA = const(0x21)
    MPL3115_T_MAX_DATA = const(0x24)
    MPL3115_CTRL_REG1 = const(0x26)
    MPL3115_CTRL_REG2 = const(0x27)
    MPL3115_CTRL_REG3 = const(0x28)
    MPL3115_CTRL_REG4 = const(0x29)
    MPL3115_CTRL_REG5 = const(0x2a)
    MPL3115_OFFSET_P = const(0x2b)
    MPL3115_OFFSET_T = const(0x2c)
    MPL3115_OFFSET_H = const(0x2d)
    def __init__(self, pysense = None, sda = 'P22', scl = 'P21', mode = PRESSURE):
        """Configure the sensor in either PRESSURE or ALTITUDE mode.

        pysense: optional board object providing a ready-made .i2c bus;
                 when absent, a master-mode I2C bus is created on sda/scl.
        mode:    PRESSURE or ALTITUDE (module-level constants above).
        Raises MPL3115A2exception for an invalid mode or if the sensor
        never reports ready status.
        """
        if pysense is not None:
            self.i2c = pysense.i2c
        else:
            self.i2c = I2C(0, mode=I2C.MASTER, pins=(sda, scl))
        self.STA_reg = bytearray(1)  # reusable 1-byte buffer for status reads
        self.mode = mode
        if self.mode is PRESSURE:
            self.i2c.writeto_mem(MPL3115_I2CADDR, MPL3115_CTRL_REG1, bytes([0x38])) # barometer mode, not raw, oversampling 128, minimum time 512 ms
            self.i2c.writeto_mem(MPL3115_I2CADDR, MPL3115_PT_DATA_CFG, bytes([0x07])) # no events detected
            self.i2c.writeto_mem(MPL3115_I2CADDR, MPL3115_CTRL_REG1, bytes([0x39])) # active
        elif self.mode is ALTITUDE:
            self.i2c.writeto_mem(MPL3115_I2CADDR, MPL3115_CTRL_REG1, bytes([0xB8])) # altitude mode, not raw, oversampling 128, minimum time 512 ms
            self.i2c.writeto_mem(MPL3115_I2CADDR, MPL3115_PT_DATA_CFG, bytes([0x07])) # no events detected
            self.i2c.writeto_mem(MPL3115_I2CADDR, MPL3115_CTRL_REG1, bytes([0xB9])) # active
        else:
            raise MPL3115A2exception("Invalid Mode MPL3115A2")
        if self._read_status():
            pass
        else:
            raise MPL3115A2exception("Error with MPL3115A2")
    def _read_status(self):
        """Poll the STATUS register until data is ready.

        Returns True when bit 2 (0x04) is set, False on any other non-zero
        status.  NOTE(review): loops indefinitely (10 ms sleeps) while the
        register reads zero — there is no timeout.
        """
        while True:
            self.i2c.readfrom_mem_into(MPL3115_I2CADDR, MPL3115_STATUS, self.STA_reg)
            if(self.STA_reg[0] == 0):
                time.sleep(0.01)
                pass
            elif(self.STA_reg[0] & 0x04) == 4:
                return True
            else:
                return False
    def pressure(self):
        """Return the barometric pressure reading as a float.

        Only valid in PRESSURE mode; raises MPL3115A2exception otherwise.
        Assembles the value from the three OUT_P registers: an 18-bit
        integer part plus a 2-bit fraction (quarters) from the LSB.
        """
        if self.mode == ALTITUDE:
            raise MPL3115A2exception("Incorrect Measurement Mode MPL3115A2")
        OUT_P_MSB = self.i2c.readfrom_mem(MPL3115_I2CADDR, MPL3115_PRESSURE_DATA_MSB,1)
        OUT_P_CSB = self.i2c.readfrom_mem(MPL3115_I2CADDR, MPL3115_PRESSURE_DATA_CSB,1)
        OUT_P_LSB = self.i2c.readfrom_mem(MPL3115_I2CADDR, MPL3115_PRESSURE_DATA_LSB,1)
        return float((OUT_P_MSB[0] << 10) + (OUT_P_CSB[0] << 2) + ((OUT_P_LSB[0] >> 6) & 0x03) + ((OUT_P_LSB[0] >> 4) & 0x03) / 4.0)
    def altitude(self):
        """Return the altitude reading as a float.

        Only valid in ALTITUDE mode; raises MPL3115A2exception otherwise.
        The 16-bit integer part is sign-extended (two's complement) and a
        4-bit fraction (sixteenths) is taken from the LSB's high nibble.
        """
        if self.mode == PRESSURE:
            raise MPL3115A2exception("Incorrect Measurement Mode MPL3115A2")
        OUT_P_MSB = self.i2c.readfrom_mem(MPL3115_I2CADDR, MPL3115_PRESSURE_DATA_MSB,1)
        OUT_P_CSB = self.i2c.readfrom_mem(MPL3115_I2CADDR, MPL3115_PRESSURE_DATA_CSB,1)
        OUT_P_LSB = self.i2c.readfrom_mem(MPL3115_I2CADDR, MPL3115_PRESSURE_DATA_LSB,1)
        alt_int = (OUT_P_MSB[0] << 8) + (OUT_P_CSB[0])
        alt_frac = ((OUT_P_LSB[0] >> 4) & 0x0F)
        if alt_int > 32767:
            alt_int -= 65536  # two's-complement sign extension
        return float(alt_int + alt_frac / 16.0)
    def temperature(self):
        """Return the temperature reading as a float (available in both modes).

        8-bit signed integer part (MSB, sign-extended) plus an 8-bit
        fractional part (LSB / 256).
        """
        OUT_T_MSB = self.i2c.readfrom_mem(MPL3115_I2CADDR, MPL3115_TEMP_DATA_MSB,1)
        OUT_T_LSB = self.i2c.readfrom_mem(MPL3115_I2CADDR, MPL3115_TEMP_DATA_LSB,1)
        temp_int = OUT_T_MSB[0]
        temp_frac = OUT_T_LSB[0]
        if temp_int > 127:
            temp_int -= 256  # two's-complement sign extension
        return float(temp_int + temp_frac / 256.0)
| beia/beialand | practice/pycom-mqtt/Pysense/lib/MPL3115A2.py | Python | gpl-3.0 | 4,540 |
"""
Django settings for isrp project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3xvcm*gx(q(d4$x29^g!pmy##bzgw*0=+pr%67&v6c=t_rf&i('

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# django-crispy-forms: render forms with Bootstrap 3 markup.
CRISPY_TEMPLATE_PACK = 'bootstrap3'

# Application definition
# Includes django-allauth (account + socialaccount) and the project app.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'crispy_forms',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'isrp_app.apps.ISRPAppConfig',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'isrp.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'isrp.wsgi.application'

# Required by django.contrib.sites / allauth.
SITE_ID = 1

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE: credentials are expected to be filled in via settings_local (see the
# import at the bottom of this file).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Default model backend plus allauth's email/social login backend.
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pl-PL'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
# Naive (non-timezone-aware) datetimes are used throughout the project.
USE_TZ = False

LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/accounts/login/'
# Custom allauth adapter (name suggests open signup is disabled).
ACCOUNT_ADAPTER = 'isrp.adapters.NoNewUsersAccountAdapter'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)

from django.contrib.messages import constants as messages
# Map Django's ERROR message level onto Bootstrap's 'danger' CSS class.
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}

# Project-specific working-day boundaries (hours, 24h clock).
ISRP = {
    'DAY_BEGIN': 8,
    'DAY_END': 18
}
# Optionally layer local/secret overrides (DB credentials, DEBUG, ...) on top
# of the defaults above; absence of the module is expected in fresh checkouts.
try:
    from isrp.settings_local import *
except ImportError:
    pass
| marcin-pwr/isrp | isrp/settings.py | Python | mit | 4,030 |
#! /usr/bin/python2.7
#------------------------------------------------------------------------
# Copyright (c) 1997-2001 by Total Control Software
# All Rights Reserved
#------------------------------------------------------------------------
#
# Module Name: dbShelve.py
#
# Description: A reimplementation of the standard shelve.py that
# forces the use of cPickle, and DB.
#
# Creation Date: 11/3/97 3:39:04PM
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# 13-Dec-2000: Updated to be used with the new bsddb3 package.
# Added DBShelfCursor class.
#
#------------------------------------------------------------------------
"""Manage shelves of pickled objects using bsddb database files for the
storage.
"""
#------------------------------------------------------------------------
import sys

# Python 3 detection; the package-relative import spelling below is only
# valid syntax from 2.5 on, hence the exec() indirection.
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
    # Because this syntaxis is not valid before Python 2.5
    exec("from . import db")
else :
    import db

# Import cPickle on every supported version, silencing the removal warning
# where necessary (the name is rewritten to "pickle" by 2to3 for Python 3).
if sys.version_info[0] >= 3 :
    import cPickle  # Will be converted to "pickle" by "2to3"
else :
    if sys.version_info < (2, 6) :
        import cPickle
    else :
        # When we drop support for python 2.3 and 2.4
        # we could use: (in 2.5 we need a __future__ statement)
        #
        #    with warnings.catch_warnings():
        #        warnings.filterwarnings(...)
        #        ...
        #
        # We can not use "with" as is, because it would be invalid syntax
        # in python 2.3, 2.4 and (with no __future__) 2.5.
        # Here we simulate "with" following PEP 343 :
        import warnings
        w = warnings.catch_warnings()
        w.__enter__()
        try :
            warnings.filterwarnings('ignore',
                message='the cPickle module has been removed in Python 3.0',
                category=DeprecationWarning)
            import cPickle
        finally :
            w.__exit__()
        del w

#At version 2.3 cPickle switched to using protocol instead of bin
# _dumps() papers over the bin/protocol keyword difference across versions.
if sys.version_info >= (2, 3):
    HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL
# In python 2.3.*, "cPickle.dumps" accepts no
# named parameters. "pickle.dumps" accepts them,
# so this seems a bug.
    if sys.version_info < (2, 4):
        def _dumps(object, protocol):
            return cPickle.dumps(object, protocol)
    else :
        def _dumps(object, protocol):
            return cPickle.dumps(object, protocol=protocol)

else:
    HIGHEST_PROTOCOL = None
    def _dumps(object, protocol):
        return cPickle.dumps(object, bin=protocol)

# Pick the dict-like mixin base: collections.MutableMapping on 2.6+,
# UserDict.DictMixin before that (or a stub where even that is missing).
if sys.version_info < (2, 6) :
    try:
        from UserDict import DictMixin
    except ImportError:
        # DictMixin is new in Python 2.3
        class DictMixin: pass
    MutableMapping = DictMixin
else :
    import collections
    MutableMapping = collections.MutableMapping
#------------------------------------------------------------------------
def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
dbenv=None, dbname=None):
"""
A simple factory function for compatibility with the standard
shleve.py module. It can be used like this, where key is a string
and data is a pickleable object:
from bsddb import dbshelve
db = dbshelve.open(filename)
db[key] = data
db.close()
"""
if type(flags) == type(''):
sflag = flags
if sflag == 'r':
flags = db.DB_RDONLY
elif sflag == 'rw':
flags = 0
elif sflag == 'w':
flags = db.DB_CREATE
elif sflag == 'c':
flags = db.DB_CREATE
elif sflag == 'n':
flags = db.DB_TRUNCATE | db.DB_CREATE
else:
raise db.DBError, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb.db.DB_* flags"
d = DBShelf(dbenv)
d.open(filename, dbname, filetype, flags, mode)
return d
#---------------------------------------------------------------------------
class DBShelveError(db.DBError): pass
class DBShelf(MutableMapping):
    """A shelf to hold pickled objects, built upon a bsddb DB object.  It
    automatically pickles/unpickles data objects going to/from the DB.
    """
    def __init__(self, dbenv=None):
        # Keys/values live in the wrapped DB; values are pickled on the way in.
        self.db = db.DB(dbenv)
        self._closed = True
        if HIGHEST_PROTOCOL:
            self.protocol = HIGHEST_PROTOCOL
        else:
            self.protocol = 1


    def __del__(self):
        self.close()


    def __getattr__(self, name):
        """Many methods we can just pass through to the DB object.
        (See below)
        """
        return getattr(self.db, name)


    #-----------------------------------
    # Dictionary access methods

    def __len__(self):
        return len(self.db)


    def __getitem__(self, key):
        # Unpickle on read; KeyError propagates from the underlying DB.
        data = self.db[key]
        return cPickle.loads(data)


    def __setitem__(self, key, value):
        # Pickle on write with the shelf's configured protocol.
        data = _dumps(value, self.protocol)
        self.db[key] = data


    def __delitem__(self, key):
        del self.db[key]


    def keys(self, txn=None):
        # Optional transaction handle; None means autocommit/no txn.
        if txn is not None:
            return self.db.keys(txn)
        else:
            return self.db.keys()

    if sys.version_info >= (2, 6) :
        def __iter__(self) :  # XXX: Load all keys in memory :-(
            for k in self.db.keys() :
                yield k

        # Do this when "DB" support iteration
        # Or is it enough to pass thru "getattr"?
        #
        # def __iter__(self) :
        #     return self.db.__iter__()


    def open(self, *args, **kwargs):
        """Open the underlying DB; arguments are forwarded verbatim."""
        self.db.open(*args, **kwargs)
        self._closed = False


    def close(self, *args, **kwargs):
        self.db.close(*args, **kwargs)
        self._closed = True


    def __repr__(self):
        if self._closed:
            return '<DBShelf @ 0x%x - closed>' % (id(self))
        else:
            # NOTE: loads the whole shelf into memory to render it.
            return repr(dict(self.iteritems()))


    def items(self, txn=None):
        """Return a list of (key, unpickled value) pairs."""
        if txn is not None:
            items = self.db.items(txn)
        else:
            items = self.db.items()
        newitems = []

        for k, v in items:
            newitems.append( (k, cPickle.loads(v)) )
        return newitems

    def values(self, txn=None):
        """Return a list of unpickled values."""
        if txn is not None:
            values = self.db.values(txn)
        else:
            values = self.db.values()

        # Python 2 map() returns a list here.
        return map(cPickle.loads, values)

    #-----------------------------------
    # Other methods

    def __append(self, value, txn=None):
        # RECNO-only primitive: the db assigns the record number.
        data = _dumps(value, self.protocol)
        return self.db.append(data, txn)

    def append(self, value, txn=None):
        """Append a value; only meaningful for DB_RECNO shelves."""
        if self.get_type() == db.DB_RECNO:
            return self.__append(value, txn=txn)
        raise DBShelveError, "append() only supported when dbshelve opened with filetype=dbshelve.db.DB_RECNO"


    def associate(self, secondaryDB, callback, flags=0):
        """Associate a secondary index, unpickling values before the callback."""
        def _shelf_callback(priKey, priData, realCallback=callback):
            # Safe in Python 2.x because expresion short circuit
            if sys.version_info[0] < 3 or isinstance(priData, bytes) :
                data = cPickle.loads(priData)
            else :
                data = cPickle.loads(bytes(priData, "iso8859-1")) # 8 bits

            return realCallback(priKey, data)

        return self.db.associate(secondaryDB, _shelf_callback, flags)


    #def get(self, key, default=None, txn=None, flags=0):
    def get(self, *args, **kw):
        # We do it with *args and **kw so if the default value wasn't
        # given nothing is passed to the extension module.  That way
        # an exception can be raised if set_get_returns_none is turned
        # off.
        data = self.db.get(*args, **kw)
        try:
            return cPickle.loads(data)
        except (EOFError, TypeError, cPickle.UnpicklingError):
            return data  # we may be getting the default value, or None,
                         # so it doesn't need unpickled.

    def get_both(self, key, value, txn=None, flags=0):
        """Fetch and unpickle the record matching the (key, pickled value) pair."""
        data = _dumps(value, self.protocol)
        data = self.db.get(key, data, txn, flags)
        return cPickle.loads(data)


    def cursor(self, txn=None, flags=0):
        """Return a DBShelfCursor sharing this shelf's pickle protocol."""
        c = DBShelfCursor(self.db.cursor(txn, flags))
        c.protocol = self.protocol
        return c


    def put(self, key, value, txn=None, flags=0):
        data = _dumps(value, self.protocol)
        return self.db.put(key, data, txn, flags)


    def join(self, cursorList, flags=0):
        raise NotImplementedError


    #----------------------------------------------
    # Methods allowed to pass-through to self.db
    #
    #    close,  delete, fd, get_byteswapped, get_type, has_key,
    #    key_range, open, remove, rename, stat, sync,
    #    upgrade, verify, and all set_* methods.
#---------------------------------------------------------------------------
class DBShelfCursor:
    """Cursor over a DBShelf that pickles/unpickles values transparently."""
    def __init__(self, cursor):
        self.dbc = cursor

    def __del__(self):
        self.close()


    def __getattr__(self, name):
        """Some methods we can just pass through to the cursor object. (See below)"""
        return getattr(self.dbc, name)


    #----------------------------------------------

    def dup(self, flags=0):
        """Return a duplicate of this cursor, preserving the pickle protocol."""
        c = DBShelfCursor(self.dbc.dup(flags))
        c.protocol = self.protocol
        return c


    def put(self, key, value, flags=0):
        """Store the pickled value at key via the cursor."""
        data = _dumps(value, self.protocol)
        return self.dbc.put(key, data, flags)


    def get(self, *args):
        """Overloaded get(): dispatch on argument count to get_1/get_2/get_3."""
        count = len(args)  # a method overloading hack
        method = getattr(self, 'get_%d' % count)
        # Bug fix: the dispatched result was previously discarded, so
        # get() always returned None.
        return method(*args)

    def get_1(self, flags):
        rec = self.dbc.get(flags)
        return self._extract(rec)

    def get_2(self, key, flags):
        rec = self.dbc.get(key, flags)
        return self._extract(rec)

    def get_3(self, key, value, flags):
        data = _dumps(value, self.protocol)
        # Bug fix: the pickled value was computed but never passed to the
        # underlying cursor, silently ignoring the value argument.
        rec = self.dbc.get(key, data, flags)
        return self._extract(rec)


    def current(self, flags=0): return self.get_1(flags|db.DB_CURRENT)
    def first(self, flags=0): return self.get_1(flags|db.DB_FIRST)
    def last(self, flags=0): return self.get_1(flags|db.DB_LAST)
    def next(self, flags=0): return self.get_1(flags|db.DB_NEXT)
    def prev(self, flags=0): return self.get_1(flags|db.DB_PREV)
    def consume(self, flags=0): return self.get_1(flags|db.DB_CONSUME)
    def next_dup(self, flags=0): return self.get_1(flags|db.DB_NEXT_DUP)
    def next_nodup(self, flags=0): return self.get_1(flags|db.DB_NEXT_NODUP)
    def prev_nodup(self, flags=0): return self.get_1(flags|db.DB_PREV_NODUP)


    def get_both(self, key, value, flags=0):
        """Position on the exact (key, value) pair and return it unpickled."""
        data = _dumps(value, self.protocol)
        # Bug fix: pass the pickled value so the pair is actually matched
        # (previously flags was passed in the data position).
        rec = self.dbc.get_both(key, data, flags)
        return self._extract(rec)


    def set(self, key, flags=0):
        rec = self.dbc.set(key, flags)
        return self._extract(rec)

    def set_range(self, key, flags=0):
        rec = self.dbc.set_range(key, flags)
        return self._extract(rec)

    def set_recno(self, recno, flags=0):
        rec = self.dbc.set_recno(recno, flags)
        return self._extract(rec)

    set_both = get_both  # (deliberate alias)

    def _extract(self, rec):
        """Unpickle the data half of a (key, data) record; None stays None."""
        if rec is None:
            return None
        else:
            key, data = rec
            # Safe in Python 2.x because expresion short circuit
            if sys.version_info[0] < 3 or isinstance(data, bytes) :
                return key, cPickle.loads(data)
            else :
                return key, cPickle.loads(bytes(data, "iso8859-1")) # 8 bits

    #----------------------------------------------
    # Methods allowed to pass-through to self.dbc
    #
    # close, count, delete, get_recno, join_item
#---------------------------------------------------------------------------
| 2ndy/RaspIM | usr/lib/python2.7/bsddb/dbshelve.py | Python | gpl-2.0 | 12,204 |
import csv
class WiggleParser(object):
    """Parse variable-step wiggle (WIG) tracks.

    Warning - this does not implement the full specification!
    """

    def entries(self, input_fh):
        """Yield a WiggleEntry for every variableStep block in the file.

        The entry for a block is emitted when the next block starts; the
        final (or only) block is emitted after the input is exhausted.
        """
        track = None
        chrom = None
        block_span = None
        value_pairs = []
        for line in input_fh:
            fields = line[:-1].split()
            if not fields:
                continue
            first = fields[0]
            if first.startswith("track"):
                track = self._track_name(fields)
            elif first.startswith("variableStep"):
                if chrom:
                    # A new block starts: emit the finished one.
                    finished = WiggleEntry(track, chrom, block_span, value_pairs)
                    chrom = self._replicon(fields)
                    block_span = None
                    value_pairs = []
                    yield finished
                else:
                    chrom = self._replicon(fields)
            else:
                value_pairs.append([int(fields[0]), float(fields[1])])
        yield WiggleEntry(track, chrom, block_span, value_pairs)

    def _replicon(self, fields):
        """Return the 'chrom' attribute of a variableStep line."""
        return self._attrs_and_values(fields)["chrom"]

    def _track_name(self, fields):
        """Return the 'name' attribute of a track line."""
        return self._attrs_and_values(fields)["name"]

    def _attrs_and_values(self, fields):
        """Map attr=value tokens to a dict, stripping double quotes."""
        parsed = {}
        for token in fields:
            if "=" not in token:
                continue
            attr, value = token.split("=")
            parsed[attr] = value.replace("\"", "")
        return parsed
class WiggleEntry(object):
    """One variableStep block of a wiggle track.

    Attributes:
        track_name: value of the track line's 'name' attribute (or None).
        replicon: chromosome/replicon from the variableStep 'chrom' attribute.
        span: span value (never set by WiggleParser in this module, so
            currently always None).
        pos_value_pairs: list of [position, value] pairs.
    """
    def __init__(self, track_name, replicon, span, pos_value_pairs):
        self.track_name = track_name
        self.replicon = replicon
        self.span = span
        self.pos_value_pairs = pos_value_pairs
class WiggleWriter(object):
    """Serialize per-position coverage values in variable-step wiggle format."""

    def __init__(self, track_str, fh):
        self._fh = fh
        # Emit the track declaration line immediately.
        self._fh.write("track type=wiggle_0 name=\"%s\"\n" % (track_str))

    def write_replicons_coverages(
            self, replicon, pos_value_pairs, factor=1.0):
        """Write one variableStep block for `replicon`.

        Positions with a value of exactly 0 are omitted; the remaining
        values are multiplied by `factor`.
        """
        self._fh.write("variableStep chrom=%s span=1\n" % (replicon))
        lines = []
        for pos, coverage in pos_value_pairs:
            if coverage == 0.0:
                continue
            lines.append("%s %s" % (pos, coverage * factor))
        self._fh.write("\n".join(lines) + "\n")

    def close_file(self):
        self._fh.close()
| konrad/kufpybio | kufpybio/wiggle.py | Python | isc | 2,713 |
from django.db import models
import datetime
# Create your models here.
class Poll(models.Model):
    """A poll question together with its publication timestamp."""
    question = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __unicode__(self):
        # Python 2 style string representation (shown e.g. in the admin).
        return self.question
    def was_published_today(self):
        """Return True if the poll's publication date equals today's date."""
        return self.pub_date.date() == datetime.date.today()
    # Column header used by the Django admin for this computed field.
    was_published_today.short_description = 'Published today?'
class Choice(models.Model):
    """One selectable answer for a Poll, with its vote count."""
    # Reverse accessor: poll.choices.all()
    poll = models.ForeignKey(Poll,related_name='choices')
    choice = models.CharField(max_length=200)
    votes = models.IntegerField()
    def __unicode__(self):
        # Python 2 style string representation.
        return self.choice
| JanezStupar/tastypie_demo | polls/models.py | Python | mit | 647 |
# Descriptors for downloadable external resources, keyed by resource group.
# Each entry names the Biomart martservice URL, the local XML query file to
# POST, the output file to write, and a human-readable provenance note.
EXTERNAL_RESOURCES = {
    "ensembl" : [
        {
            "url" : "http://plants.ensembl.org/biomart/martservice?query=",
            "file" : "dosa_resources/ensembl_mapping.xml",
            "output" : "ensembl_mapping.list",
            "description" : "Source: Ensembl Plants databases. Downloaded from Biomart."
        },
        {
            "url" : "http://plants.ensembl.org/biomart/martservice?query=",
            "file" : "dosa_resources/uniprot_mapping.xml",
            "output" : "uniprot_mapping.list",
            "description" : "Source: UniProt + Ensembl Plants databases. Downloaded from Biomart."
        }
    ]
} | fikipollo/paintomics3 | PaintomicsServer/src/AdminTools/scripts/dosa_resources/download_conf.py | Python | gpl-3.0 | 861 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Modifier classes dealing with spectral domain changes or corrections."""
import logging
import xarray as xr
from satpy.modifiers import ModifierBase
try:
from pyspectral.near_infrared_reflectance import Calculator
except ImportError:
Calculator = None
try:
from pyorbital.astronomy import sun_zenith_angle
except ImportError:
sun_zenith_angle = None
logger = logging.getLogger(__name__)
class NIRReflectance(ModifierBase):
    """Get the reflective part of NIR bands."""

    TERMINATOR_LIMIT = 85.0
    MASKING_LIMIT = 88.0

    def __init__(self, sunz_threshold=TERMINATOR_LIMIT,
                 masking_limit=MASKING_LIMIT, **kwargs):
        """Collect custom configuration values.

        Args:
            sunz_threshold: The threshold sun zenith angle used when deriving
                the near infrared reflectance. Above this angle the derivation
                will assume this sun-zenith everywhere. Unless overridden, the
                default threshold of 85.0 degrees will be used.
            masking_limit: Mask the data (set to NaN) above this Sun zenith angle.
                By default the limit is at 88.0 degrees. If set to `None`, no masking
                is done.
        """
        self.sun_zenith_threshold = sunz_threshold
        self.masking_limit = masking_limit
        super(NIRReflectance, self).__init__(**kwargs)

    def __call__(self, projectables, optional_datasets=None, **info):
        """Get the reflectance part of an NIR channel.

        Not supposed to be used for wavelength outside [3, 4] µm.
        """
        projectables = self.match_data_arrays(projectables)
        # The helpers below iterate over the optional datasets; map the
        # default None to an empty list to avoid a TypeError.
        optional_datasets = optional_datasets or []
        return self._get_reflectance_as_dataarray(projectables, optional_datasets)

    def _get_reflectance_as_dataarray(self, projectables, optional_datasets):
        """Get the reflectance as a dataarray."""
        _nir, _tb11 = projectables
        da_nir = _nir.data
        da_tb11 = _tb11.data
        da_tb13_4 = self._get_tb13_4_from_optionals(optional_datasets)
        da_sun_zenith = self._get_sun_zenith_from_provided_data(projectables, optional_datasets)
        logger.info('Getting reflective part of %s', _nir.attrs['name'])
        reflectance = self._get_reflectance_as_dask(da_nir, da_tb11, da_tb13_4, da_sun_zenith, _nir.attrs)
        proj = self._create_modified_dataarray(reflectance, base_dataarray=_nir)
        proj.attrs['units'] = '%'
        return proj

    @staticmethod
    def _get_tb13_4_from_optionals(optional_datasets):
        """Pick the 13.4 µm brightness temperature (in K) if provided."""
        tb13_4 = None
        for dataset in optional_datasets:
            wavelengths = dataset.attrs.get('wavelength', [100., 0, 0])
            if (dataset.attrs.get('units') == 'K' and
                    wavelengths[0] <= 13.4 <= wavelengths[2]):
                tb13_4 = dataset.data
        return tb13_4

    @staticmethod
    def _get_sun_zenith_from_provided_data(projectables, optional_datasets):
        """Get the sunz from available data or compute it if unavailable."""
        sun_zenith = None
        for dataset in optional_datasets:
            if dataset.attrs.get("standard_name") == "solar_zenith_angle":
                sun_zenith = dataset.data
        if sun_zenith is None:
            if sun_zenith_angle is None:
                raise ImportError("Module pyorbital.astronomy needed to compute sun zenith angles.")
            _nir = projectables[0]
            lons, lats = _nir.attrs["area"].get_lonlats(chunks=_nir.data.chunks)
            sun_zenith = sun_zenith_angle(_nir.attrs['start_time'], lons, lats)
        return sun_zenith

    def _create_modified_dataarray(self, reflectance, base_dataarray):
        """Wrap *reflectance* in a DataArray carrying the base array's metadata."""
        proj = xr.DataArray(reflectance, dims=base_dataarray.dims,
                            coords=base_dataarray.coords, attrs=base_dataarray.attrs.copy())
        proj.attrs['sun_zenith_threshold'] = self.sun_zenith_threshold
        proj.attrs['sun_zenith_masking_limit'] = self.masking_limit
        self.apply_modifier_info(base_dataarray, proj)
        return proj

    def _get_reflectance_as_dask(self, da_nir, da_tb11, da_tb13_4, da_sun_zenith, metadata):
        """Calculate 3.x reflectance in % with pyspectral from dask arrays."""
        reflectance_3x_calculator = self._init_reflectance_calculator(metadata)
        return reflectance_3x_calculator.reflectance_from_tbs(da_sun_zenith, da_nir, da_tb11, tb_ir_co2=da_tb13_4) * 100

    def _init_reflectance_calculator(self, metadata):
        """Initialize the 3.x reflectance derivations."""
        if not Calculator:
            logger.info("Couldn't load pyspectral")
            raise ImportError("No module named pyspectral.near_infrared_reflectance")
        reflectance_3x_calculator = Calculator(metadata['platform_name'], metadata['sensor'], metadata['name'],
                                               sunz_threshold=self.sun_zenith_threshold,
                                               masking_limit=self.masking_limit)
        return reflectance_3x_calculator
class NIREmissivePartFromReflectance(NIRReflectance):
    """Get the emissive part of NIR bands."""

    def __init__(self, sunz_threshold=None, **kwargs):
        """Collect custom configuration values.

        Args:
            sunz_threshold: The threshold sun zenith angle used when deriving
                the near infrared reflectance. Above this angle the derivation
                will assume this sun-zenith everywhere. Default None, in which
                case the default threshold defined in Pyspectral will be used.
        """
        self.sunz_threshold = sunz_threshold
        super(NIREmissivePartFromReflectance, self).__init__(sunz_threshold=sunz_threshold, **kwargs)

    def __call__(self, projectables, optional_datasets=None, **info):
        """Get the emissive part an NIR channel after having derived the reflectance.

        Not supposed to be used for wavelength outside [3, 4] µm.
        """
        projectables = self.match_data_arrays(projectables)
        # Helpers iterate the optional datasets; normalize None to [].
        optional_datasets = optional_datasets or []
        return self._get_emissivity_as_dataarray(projectables, optional_datasets)

    def _get_emissivity_as_dataarray(self, projectables, optional_datasets):
        """Get the emissivity as a dataarray."""
        _nir, _tb11 = projectables
        da_nir = _nir.data
        da_tb11 = _tb11.data
        da_tb13_4 = self._get_tb13_4_from_optionals(optional_datasets)
        da_sun_zenith = self._get_sun_zenith_from_provided_data(projectables, optional_datasets)
        logger.info('Getting emissive part of %s', _nir.attrs['name'])
        emissivity = self._get_emissivity_as_dask(da_nir, da_tb11, da_tb13_4, da_sun_zenith, _nir.attrs)
        proj = self._create_modified_dataarray(emissivity, base_dataarray=_nir)
        proj.attrs['units'] = 'K'
        return proj

    def _get_emissivity_as_dask(self, da_nir, da_tb11, da_tb13_4, da_sun_zenith, metadata):
        """Get the emissivity from pyspectral."""
        reflectance_3x_calculator = self._init_reflectance_calculator(metadata)
        # Use the nir and thermal ir brightness temperatures and derive the reflectance using
        # PySpectral. The reflectance is stored internally in PySpectral and
        # needs to be derived first in order to get the emissive part.
        reflectance_3x_calculator.reflectance_from_tbs(da_sun_zenith, da_nir, da_tb11, tb_ir_co2=da_tb13_4)
        return reflectance_3x_calculator.emissive_part_3x()
| pytroll/satpy | satpy/modifiers/spectral.py | Python | gpl-3.0 | 8,141 |
from TSatPy import StateOperator, Estimator, State
from TSatPy.Clock import Metronome
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
import time
print('P-Estimator With a Propagated State')
# Estimator initial condition: 190 deg rotation about z with a slow spin.
x_ic = State.State(
    State.Quaternion([0,0,1],radians=190/180.0*np.pi),
    State.BodyRate([0,0,0.3]))
k = 0.2
# Proportional gain applied to both quaternion and body-rate error.
Kp = StateOperator.StateGain(
    StateOperator.QuaternionGain(k),
    StateOperator.BodyRateGain(np.eye(3) * k))
c = Metronome()
pid = Estimator.PID(c, ic=x_ic)
pid.set_Kp(Kp)
# "Measured" plant state the estimator should converge toward.
x_m = State.State(
    State.Quaternion([0,0.1,1],radians=44/180.0*np.pi),
    State.BodyRate([0,0,3.1]))
I = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
p = State.Plant(I, x_m, c)
N = 10
ts = []
measured = {
    'eulers': [],
    'scalars': [],
    'bodyrates': [],
}
est = {
    'eulers': [],
    'scalars': [],
    'bodyrates': [],
}
# Run for N seconds of wall time, logging plant vs. estimator state
# roughly every 0.1 s.
end_time = c.tick() + N
while c.tick() <= end_time:
    p.propagate()
    pid.update(p.x)
    ts.append(c.tick())
    measured['eulers'].append(p.x.q.vector.T.tolist()[0])
    measured['scalars'].append(p.x.q.scalar)
    measured['bodyrates'].append(p.x.w.w.T.tolist()[0])
    est['eulers'].append(pid.x_hat.q.vector.T.tolist()[0])
    est['scalars'].append(pid.x_hat.q.scalar)
    est['bodyrates'].append(pid.x_hat.w.w.T.tolist()[0])
    time.sleep(0.1)
def state_parameter_timeseries(x, measured, est):
    """Plot measured (red) vs. estimated (blue) state parameters over time.

    Left column: quaternion vector components and scalar; right column:
    body rates. *measured* and *est* are dicts with 'eulers', 'scalars'
    and 'bodyrates' time series; *x* is the shared time axis.
    """
    # (subplot position, measured series, estimated series) — replaces the
    # seven copy-pasted add_subplot/plot stanzas of the original.
    series = [
        (1, [e[0] for e in measured['eulers']], [e[0] for e in est['eulers']]),
        (3, [e[1] for e in measured['eulers']], [e[1] for e in est['eulers']]),
        (5, [e[2] for e in measured['eulers']], [e[2] for e in est['eulers']]),
        (7, measured['scalars'], est['scalars']),
        (2, [w[0] for w in measured['bodyrates']], [w[0] for w in est['bodyrates']]),
        (4, [w[1] for w in measured['bodyrates']], [w[1] for w in est['bodyrates']]),
        (6, [w[2] for w in measured['bodyrates']], [w[2] for w in est['bodyrates']]),
    ]
    fig = plt.figure(figsize=(11,9), dpi=80, facecolor='w', edgecolor='k')
    axes = []
    for pos, m_series, e_series in series:
        ax = fig.add_subplot(4, 2, pos)
        ax.plot(x, m_series, c='r', lw=2)
        ax.plot(x, e_series, c='b', lw=2)
        ax.grid(color='0.75', linestyle='--', linewidth=1)
        axes.append(ax)
    # Bottom plot of each column carries the time-axis label.
    axes[3].set_xlabel('$t(k)$')
    axes[6].set_xlabel('$t(k)$')
    for ax, label in zip(axes, ['q_1','q_2','q_3','q_0','\omega_1','\omega_2','\omega_3']):
        ax.set_ylabel('$%s$' % label)
    plt.tight_layout()
    plt.show()
# Render the measured-vs-estimated comparison for the collected run.
state_parameter_timeseries(ts, measured, est)
| MathYourLife/TSatPy-thesis | tex/sample_scripts/Estimators_02.py | Python | mit | 3,049 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs linker tests on a particular device."""
import logging
import os.path
import sys
import traceback
from pylib import constants
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.linker import test_case
from pylib.utils import apk_helper
# Name of the Android package to install for this to work.
_PACKAGE_NAME = 'ChromiumLinkerTest'
class LinkerExceptionTestResult(base_test_result.BaseTestResult):
  """Test result corresponding to a python exception in a host-custom test."""

  def __init__(self, test_name, exc_info):
    """Constructs a LinkerExceptionTestResult object.

    Args:
      test_name: name of the test which raised an exception.
      exc_info: exception info, ostensibly from sys.exc_info().
    """
    # Render the full traceback into the failure log.
    exc_type = exc_info[0]
    trace_info = ''.join(traceback.format_exception(*exc_info))
    log_msg = 'Exception:\n' + trace_info
    super(LinkerExceptionTestResult, self).__init__(
        test_name,
        base_test_result.ResultType.FAIL,
        log="%s %s" % (exc_type, log_msg))
class LinkerTestRunner(base_test_runner.BaseTestRunner):
  """Orchestrates running a set of linker tests.
  Any Python exceptions in the tests are caught and translated into a failed
  result, rather than being re-raised on the main thread.
  """
  #override
  def __init__(self, device, tool, push_deps, cleanup_test_files):
    """Creates a new LinkerTestRunner.
    Args:
      device: Attached android device.
      tool: Name of the Valgrind tool.
      push_deps: If True, push all dependencies to the device.
      cleanup_test_files: Whether or not to cleanup test files on device.
    """
    super(LinkerTestRunner, self).__init__(device, tool, push_deps,
                                           cleanup_test_files)
  #override
  def InstallTestPackage(self):
    # The test APK must have been built beforehand; fail fast otherwise.
    apk_path = os.path.join(
        constants.GetOutDirectory(), 'apks', '%s.apk' % _PACKAGE_NAME)
    if not os.path.exists(apk_path):
      raise Exception('%s not found, please build it' % apk_path)
    package_name = apk_helper.GetPackageName(apk_path)
    self.adb.ManagedInstall(apk_path, package_name)
  #override
  def RunTest(self, test):
    """Sets up and runs a test case.
    Args:
      test: An object which is ostensibly a subclass of LinkerTestCaseBase.
    Returns:
      A TestRunResults object which contains the result produced by the test
      and, in the case of a failure, the test that should be retried.
    """
    assert isinstance(test, test_case.LinkerTestCaseBase)
    try:
      results = test.Run(self.device)
    except Exception:
      # Convert the exception into a failed result so one broken test cannot
      # abort the whole run on the main thread.
      logging.exception('Caught exception while trying to run test: ' +
                        test.tagged_name)
      exc_info = sys.exc_info()
      results = base_test_result.TestRunResults()
      results.AddResult(LinkerExceptionTestResult(
          test.tagged_name, exc_info))
    if not results.DidRunPass():
      # Signal the caller that this test should be retried.
      return results, test
    else:
      return results, None
| patrickm/chromium.src | build/android/pylib/linker/test_runner.py | Python | bsd-3-clause | 3,271 |
"""
:codeauthor: Rupesh Tare <rupesht@saltstack.com>
:codeauthor: Herbert Buurman <herbert.buurman@ogd.nl>
"""
import pytest
import salt.modules.mine as mine
import salt.utils.mine
from salt.utils.odict import OrderedDict
from tests.support.mock import MagicMock, patch
class FakeCache:
    """Minimal in-memory stand-in for salt's minion data cache."""

    def __init__(self):
        self.data = {}

    def store(self, bank, key, value):
        """Record *value* under (bank, key) and return a success marker."""
        self.data[(bank, key)] = value
        return "FakeCache:StoreSuccess!"

    def fetch(self, bank, key):
        """Return the value stored under (bank, key), or {} when absent."""
        return self.data.get((bank, key), {})

    def debug(self):
        """Dump the whole cache to stdout for troubleshooting."""
        print("{}:FakeCache dump:\n{}".format(__name__, self.data))
@pytest.fixture
def mock_cache():
    # Fresh cache per test so stored mine data never leaks between tests.
    cache = FakeCache()
    return cache
@pytest.fixture
def configure_loader_modules(mock_cache):
    # Every match.* matcher resolves to the same minion id, so all
    # target-type permutations "hit" the webserver minion.
    mock_match = MagicMock(return_value="webserver")
    return {
        mine: {
            "__salt__": {
                "match.glob": mock_match,
                "match.pcre": mock_match,
                "match.list": mock_match,
                "match.grain": mock_match,
                "match.grain_pcre": mock_match,
                "match.ipcidr": mock_match,
                "match.compound": mock_match,
                "match.pillar": mock_match,
                "match.pillar_pcre": mock_match,
                # data.get/data.update back the local mine with the FakeCache.
                "data.get": lambda key: mock_cache.fetch("minions/webserver", key),
                "data.update": lambda key, value: mock_cache.store(
                    "minions/webserver", key, value
                ),
            }
        }
    }
def test_get_local_empty():
    """
    Tests getting function data from the local mine that does not exist.
    """
    local_opts = {"file_client": "local", "id": "webserver"}
    with patch.dict(mine.__opts__, local_opts):
        as_string = mine.get("*", "funky.doodle")
        as_list = mine.get("*", ["funky.doodle"])
    # Unknown functions yield an empty result in both calling styles.
    assert as_string == {}
    assert as_list == {}
def test_get_local_classic(mock_cache):
    """
    Tests getting function data from the local mine that was stored without minion-side ACL.
    This verifies backwards compatible reads from a salt mine.
    """
    # Prefill minion cache with a non-ACL (pre-ACL format) value.
    mock_cache.store("minions/webserver", "mine_cache", {"foobard": "barfood"})
    local_opts = {"file_client": "local", "id": "webserver"}
    with patch.dict(mine.__opts__, local_opts):
        as_string = mine.get("*", "foobard")
        as_list = mine.get("*", ["foobard"])
    assert as_string == {"webserver": "barfood"}
    assert as_list == {"foobard": {"webserver": "barfood"}}
def test_send_get_local(mock_cache):
    """
    Tests sending an item to the mine in the minion's local cache,
    and then immediately fetching it again (since tests are executed unordered).
    Also verify that the stored mine cache does not use ACL data structure
    without allow_tgt passed.
    """
    foo_ret = "baz"
    ip_ret = "2001:db8::1:3"
    with patch.dict(
        mine.__opts__, {"file_client": "local", "id": "webserver"}
    ), patch.dict(
        mine.__salt__,
        {
            "network.ip_addrs": MagicMock(return_value=ip_ret),
            "foo.bar": MagicMock(return_value=foo_ret),
        },
    ):
        ret = mine.send("ip_addr", mine_function="network.ip_addrs")
        mine.send("foo.bar")
        assert ret == "FakeCache:StoreSuccess!"
        # Without allow_tgt the cache stores plain values, not ACL wrappers.
        assert mock_cache.fetch("minions/webserver", "mine_cache") == {
            "ip_addr": ip_ret,
            "foo.bar": foo_ret,
        }
    with patch.dict(mine.__opts__, {"file_client": "local", "id": "webserver"}):
        ret_single = mine.get("*", "ip_addr")
        ret_single_dict = mine.get("*", ["ip_addr"])
        ret_multi = mine.get("*", "ip_addr,foo.bar")
        ret_multi2 = mine.get("*", ["ip_addr", "foo.bar"])
    # A single string target returns the classic {minion: value} layout;
    # list and comma-separated targets nest results per function.
    assert ret_single == {"webserver": ip_ret}
    assert ret_single_dict == {"ip_addr": {"webserver": ip_ret}}
    assert ret_multi == {
        "ip_addr": {"webserver": ip_ret},
        "foo.bar": {"webserver": foo_ret},
    }
    assert ret_multi == ret_multi2
def test_send_get_acl_local(mock_cache):
    """
    Tests sending an item to the mine in the minion's local cache,
    including ACL information (useless when only working locally, but hey),
    and then immediately fetching it again (since tests are executed unordered).
    Also verify that the stored mine cache has the correct structure (with ACL)
    when using allow_tgt and no ACL without allow_tgt.
    """
    foo_ret = "baz"
    ip_ret = "2001:db8::1:3"
    with patch.dict(
        mine.__opts__, {"file_client": "local", "id": "webserver"}
    ), patch.dict(
        mine.__salt__,
        {
            "network.ip_addrs": MagicMock(return_value=ip_ret),
            "foo.bar": MagicMock(return_value=foo_ret),
        },
    ):
        ret = mine.send(
            "ip_addr",
            mine_function="network.ip_addrs",
            allow_tgt="web*",
            allow_tgt_type="glob",
        )
        mine.send("foo.bar")
        assert ret == "FakeCache:StoreSuccess!"
        # Only the entry sent with allow_tgt is wrapped in the ACL structure.
        assert mock_cache.fetch("minions/webserver", "mine_cache") == {
            "ip_addr": {
                salt.utils.mine.MINE_ITEM_ACL_DATA: ip_ret,
                salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
                "allow_tgt": "web*",
                "allow_tgt_type": "glob",
            },
            "foo.bar": foo_ret,
        }
    with patch.dict(mine.__opts__, {"file_client": "local", "id": "webserver"}):
        ret_single = mine.get("*", "ip_addr")
    # The matcher fixture treats this minion as allowed, so the ACL wrapper
    # is stripped from the returned data.
    assert ret_single == {"webserver": ip_ret}
def test_send_master():
    """
    Tests sending an item to the mine stored on the master.
    This is done by capturing the load that is sent to the master.
    """
    foo_ret = "baz"
    # _mine_send is replaced by an identity function so the load it would
    # ship to the master is returned to the test for inspection.
    with patch.object(
        mine, "_mine_send", MagicMock(side_effect=lambda x, y: x)
    ), patch.dict(
        mine.__salt__, {"foo.bar": MagicMock(return_value=foo_ret)}
    ), patch.dict(
        mine.__opts__, {"file_client": "remote", "id": "foo"}
    ):
        ret = mine.send("foo.bar")
    assert ret == {
        "id": "foo",
        "cmd": "_mine",
        "data": {"foo.bar": foo_ret},
        "clear": False,
    }
def test_send_master_acl():
    """
    Tests sending an item to the mine stored on the master. Now with ACL.
    This is done by capturing the load that is sent to the master.
    """
    foo_ret = "baz"
    # _mine_send is replaced by an identity function, exposing the load.
    with patch.object(
        mine, "_mine_send", MagicMock(side_effect=lambda x, y: x)
    ), patch.dict(
        mine.__salt__, {"foo.bar": MagicMock(return_value=foo_ret)}
    ), patch.dict(
        mine.__opts__, {"file_client": "remote", "id": "foo"}
    ):
        ret = mine.send("foo.bar", allow_tgt="roles:web", allow_tgt_type="grains")
    # With allow_tgt, the shipped data carries the ACL wrapper structure.
    assert ret == {
        "id": "foo",
        "cmd": "_mine",
        "data": {
            "foo.bar": {
                salt.utils.mine.MINE_ITEM_ACL_DATA: foo_ret,
                salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
                "allow_tgt": "roles:web",
                "allow_tgt_type": "grains",
            },
        },
        "clear": False,
    }
def test_get_master():
    """
    Tests loading a mine item from the mine stored on the master.
    """
    mock_load = {
        "tgt_type": "qux",
        "tgt": "baz",
        "cmd": "_mine_get",
        "fun": "foo.bar",
        "id": "foo",
    }
    # mine.get should pass the master's reply through untouched.
    get_mock = MagicMock(return_value=mock_load)
    remote_opts = {"file_client": "remote", "id": "foo"}
    with patch.object(mine, "_mine_get", get_mock), patch.dict(
        mine.__opts__, remote_opts
    ):
        assert mine.get("*", "foo.bar") == mock_load
def test_get_master_exclude_minion():
    """
    Tests the exclude_minion-parameter for mine.get
    """
    remote_result = OrderedDict([("webserver", "value")])
    remote_opts = {"file_client": "remote", "id": "webserver"}
    with patch.object(
        mine, "_mine_get", MagicMock(return_value=remote_result)
    ), patch.dict(mine.__opts__, remote_opts):
        included = mine.get("*", "foo.bar", exclude_minion=False)
        excluded = mine.get("*", "foo.bar", exclude_minion=True)
    # The calling minion's own entry is filtered out when requested.
    assert included == {"webserver": "value"}
    assert excluded == {}
def test_update_local(mock_cache):
    """
    Tests the ``update``-function on the minion's local cache.
    Updates mine functions from pillar+config only.
    """
    kernel_ret = "Linux!"
    foo_ret = "baz"
    ip_ret = "2001:db8::1:3"
    # Mine functions as they would come from config/pillar, exercising all
    # supported declaration shapes (dict, empty list, list-with-args, ACL).
    config_mine_functions = {
        "ip_addr": {"mine_function": "network.ip_addrs"},
        "network.ip_addrs": [],
        "kernel": [
            {"mine_function": "grains.get"},
            "kernel",
            {"allow_tgt": "web*"},
        ],
        "foo.bar": {"allow_tgt": "G@roles:webserver", "allow_tgt_type": "compound"},
    }
    with patch.dict(
        mine.__opts__, {"file_client": "local", "id": "webserver"}
    ), patch.dict(
        mine.__salt__,
        {
            "config.merge": MagicMock(return_value=config_mine_functions),
            "grains.get": lambda x: kernel_ret,
            "network.ip_addrs": MagicMock(return_value=ip_ret),
            "foo.bar": MagicMock(return_value=foo_ret),
        },
    ):
        ret = mine.update()
    assert ret == "FakeCache:StoreSuccess!"
    # Check if the mine entries have been stored properly in the FakeCache.
    assert mock_cache.fetch("minions/webserver", "mine_cache") == {
        "ip_addr": ip_ret,
        "network.ip_addrs": ip_ret,
        "foo.bar": {
            salt.utils.mine.MINE_ITEM_ACL_DATA: foo_ret,
            salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
            "allow_tgt": "G@roles:webserver",
            "allow_tgt_type": "compound",
        },
        "kernel": {
            salt.utils.mine.MINE_ITEM_ACL_DATA: kernel_ret,
            salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
            "allow_tgt": "web*",
        },
    }
def test_update_local_specific(mock_cache):
    """
    Tests the ``update``-function on the minion's local cache.
    Updates mine functions from kwargs only.
    """
    foo_ret = "baz"
    ip_ret = "2001:db8::1:3"
    manual_mine_functions = {
        "ip_addr": {"mine_function": "network.ip_addrs"},
        "network.ip_addrs": [],
        "kernel": [
            {"mine_function": "grains.get"},
            "kernel",
            {"allow_tgt": "web*"},
        ],
        "foo.bar": {"allow_tgt": "G@roles:webserver", "allow_tgt_type": "compound"},
    }
    # config.merge returns {} so only the explicit kwargs are used.
    with patch.dict(
        mine.__opts__, {"file_client": "local", "id": "webserver"}
    ), patch.dict(
        mine.__salt__,
        {
            "config.merge": MagicMock(return_value={}),
            "grains.get": lambda x: "Linux!!",
            "network.ip_addrs": MagicMock(return_value=ip_ret),
            "foo.bar": MagicMock(return_value=foo_ret),
        },
    ):
        ret = mine.update(mine_functions=manual_mine_functions)
    assert ret == "FakeCache:StoreSuccess!"
    assert mock_cache.fetch("minions/webserver", "mine_cache") == {
        "ip_addr": ip_ret,
        "network.ip_addrs": ip_ret,
        "foo.bar": {
            salt.utils.mine.MINE_ITEM_ACL_DATA: foo_ret,
            salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
            "allow_tgt": "G@roles:webserver",
            "allow_tgt_type": "compound",
        },
        "kernel": {
            salt.utils.mine.MINE_ITEM_ACL_DATA: "Linux!!",
            salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
            "allow_tgt": "web*",
        },
    }
def test_update_master():
    """
    Tests whether the ``update``-function sends the correct data to the master.
    """
    kernel_ret = "Linux!"
    foo_ret = "baz"
    ip_ret = "2001:db8::1:3"
    config_mine_functions = {
        "ip_addr": {"mine_function": "network.ip_addrs"},
        "network.ip_addrs": [],
        "kernel": [{"mine_function": "grains.get"}, "kernel"],
        "foo.bar": {},
    }
    # Expected payload: without ACL options the data section is flat.
    mock_load = {
        "id": "webserver",
        "cmd": "_mine",
        "data": {
            "ip_addr": ip_ret,
            "network.ip_addrs": ip_ret,
            "foo.bar": foo_ret,
            "kernel": kernel_ret,
        },
        "clear": False,
    }
    # _mine_send is replaced by an identity function, exposing the load.
    with patch.object(
        mine, "_mine_send", MagicMock(side_effect=lambda x, y: x)
    ), patch.dict(
        mine.__opts__, {"file_client": "remote", "id": "webserver"}
    ), patch.dict(
        mine.__salt__,
        {
            "config.merge": MagicMock(return_value=config_mine_functions),
            "grains.get": lambda x: kernel_ret,
            "network.ip_addrs": MagicMock(return_value=ip_ret),
            "foo.bar": MagicMock(return_value=foo_ret),
        },
    ):
        # Verify the correct load
        assert mine.update() == mock_load
def test_delete_local(mock_cache):
    """
    Tests the ``delete``-function on the minion's local cache.

    Deleting the stored function must leave an empty mine cache behind.
    """
    # Prefill minion cache with a non-ACL value
    mock_cache.store("minions/webserver", "mine_cache", {"foobard": "barfood"})
    with patch.dict(mine.__opts__, {"file_client": "local", "id": "webserver"}):
        # Return value intentionally ignored; the cache state is the contract.
        mine.delete("foobard")
    assert mock_cache.fetch("minions/webserver", "mine_cache") == {}
def test_delete_master(mock_cache):
    """
    Tests whether the ``delete``-function sends the correct data to the master.
    """
    # Prefill minion cache with a non-ACL value
    mock_cache.store("minions/webserver", "mine_cache", {"foobard": "barfood"})
    expected_load = {
        "cmd": "_mine_delete",
        "fun": "foobard",
        "id": "foo",
    }
    # _mine_send is replaced by an identity function, exposing the load.
    send_mock = MagicMock(side_effect=lambda x, y: x)
    with patch.object(mine, "_mine_send", send_mock), patch.dict(
        mine.__opts__, {"file_client": "remote", "id": "foo"}
    ):
        assert mine.delete("foobard") == expected_load
def test_flush_local(mock_cache):
    """
    Tests the ``flush``-function on the minion's local cache.

    Flushing must leave an empty mine cache behind.
    """
    # Prefill minion cache with a non-ACL value
    mock_cache.store("minions/webserver", "mine_cache", {"foobard": "barfood"})
    with patch.dict(mine.__opts__, {"file_client": "local", "id": "webserver"}):
        # Return value intentionally ignored; the cache state is the contract.
        mine.flush()
    assert mock_cache.fetch("minions/webserver", "mine_cache") == {}
def test_flush_master():
    """
    Tests whether the ``flush``-function sends the correct data to the master.
    """
    expected_load = {"cmd": "_mine_flush", "id": "foo"}
    # _mine_send is replaced by an identity function, exposing the load.
    send_mock = MagicMock(side_effect=lambda x, y: x)
    with patch.object(mine, "_mine_send", send_mock), patch.dict(
        mine.__opts__, {"file_client": "remote", "id": "foo"}
    ):
        assert mine.flush() == expected_load
def test_valid():
    """
    Tests the ``valid``-function.
    Note that mine functions defined as list are returned in dict format.
    Mine functions that do not exist in __salt__ are not returned.
    """
    config_mine_functions = {
        "network.ip_addrs": [],
        "kernel": [
            {"mine_function": "grains.get"},
            "kernel",
            {"os": "win32", "v": "2018"},
        ],
        # "does.not_exist" is absent from __salt__, so "fubar" must be dropped.
        "fubar": [{"mine_function": "does.not_exist"}],
    }
    with patch.dict(
        mine.__salt__,
        {
            "config.merge": MagicMock(return_value=config_mine_functions),
            "network.ip_addrs": lambda: True,
            "grains.get": lambda: True,
        },
    ):
        ret = mine.valid()
    # list cant be made to set "dict can't be hashed" and order changes
    assert isinstance(ret["kernel"]["grains.get"], list)
    assert len(ret["kernel"]["grains.get"]) == 3
    for item in ("kernel", {"os": "win32"}, {"v": "2018"}):
        assert item in ret["kernel"]["grains.get"]
    # Neutralize the unordered list before the full-structure comparison.
    ret["kernel"]["grains.get"] = None
    assert ret == {"network.ip_addrs": [], "kernel": {"grains.get": None}}
def _docker_ps_response():
    """Build the fake ``docker.ps`` mine payload shared by the get_docker
    tests: one minion exposing two host interfaces and a single container
    publishing port 80 on all interfaces.

    Extracted because the two tests below previously duplicated this
    60-line fixture verbatim.
    """
    return {
        "localhost": {
            "host": {
                "interfaces": {
                    "docker0": {
                        "hwaddr": "88:99:00:00:99:99",
                        "inet": [
                            {
                                "address": "172.17.42.1",
                                "broadcast": None,
                                "label": "docker0",
                                "netmask": "255.255.0.0",
                            }
                        ],
                        "inet6": [
                            {
                                "address": "ffff::eeee:aaaa:bbbb:8888",
                                "prefixlen": "64",
                            }
                        ],
                        "up": True,
                    },
                    "eth0": {
                        "hwaddr": "88:99:00:99:99:99",
                        "inet": [
                            {
                                "address": "192.168.0.1",
                                "broadcast": "192.168.0.255",
                                "label": "eth0",
                                "netmask": "255.255.255.0",
                            }
                        ],
                        "inet6": [
                            {
                                "address": "ffff::aaaa:aaaa:bbbb:8888",
                                "prefixlen": "64",
                            }
                        ],
                        "up": True,
                    },
                }
            },
            "abcdefhjhi1234567899": {  # container Id
                "Ports": [
                    {
                        "IP": "0.0.0.0",  # we bind on every interfaces
                        "PrivatePort": 80,
                        "PublicPort": 80,
                        "Type": "tcp",
                    }
                ],
                "Image": "image:latest",
                "Info": {"Id": "abcdefhjhi1234567899"},
            },
        }
    }


def test_get_docker():
    """
    Test for Get all mine data for 'docker.ps' and run an
    aggregation.
    """
    with patch.object(mine, "get", return_value=_docker_ps_response()):
        ret = mine.get_docker()
    # Sort ifaces since that will change between py2 and py3
    ret["image:latest"]["ipv4"][80] = sorted(ret["image:latest"]["ipv4"][80])
    assert ret == {
        "image:latest": {"ipv4": {80: sorted(["172.17.42.1:80", "192.168.0.1:80"])}}
    }


def test_get_docker_with_container_id():
    """
    Test for Get all mine data for 'docker.ps' and run an
    aggregation, attaching the container id to every address.
    """
    with patch.object(mine, "get", return_value=_docker_ps_response()):
        ret = mine.get_docker(with_container_id=True)
    # Sort ifaces since that will change between py2 and py3
    ret["image:latest"]["ipv4"][80] = sorted(ret["image:latest"]["ipv4"][80])
    assert ret == {
        "image:latest": {
            "ipv4": {
                80: sorted(
                    [
                        ("172.17.42.1:80", "abcdefhjhi1234567899"),
                        ("192.168.0.1:80", "abcdefhjhi1234567899"),
                    ]
                )
            }
        }
    }
| saltstack/salt | tests/pytests/unit/modules/test_mine.py | Python | apache-2.0 | 21,050 |
#!/usr/bin/env python
import os
import time
# Connection settings and destination directory for the dumps.
username = 'root'
defaultdb = 'postgres'
port = '5433'
backupdir='/www/backup/'
date = time.strftime('%Y-%m-%d')
#GET DB NAMES
# Lists every database except the templates via psql's tuples-only output.
# NOTE(review): os.popen runs these commands through a shell; if any of the
# settings above ever become user-supplied, switch to subprocess.run with an
# argument list to avoid shell injection.
get_db_names="psql -U%s -d%s -p%s --tuples-only -c '\l' | awk -F\| '{ print $1 }' | grep -E -v '(template0|template1|^$)'" % (username, defaultdb, port)
#MAKE BACKUP OF SYSTEMGRANTS
os.popen("pg_dumpall -p%s -g|gzip -9 -c > %s/system.%s.gz" % (port, backupdir, date))
#MAKING DB BACKUP
for base in os.popen(get_db_names).readlines():
    base = base.strip()
    fulldir = backupdir + base
    if not os.path.exists(fulldir):
        os.mkdir(fulldir)
    # One custom-format (-F c) dump per database, niced to spare the host.
    filename = "%s/%s-%s.sql" % (fulldir, base, date)
    os.popen("nice -n 19 pg_dump -C -F c -U%s -p%s %s > %s" % (username, port, base, filename))
| ActiveState/code | recipes/Python/577793_PostgreSQL_database/recipe-577793.py | Python | mit | 804 |
#!/usr/bin/python3
"""
Given an unsorted array nums, reorder it such that nums[0] < nums[1] > nums[2]
< nums[3]....
Example 1:
Input: nums = [1, 5, 1, 1, 6, 4]
Output: One possible answer is [1, 4, 1, 5, 1, 6].
Example 2:
Input: nums = [1, 3, 2, 2, 3, 1]
Output: One possible answer is [2, 3, 1, 3, 1, 2].
Note:
You may assume all input has valid answer.
Follow Up:
Can you do it in O(n) time and/or in-place with O(1) extra space?
"""
from typing import List
class Solution:
    def wiggleSort(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.
        Median + 3-way partitioning
        """
        n = len(nums)
        # mid = self.find_kth(nums, 0, n, (n - 1) // 2)
        # median = nums[mid]
        median = list(sorted(nums))[n//2]
        # three way pivot
        # Single pass: values greater than the median are swapped into odd
        # indices (front to back), values smaller into even indices (back to
        # front); equal values stay put, producing the < > < > pattern.
        odd = 1
        even = n - 1 if (n - 1) % 2 == 0 else n - 2
        i = 0
        while i < n:
            if nums[i] < median:
                if i >= even and i % 2 == 0:
                    # Already a settled even slot; nothing to move.
                    i += 1
                    continue
                nums[i], nums[even] = nums[even], nums[i]
                even -= 2
            elif nums[i] > median:
                if i <= odd and i % 2 == 1:
                    # Already a settled odd slot; nothing to move.
                    i += 1
                    continue
                nums[i], nums[odd] = nums[odd], nums[i]
                odd += 2
            else:
                i += 1
    def find_kth(self, A, lo, hi, k):
        # Quickselect: return the final index of the k-th smallest element
        # of A[lo:hi]. Currently unused (see commented lines above).
        p = self.pivot(A, lo, hi)
        if k == p:
            return p
        elif k > p:
            return self.find_kth(A, p + 1, hi, k)
        else:
            return self.find_kth(A, lo, p, k)
    def pivot(self, A, lo, hi):
        # need 3-way pivot, otherwise TLE
        p = lo
        closed = lo
        for i in range(lo + 1, hi):
            if A[i] < A[p]:
                closed += 1
                A[closed], A[i] = A[i], A[closed]
        A[closed], A[p] = A[p], A[closed]
        return closed
if __name__ == "__main__":
Solution().wiggleSort([1, 5, 1, 1, 6, 4])
| algorhythms/LeetCode | 324 Wiggle Sort II py3.py | Python | mit | 2,047 |
import time
from bsddb3 import db
import random
import os
# Make sure you run "mkdir /tmp/dfagnan_db" first!
DA_FILE_HS = "/tmp/dfagnan_db/hash_db"
DB_SIZE = 100000
SEED = 10000000
def get_random():
    """Return a uniform random integer in [0, 63]."""
    return random.randrange(0, 64)
def get_random_char():
    """Return a uniform random lowercase ASCII letter ('a'-'z')."""
    return chr(random.randint(97, 122))
def main():
    # Interactive driver: opens (or creates) the hash DB, then loops on a
    # small text menu until the user quits.
    print("------------------------")
    print("HashDB")
    print("------------------------")
    databaseHS = db.DB()
    try:
        #create a Hash file
        databaseHS.open(DA_FILE_HS,None, db.DB_HASH, db.DB_CREATE)
    except:
        print("Error creating file.")
    while(1):
        print("| 1: Create and populate |\n| 2: search with key |\n| 3: search with data |\n| 4: search with key range |\n| 5: Destroy Database |\n| 6: Quit |\n")
        user_select = input("Select an option: \n")
        if user_select == '1':
            random_populate(databaseHS)
        # NOTE(review): plain `if` here while the rest use `elif`;
        # harmless (the options are mutually exclusive) but inconsistent.
        if user_select == '2':
            search_with_key(databaseHS)
        elif user_select == '3':
            search_with_data(databaseHS)
        elif user_select == '4':
            search_with_key_range(databaseHS)
        elif user_select == '5':
            print("deleting database")
            try:
                databaseHS.close()
                if os.path.exists('/tmp/dfagnan_db/hash_db'):
                    os.remove('/tmp/dfagnan_db/hash_db')
            except Exception as e:
                print (e)
        elif user_select == '6':
            # Truncate the shared answers file on exit.
            open("answers.txt", "w").close()
            break
        elif user_select == '0':
            # Hidden option: dump the whole database (helper defined elsewhere).
            print_db(databaseHS)
def search_with_key(databaseHS):
    """Prompt for a key, look it up in the hash DB, and time the lookup.

    The key and the result (or None) are appended to answers.txt; the
    elapsed lookup time is printed in micro-seconds.
    """
    user_key = input("Enter a key to search for: ")
    start_time = time.time()
    result = databaseHS.get(user_key.encode(encoding='UTF-8'))
    end_time = (time.time() - start_time)
    # `with` guarantees the file is closed even if a write raises
    # (the old open()/close() pair leaked the handle on error).
    with open("answers.txt", "a") as answer_file:
        answer_file.write(str(user_key.encode(encoding='UTF-8')) + "\n")
        answer_file.write(str(result) + "\n")
        answer_file.write("\n")
    print("------------------------")
    if result:
        print("Num entries found: 1")
    else:
        print("No entry found")
    print("time: " + str(end_time * 1000000) + " micro-seconds")
    print("------------------------")
def search_with_key_range(databaseHS):
    """Prompt for a [lower, upper] key range and report matching records.

    Hash files have no ordered range lookup, so this scans the whole DB
    with a cursor.  Matches are appended to answers.txt and the scan time
    is printed in micro-seconds.
    """
    user_lower_range = input("Input an lower key value: ")
    user_upper_range = input("Input an upper key value: ")
    curs = databaseHS.cursor()
    found_entries = []
    # Encode once instead of on every loop iteration (loop-invariant work
    # was previously inside the timed scan).
    lower = user_lower_range.encode(encoding='UTF-8')
    upper = user_upper_range.encode(encoding='UTF-8')
    start_time = time.time()
    record = curs.first()  # renamed from `iter`, which shadowed the builtin
    while record:
        if lower <= record[0] <= upper:
            found_entries.append(record)
        record = curs.next()
    end_time = (time.time() - start_time)
    # Append to answer file; `with` closes it even if a write raises.
    with open("answers.txt", "a") as answer_file:
        for entry in found_entries:
            answer_file.write(str(entry[0]) + "\n")
            answer_file.write(str(entry[1]) + "\n")
            answer_file.write("\n")
    print("------------------------")
    print("Num entries found: " + str(len(found_entries)))
    print("\ntime: " + str(end_time * 1000000) + " micro-seconds")
    print("------------------------")
def search_with_data(databaseHS):
    """Prompt for a value and report every record whose data matches it.

    Full cursor scan (there is no secondary index on values); matches are
    appended to answers.txt and the scan time printed in micro-seconds.
    """
    user_data = input("Enter data to be searched for: ")
    curs = databaseHS.cursor()
    found = []
    # Encode once; the old code re-encoded the search term per iteration
    # inside the timed loop.
    target = user_data.encode(encoding='UTF-8')
    start_time = time.time()
    record = curs.first()  # renamed from `iter`, which shadowed the builtin
    while record:
        if record[1] == target:
            found.append(record)
        record = curs.next()
    end_time = (time.time() - start_time)
    # `with` closes the file even if a write raises.
    with open("answers.txt", "a") as answer_file:
        for entry in found:
            answer_file.write(str(entry[1]) + "\n")
            answer_file.write(str(entry[0]) + "\n")
            answer_file.write("\n")
    print("------------------------")
    print("Num entries found: " + str(len(found)))
    print("time: " + str(end_time * 1000000) + " micro-seconds")
    print("------------------------")
def print_db(db):
    """Dump every (key, value) record in *db* to stdout, in cursor order."""
    cursor = db.cursor()
    record = cursor.first()
    while record:
        print(record)
        record = cursor.next()
def random_populate(db):
    """Fill *db* with DB_SIZE random records.

    Keys and values are random lowercase strings of 64-127 characters,
    generated from the fixed SEED so repeated runs insert identical data.
    The RNG call sequence is unchanged from the original implementation.
    """
    random.seed(SEED)
    for _ in range(DB_SIZE):
        key_len = 64 + get_random()
        # str.join is linear; the old `key += ch` loop built each string
        # with repeated reallocation.
        key = "".join(get_random_char() for _ in range(key_len))
        value_len = 64 + get_random()
        value = "".join(get_random_char() for _ in range(value_len))
        db.put(key.encode(encoding='UTF-8'), value.encode(encoding='UTF-8'))
if __name__ == "__main__":
    # Run the interactive menu loop only when executed as a script.
    main()
| deric92/C291Project2 | hashdb.py | Python | mit | 4,228 |
# coding: utf8
"""
System-wide constants.
"""
# 12306 left-ticket query endpoint; %s slots are: travel date,
# departure station code, arrival station code.
TICKETS_JSON_URL = 'https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=%s&leftTicketDTO.from_station' \
                   '=%s&leftTicketDTO.to_station=%s&purpose_codes=ADULT'
# Script served by 12306 that maps station names to their codes.
STATION_NAME_JS_URL = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9001'
# Spoofed mobile-Safari user agent used for requests to 12306.
USER_AGENT = 'Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) ' \
             'Version/4.0.4 Mobile/7B314 Safari/531.21.10'
REFER_URL = 'https://kyfw.12306.cn'
# SendCloud API
SEND_CLOUD_API_URL = 'http://www.sendcloud.net/webapi/mail.send_template.json'
# SendCloud TEMPLATE_NAME
SEND_CLOUD_TEMPLATE_NAME = 'ticket_query'
# SendCloud API_USER (placeholder substituted at deploy time)
SEND_CLOUD_API_USER = '$SEND_CLOUD_API_USER'
# SendCloud API_KEY (placeholder substituted at deploy time)
SEND_CLOUD_API_KEY = '$SEND_CLOUD_API_KEY'
| cls1991/12306-ticket-query | share/const.py | Python | apache-2.0 | 856 |
from django.conf.urls import url
from . import views
# Billing-cycle routes.  Every transaction-manipulating view is addressed
# by the billing cycle's UUID captured from the URL path.
urlpatterns = [
    url(r'^$', views.BillingCycleListView.as_view(), name='list'),
    url(r'^enact/(?P<uuid>.+)/$', views.CreateTransactionsView.as_view(), name='enact'),
    url(r'^reenact/(?P<uuid>.+)/$', views.RecreateTransactionsView.as_view(), name='reenact'),
    url(r'^unenact/(?P<uuid>.+)/$', views.DeleteTransactionsView.as_view(), name='unenact'),
    url(r'^send/(?P<uuid>.+)/$', views.SendNotificationsView.as_view(), name='send'),
]
| adamcharnock/swiftwind | swiftwind/billing_cycle/urls.py | Python | mit | 503 |
#/#############################################################################
#
# Stephan Neuhausen.
# Copyright (C) 2014-TODAY Stephan Neuhausen iad.de.
#
#/#############################################################################
import room
| SNeuhausen/training_management | models/room/__init__.py | Python | gpl-3.0 | 259 |
# TODO: switch this on with an environ variable or something.
# and document.
#def setup_package():
# import tests._util
# tests._util.enable_coercion_blocker()
| mmerickel/flatland | tests/__init__.py | Python | mit | 169 |
from __future__ import print_function
import os
from setuptools import setup
# Directory containing this setup.py; all package files are located
# relative to it.
ROOT = os.path.dirname(__file__)

# retrieve package information: execute jumpssh/__version__.py so its
# __title__/__version__/__author__/... assignments land in `about`
about = {}
with open(os.path.join(ROOT, 'jumpssh', '__version__.py')) as version_file:
    exec(version_file.read(), about)

# README.rst doubles as the PyPI long description
with open(os.path.join(ROOT, 'README.rst')) as readme_file:
    readme = readme_file.read()

setup(
    name=about['__title__'],
    version=about['__version__'],
    author=about['__author__'],
    author_email=about['__author_email__'],
    maintainer=about['__maintainer__'],
    maintainer_email=about['__maintainer_email__'],
    description=about['__description__'],
    long_description=readme,
    # long_description_content_type='text/markdown',
    url=about['__url__'],
    download_url=about['__download_url__'],
    license=about['__license__'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Quality Assurance',
        'Topic :: Software Development :: Testing',
        'Topic :: System',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Shells',
        'Topic :: System :: Software Distribution',
        'Topic :: Terminals',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    packages=['jumpssh'],
    platforms='Unix; MacOS X',
    install_requires=[
        'paramiko'
    ],
)
| t-cas/JumpSSH | setup.py | Python | mit | 1,992 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from itertools import ifilter
from flask_pluginengine import plugin_context
from sqlalchemy.orm import lazyload, joinedload, noload
from werkzeug.urls import url_parse
from indico.modules.events.layout import layout_settings
from indico.modules.events.features import event_settings as features_event_settings
from MaKaC.common.timezoneUtils import datetimeToUnixTimeInt
from MaKaC.fossils.subcontribution import ISubContribParticipationFossil,\
ISubContribParticipationFullFossil, ISubContributionFossil, ISubContributionWithSpeakersFossil
from MaKaC.fossils.contribution import IContributionParticipationFossil,\
IContributionFossil, IContributionWithSpeakersFossil, IContributionParticipationMinimalFossil, \
IContributionWithSubContribsFossil,\
IContributionParticipationTTDisplayFossil, \
IContributionParticipationTTMgmtFossil
from MaKaC.fossils.conference import IConferenceMinimalFossil, \
IConferenceEventInfoFossil, IConferenceFossil,\
ISessionFossil, ISessionSlotFossil, IMaterialMinimalFossil,\
IMaterialFossil, IConferenceParticipationFossil,\
IResourceMinimalFossil, ILocalFileMinimalFossil,\
IResourceFossil, ILocalFileFossil,\
ILocalFileExtendedFossil, IConferenceParticipationMinimalFossil,\
ICategoryFossil, ILocalFileAbstractMaterialFossil
from MaKaC.common.fossilize import fossilizes, Fossilizable
from MaKaC.common.url import ShortURLMapper
from MaKaC.contributionReviewing import Review
from indico.modules.events.models.legacy_mapping import LegacyEventMapping
from indico.modules.categories.models.legacy_mapping import LegacyCategoryMapping
from indico.modules.rb.models.rooms import Room
from indico.modules.users.legacy import AvatarUserWrapper
from indico.modules.groups.legacy import GroupWrapper
from indico.util.caching import memoize_request
from indico.util.i18n import L_
from indico.util.string import safe_upper, safe_slice, fix_broken_string, return_ascii, is_legacy_id, to_unicode
from MaKaC.review import AbstractFieldContent
import re
import os
import copy
import stat
from datetime import datetime, timedelta
from flask import session, request, has_request_context
from MaKaC.contributionReviewing import ReviewManager
from MaKaC.paperReviewing import ConferencePaperReview as ConferencePaperReview
from MaKaC.abstractReviewing import ConferenceAbstractReview as ConferenceAbstractReview
from pytz import timezone
from pytz import all_timezones
from persistent import Persistent
from BTrees.OOBTree import OOBTree, OOTreeSet
from BTrees.OIBTree import OIBTree,OISet,union
import MaKaC
from MaKaC.common import indexes
from MaKaC.common.timezoneUtils import nowutc, maxDatetime
import MaKaC.fileRepository as fileRepository
from MaKaC.schedule import (ConferenceSchedule, SessionSchedule, SlotSchTypeFactory, ContribSchEntry,
LinkedTimeSchEntry, BreakTimeSchEntry)
import MaKaC.review as review
from MaKaC.common import utils
from MaKaC.common.Counter import Counter
from MaKaC.common.ObjectHolders import ObjectHolder
from MaKaC.common.Locators import Locator
from MaKaC.accessControl import AccessController
from MaKaC.errors import MaKaCError, TimingError, ParentTimingError, EntryTimingError, NotFoundError, FormValuesError
from MaKaC import registration
from MaKaC.trashCan import TrashCanManager
from MaKaC.user import AvatarHolder
from MaKaC.common import pendingQueues
from MaKaC.common.info import HelperMaKaCInfo
from MaKaC.participant import Participation
from MaKaC.badge import BadgeTemplateManager
from MaKaC.poster import PosterTemplateManager
from MaKaC.common import mail
from MaKaC.i18n import _
from MaKaC.common.PickleJar import Updates
from MaKaC.schedule import ScheduleToJson
from indico.core.logger import Logger
from MaKaC.common.contextManager import ContextManager
import zope.interface
from indico.core import signals
from indico.core.db import DBMgr, db
from indico.core.db.event import SupportInfo
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.core.config import Config
from indico.core.index import IIndexableByStartDateTime, IUniqueIdProvider, Catalog
from indico.modules.events.logs import EventLogEntry, EventLogRealm, EventLogKind
from indico.modules.attachments.models.attachments import AttachmentType, Attachment
from indico.modules.attachments.models.folders import AttachmentFolder
from indico.modules.attachments.util import get_attached_items
from indico.util.date_time import utc_timestamp, format_datetime, format_human_timedelta
from indico.util.signals import values_from_signal
from indico.util.redis import write_client as redis_write_client
from indico.util.user import unify_user_args
from indico.util.redis import avatar_links
from indico.web.flask.util import url_for
class CoreObject(Persistent):
    """
    CoreObjects are Persistent objects that are employed by Indico's core
    """

    zope.interface.implements(IUniqueIdProvider,
                              IIndexableByStartDateTime)

    def setModificationDate(self, date=None):
        """
        Method called to notify the current object has been modified.

        :param date: modification timestamp; defaults to the current UTC
                     time when omitted (or falsy).
        """
        if not date:
            date = nowutc()
        self._modificationDS = date

    def __conform__(self, proto):
        # Zope adaptation hook: when adapted to IIndexableByStartDateTime,
        # expose the start date as a UTC timestamp; no other adaptations.
        if proto == IIndexableByStartDateTime:
            return utc_timestamp(self.getStartDate())
        else:
            return None
@memoize_request
def _get_room_mapping():
    """Return a {(location_name, room_name): Room} dict, cached per request."""
    return {(r.location.name, r.name): r for r in Room.query.options(lazyload(Room.owner), joinedload(Room.location))}
class Locatable:
    """
    Inherited by objects that imply a physical location:
        * Conferences
        * Sessions
        * SessionSlots
        * Contributions
        * SubContributions

    Subclasses must provide `places` and `rooms` lists (only index 0 is
    used), a `getLocationParent()` hook and `notifyModification()`.
    """

    @property
    def rb_room(self):
        # Map the legacy (location, room) name pair onto a room-booking
        # Room object; None when either name is missing or unknown.
        if not self.getLocation() or not self.getRoom():
            return None
        location = self.getLocation().getName()
        room = self.getRoom().getName()
        if not location or not room:
            return None
        key = fix_broken_string(location, True), fix_broken_string(room, True)
        return _get_room_mapping().get(key)

    def getLocationParent(self):
        """
        Returns the object the location info should be inherited from
        (Overridden)
        """
        raise Exception("Unimplemented method")

    def getLocation(self):
        """Own location if set, otherwise the parent's (inherited) one."""
        if self.getOwnLocation():
            return self.getOwnLocation()
        return self.getInheritedLocation()

    def getOwnLocation(self):
        if len(self.places) > 0:
            return self.places[0]
        return None

    def getInheritedLocation(self):
        return self.getLocationParent().getLocation()

    def getOwnRoom(self):
        if len(self.rooms) > 0:
            return self.rooms[0]
        return None

    def getRoom(self):
        """Own room if set, otherwise the parent's (inherited) one."""
        if self.getOwnRoom():
            return self.getOwnRoom()
        return self.getInheritedRoom()

    def getInheritedRoom(self):
        return self.getLocationParent().getRoom()

    def setLocation(self, newLocation):
        # None clears the slot; otherwise replace or append at index 0.
        # (The unused `oldLocation` local from the original was removed.)
        if newLocation is None:
            if len(self.places) > 0:
                del self.places[0]
        elif len(self.places) > 0:
            self.places[0] = newLocation
        else:
            self.places.append(newLocation)
        self.notifyModification()

    def setRoom(self, newRoom):
        # Same clear/replace/append semantics as setLocation.
        # (The unused `oldRoom` local from the original was removed.)
        if newRoom is None:
            if len(self.rooms) > 0:
                del self.rooms[0]
        elif len(self.rooms) > 0:
            self.rooms[0] = newRoom
        else:
            self.rooms.append(newRoom)
        self.notifyModification()
class CommonObjectBase(CoreObject, Fossilizable):
    """
    This class is for holding commonly used methods that are used by several classes.
    It is inherited by the following classes:

      * Category
      * Conference
      * Session
      * Contribution
      * SubContribution
      * Material
      * Resource
    """

    def getRecursiveManagerList(self):
        """Return the managers of this object plus, depending on the access
        protection level, those inherited from its owners (de-duplicated)."""
        av_set = set()

        # Get the AccessProtectionLevel for this
        apl = self.getAccessProtectionLevel()

        if apl == -1:
            # "absolutely public": no managers are collected
            pass
        elif apl == 1:
            # protected by itself
            # NOTE(review): this branch assumes getOwner() is never None for
            # explicitly protected objects -- confirm against callers.
            for av in self.getManagerList():
                av_set.add(av)
            for av in self.getOwner().getRecursiveManagerList():
                av_set.add(av)
        else:
            # inherited protection
            for av in self.getManagerList():
                av_set.add(av)
            if self.getOwner():
                for av in self.getOwner().getRecursiveManagerList():
                    av_set.add(av)
        return list(av_set)

    def getRecursiveAllowedToAccessList(self, onlyManagers=False):
        """Returns a set of Avatar resp. Group objects for those people resp.
        e-groups allowed to access this object as well as all parent objects.
        """

        # Initialize set of avatars/groups: this will hold those
        # people/groups explicitly
        # allowed to access this object
        av_set = set()

        # Get the AccessProtectionLevel for this
        apl = self.getAccessProtectionLevel()

        # If this object is "absolutely public", then return an empty set
        if apl == -1:
            pass

        # If this object is protected "all by itself", then get the list of
        # people/groups allowed to access it, plus managers of owner(s)
        elif apl == 1:
            al = self.getAllowedToAccessList() + self.getManagerList() + \
                self.getOwner().getRecursiveManagerList()
            if al is not None:
                for av in al:
                    av_set.add(av)

        # If access settings are inherited (and PRIVATE) from its owners, look at those.
        elif apl == 0 and self.isProtected():
            # If event is protected, then get list of people/groups allowed
            # to access, and add that to the set of avatars.
            al = self.getAllowedToAccessList() + self.getManagerList()
            if al is not None:
                for av in al:
                    av_set.add(av)

            # Add list of avatars/groups allowed to access parents objects.
            owner = self.getOwner()
            if owner is not None:
                owner_al = owner.getRecursiveAllowedToAccessList(onlyManagers=True)
                if owner_al is not None:
                    for av in owner_al:
                        av_set.add(av)

        # return set containing whatever avatars/groups we may have collected
        return av_set

    def canIPAccess(self, ip):
        """True when `ip` belongs to one of the protecting domains, or when
        no domain protection applies at all."""
        domains = self.getAccessController().getAnyDomainProtection()
        if domains:
            return any(domain.belongsTo(ip) for domain in domains)
        else:
            return True

    @property
    @memoize_request
    def attached_items(self):
        """
        CAUTION: this won't return empty directories (used by interface), nor things the
        current user can't see
        """
        if isinstance(self, (Contribution, Session, SubContribution, Conference, Category)):
            return get_attached_items(self, include_empty=False, include_hidden=False, preload_event=True)
        else:
            raise ValueError("Object of type '{}' cannot have attachments".format(type(self)))

    def attach_links(self, links, user=None):
        """
        Adds links from a mapping which is usually passed to the various
        JSON-RPC endpoints when importing contributions from outside,
        usually using the importer plugin.

        :param links: A dict mapping folder names (empty for the
                      default folder) to lists of URLs.
        :param user: The user who initiated the action. Defaults to
                     the current session user.
        """
        if not isinstance(self, (Contribution, Session, SubContribution, Conference)):
            raise ValueError("Object of type '{}' does not support attach_links".format(type(self)))
        if not links:
            return
        # Bug fix: the `user` argument used to be ignored and `session.user`
        # was always used, contradicting the documented default behaviour.
        if user is None:
            user = session.user
        for folder_title, urls in links.iteritems():
            if not urls:
                continue
            folder = AttachmentFolder.get_or_create(self, folder_title or None)
            db.session.add(folder)
            for url in urls:
                link = Attachment(user=user, type=AttachmentType.link, link_url=url,
                                  title=url_parse(url).netloc, description=url)
                folder.all_attachments.append(link)
                db.session.flush()
                signals.attachments.attachment_created.send(link, user=link.user, internal=True)

    def remove_attachments(self):
        """
        Send 'deleted' signal to all attachments/folders
        """
        # TODO 2.0: do not delete them, only set is_deleted on parent
        attachments = get_attached_items(self)
        if attachments:
            for folder in attachments['folders']:
                folder.is_deleted = True
                signals.attachments.folder_deleted.send(folder, user=session.user, internal=True)
                for attachment in folder.attachments:
                    attachment.is_deleted = True
            for attachment in attachments['files']:
                attachment.is_deleted = True
                signals.attachments.attachment_deleted.send(attachment, user=session.user, internal=True)
class CategoryManager(ObjectHolder):
    """ZODB-backed holder/index for all Category objects."""

    idxName = "categories"
    counterName = "CATEGORY"

    def getById(self, id_, quiet=False):
        """Return the category with the given id.

        Legacy-format ids are first translated through
        LegacyCategoryMapping.  Raises KeyError when the category does not
        exist, unless `quiet` is set (then returns None).
        """
        orig_id = id_ = str(id_)
        if is_legacy_id(id_):
            mapping = LegacyCategoryMapping.find_first(legacy_category_id=id_)
            id_ = str(mapping.category_id) if mapping is not None else None
        category = self._getIdx().get(id_) if id_ is not None else None
        if category is None and not quiet:
            raise KeyError(id_ if id_ is not None else orig_id)
        return category

    def add(self, category):
        ObjectHolder.add(self, category)
        # Add category to the name index
        nameIdx = indexes.IndexesHolder().getIndex('categoryName')
        nameIdx.index(category)

    def remove(self, category):
        ObjectHolder.remove(self, category)
        # remove category from the name index
        nameIdx = indexes.IndexesHolder().getIndex('categoryName')
        nameIdx.unindex(category)
        Catalog.getIdx('categ_conf_sd').remove_category(category.getId())

    def _newId(self):
        """
        returns a new id for the category
        the id must not already exist in the collection
        """
        id = ObjectHolder._newId(self)
        while self.hasKey(id):
            id = ObjectHolder._newId(self)
        return id

    def getRoot(self):
        """Return the root 'Home' category, creating it on first use."""
        root = DBMgr.getInstance().getDBConnection().root()
        if not root.has_key("rootCategory"):
            r = Category()
            r.setName("Home")
            self.add(r)
            root["rootCategory"] = r
        return root["rootCategory"]

    def getDefaultConference(self):
        """Return the default conference, creating it lazily."""
        dconf = HelperMaKaCInfo.getMaKaCInfoInstance().getDefaultConference()
        # identity test instead of the old `dconf == None` equality check
        if dconf is None:
            return HelperMaKaCInfo.getMaKaCInfoInstance().setDefaultConference(DefaultConference())
        else:
            return dconf
class Category(CommonObjectBase):
fossilizes(ICategoryFossil)
def __init__(self):
self.id = ""
self.name = ""
self.description = ""
self.subcategories = {}
self.conferences = OOTreeSet()
self._numConferences = 0
self.owner = None
self._defaultStyle = {"simple_event": "", "meeting": ""}
self._order = 0
self.__ac = AccessController(self)
self.__confCreationRestricted = 1
self.__confCreators = []
self._visibility = 999
self._statistics = {"events": None, "contributions": None, "files": None,
"users": None, "updated": None}
self._icon = None
self._tasksAllowed = False
self._tasks = {}
self._taskIdGenerator = 0
self._tasksPublic = True
self._tasksCommentPublic = True
self._tasksManagers = []
self._tasksCommentators = []
self._taskAccessList = []
self._timezone = ""
self._notifyCreationList = ""
def __cmp__(self, other):
if type(self) is not type(other):
# This is actually dangerous and the ZODB manual says not to do this
# because it relies on memory order. However, this branch should never
# be taken anyway since we do not store different types in the same set
# or use them as keys.
return cmp(hash(self), hash(other))
return cmp(self.getId(), other.getId())
@return_ascii
def __repr__(self):
path = '/'.join(self.getCategoryPathTitles()[:-1])
return '<Category({0}, {1}, {2})>'.format(self.getId(), self.getName(), path)
@property
def url(self):
if self.isRoot():
return url_for('misc.index')
else:
return url_for('category.categoryDisplay', self)
def getAccessController(self):
return self.__ac
def updateNonInheritingChildren(self, elem, delete=False):
pass
def getNotifyCreationList(self):
""" self._notifyCreationList is a string containing the list of
email addresses to send an email to when a new event is created"""
try:
return self._notifyCreationList
except:
self._notifyCreationList = ""
return self._notifyCreationList
def setNotifyCreationList(self, value):
self._notifyCreationList = value
def getUniqueId(self):
return "cat%s" % self.getId()
def getTaskList(self):
try:
return self._tasks.values()
except:
self._tasks = {}
return self._tasks.values()
def getTitle(self):
return self.name
def getTasks(self):
try:
return self._tasks
except:
self._tasks = {}
return self._tasks
def getTask(self, taskId):
return self.getTasks().get(taskId, None)
def _getTasksAllowed(self):
try:
return self._tasksAllowed
except:
self._tasksAllowed = False
return self._tasksAllowed
def tasksAllowed(self):
if self.hasSubcategories():
return False
return self._getTasksAllowed()
def setTasksAllowed(self):
if self.hasSubcategories():
return False
self._getTasksAllowed()
self._tasksAllowed = True
self.notifyModification()
return True
def setTasksForbidden(self):
if len(self.getTaskList()) > 0:
return False
self._getTasksAllowed()
self._tasksAllowed = False
self.notifyModification()
return False
def _getNewTaskId(self):
try:
if self._taskIdGenerator:
pass
except:
self._taskIdGenerator = 0
self._taskIdGenerator = self._taskIdGenerator + 1
return self._taskIdGenerator
def newTask(self, user):
if user is None:
return None
newTask = task.Task(self, self._getNewTaskId(), user)
self.getTasks()["%s" % newTask.getId()] = newTask
self.notifyModification()
return newTask
def tasksPublic(self):
try:
return self._tasksPublic
except:
self._tasksPublic = True
return self._tasksPublic
def setTasksPublic(self):
self.tasksPublic()
self._tasksPublic = True
def setTasksPrivate(self):
self.tasksPublic()
self._tasksPublic = False
def tasksCommentPublic(self):
try:
return self._tasksCommentPublic
except:
self._tasksCommentPublic = True
return self._tasksCommentPublic
def setTasksCommentPublic(self):
self.tasksCommentPublic()
self._tasksCommentPublic = True
def setTasksCommentPrivate(self):
self.tasksCommentPublic()
self._tasksCommentPublic = False
def getTasksManagerList(self):
try:
return self._tasksManagers
except:
self._tasksManagers = []
self._p_changed = 1
return self._tasksManagers
def getTasksManager(self, index):
length = len(self.getTasksManagerList())
if index < 0 or index >= length:
return None
return self._tasksManagers[index]
def addTasksManager(self, user):
if user is None:
return False
self.getTasksManagerList().append(user)
self._p_changed = 1
return True
def removeTasksManager(self, index):
length = len(self.getTasksManagerList())
if index < 0 or index >= length:
return False
del self.getTasksManagerList()[index]
self._p_changed = 1
return True
def getTasksCommentatorList(self):
try:
return self._tasksCommentators
except:
self._tasksCommentators = []
self._p_changed = 1
return self._tasksCommentators
def getTasksCommentator(self, index):
length = len(self.getTasksCommentatorList())
if index < 0 or index >= length:
return None
return self._tasksCommentators[index]
def addTasksCommentator(self, user):
if user is None:
return False
self.getTasksCommentatorList().append(user)
self._p_changed = 1
return True
def removeTasksCommentator(self, index):
length = len(self.getTasksCommentatorList())
if index < 0 or index >= length:
return False
del self._tasksCommentators[index]
self._p_changed = 1
return True
def getTasksAccessList(self):
try:
return self._tasksAccessList
except:
self._tasksAccessList = []
self._p_changed = 1
return self._tasksAccessList
def getTasksAccessPerson(self, index):
length = len(self.getTasksAccessList())
if index < 0 or index >= length:
return None
return self._tasksAccessList[index]
def addTasksAccessPerson(self, user):
if user is None:
return False
self.getTasksAccessList().append(user)
self._p_changed = 1
return True
def removeTasksAccessPerson(self, index):
length = len(self.getTasksAccessList())
if index < 0 or index >= length:
return False
del self.getTasksAccessList()[index]
self._p_changed = 1
return True
def hasSubcategories(self):
return len(self.subcategories.values()) > 0
def getVisibility(self):
"""
Returns category visibility, considering that it can be
restricted by parent categories
"""
owner = self.getOwner()
visibility = int(self._visibility)
# visibility can be restricted by parent categories
if owner:
return max(0, min(visibility, owner.getVisibility() + 1))
else:
return visibility
def setVisibility(self, visibility=999):
self._visibility = int(visibility)
self._reindex()
def isSuggestionsDisabled(self):
try:
return self._suggestions_disabled
except AttributeError:
self._suggestions_disabled = False
return False
def setSuggestionsDisabled(self, value):
self._suggestions_disabled = value
def _reindex(self):
catIdx = indexes.IndexesHolder().getIndex('category')
catIdx.reindexCateg(self)
catDateIdx = indexes.IndexesHolder().getIndex('categoryDate')
catDateIdx.reindexCateg(self)
catDateAllIdx = indexes.IndexesHolder().getIndex('categoryDateAll')
catDateAllIdx.reindexCateg(self)
def isRoot(self):
#to be improved
return self.owner is None
def getDefaultStyle(self, type):
try:
return self._defaultStyle[type]
except:
return ""
def setDefaultStyle(self, type, style, subcatsStyle=False):
try:
self._defaultStyle[type] = style
except:
self._defaultStyle = {"simple_event": "", "meeting": ""}
self._defaultStyle[type] = style
self.notifyModification()
#raise str(subcatsStyle)
if subcatsStyle:
categ = self.getSubCategoryList()
for cat in categ:
cat.setDefaultStyle(type, style, subcatsStyle)
##################################
# Fermi timezone awareness #
##################################
def getTimezone(self):
try:
if self._timezone not in all_timezones:
self.setTimezone('UTC')
return self._timezone
except:
self.setTimezone('UTC')
return 'UTC'
def setTimezone(self, tz):
self._timezone = tz
def changeConfTimezones(self, tz):
for conference in self.getConferenceList():
conference.moveToTimezone(tz)
##################################
# Fermi timezone awareness(end) #
##################################
def getOrder(self):
try:
return self._order
except:
self._order = 0
return 0
def setOrder(self, order):
self._order = order
def getId(self):
return self.id
def setId(self, newId):
self.id = str(newId.strip())
def getLocator(self):
"""Gives back (Locator) a globaly unique identification encapsulated
in a Locator object for the category instance """
d = Locator()
d["categId"] = self.getId()
return d
def getCategory(self):
return self
def getOwner(self):
return self.owner
def setOwner(self, newOwner):
if self.getOwner() is not None and newOwner is not None and self.getOwner() != newOwner:
self.move(newOwner)
else:
self.owner = newOwner
def getCategoryPath(self):
if self.isRoot():
return [self.getId()]
else:
l = self.getOwner().getCategoryPath()
l.append(self.getId())
return l
def iterParents(self):
categ = self
while not categ.isRoot():
categ = categ.getOwner()
yield categ
def getCategoryPathTitles(self):
# Breadcrumbs
breadcrumbs = []
cat = self
while cat:
breadcrumbs.insert(0, cat.getTitle())
cat = cat.getOwner()
return breadcrumbs
def delete(self, deleteConferences=0):
"""removes completely a category (and all its sub-items) from the
system"""
oldOwner = self.getOwner()
if self.isRoot():
raise MaKaCError(_("Root category cannot be deleted"), _("Category"))
if not deleteConferences:
if self.getNumConferences() > 0:
raise MaKaCError(_("This category still contains some conferences, please remove them first"), _("Category"))
for subcateg in self.getSubCategoryList():
subcateg.delete(deleteConferences)
for conference in self.getConferenceList():
self.removeConference(conference, delete=True)
self.getOwner()._removeSubCategory(self)
CategoryManager().remove(self)
for prin in self.__ac.getAccessList():
if isinstance(prin, AvatarUserWrapper):
prin.unlinkTo(self, "access")
for prin in self.__ac.getModifierList():
if isinstance(prin, AvatarUserWrapper):
prin.unlinkTo(self, "manager")
TrashCanManager().add(self)
signals.category.deleted.send(self)
return
def move(self, newOwner):
oldOwner = self.getOwner()
catDateIdx = indexes.IndexesHolder().getIndex('categoryDate')
catDateAllIdx = indexes.IndexesHolder().getIndex('categoryDateAll')
catDateIdx.unindexCateg(self)
catDateAllIdx.unindexCateg(self)
self.getOwner()._removeSubCategory(self)
newOwner._addSubCategory(self)
self._reindex()
catDateIdx.indexCateg(self)
catDateAllIdx.indexCateg(self)
signals.category.moved.send(self, old_parent=oldOwner, new_parent=newOwner)
def getName(self):
return self.name
def setName(self, newName):
oldName = self.name
self.name = newName.strip()
# Reindex when name changes
nameIdx = indexes.IndexesHolder().getIndex('categoryName')
nameIdx.unindex(self)
nameIdx.index(self)
signals.category.title_changed.send(self, old=oldName, new=newName)
def getDescription(self):
return self.description
def setDescription(self, newDesc):
self.description = newDesc.strip()
def moveConference(self, conf, toCateg):
"""
Moves a conference from this category to another one
"""
self.removeConference(conf)
toCateg._addConference(conf)
signals.event.moved.send(conf, old_parent=self, new_parent=toCateg)
    def _addSubCategory(self, newSc):
        """Attach *newSc* as a subcategory of this category.

        A category may contain either events or subcategories, never both:
        any events currently in this category are first moved into the new
        subcategory.  The conference counter is propagated upwards.
        """
        # Categories can only contain either conferences or other categories,
        # never both.  If events are present they are moved into the new
        # sub-category before it is attached.
        if len(self.conferences) > 0:
            for conf in self.getConferenceList():
                self.moveConference(conf, newSc)
        # Safety net: only reachable if some event could not be moved above.
        if len(self.conferences) > 0:
            raise MaKaCError(_("Cannot add subcategory: the current category already contains events"), _("Category"))
        newSc.setOwner(self)
        self.subcategories[newSc.getId()] = newSc
        self._incNumConfs(newSc.getNumConferences())
    def _removeSubCategory(self, sc):
        """if the given subcategory belongs to the current category it removes
            it from the subcategories list (don't use this method, use delete
            instead)
        """
        if sc in self.getSubCategoryList():
            self._decNumConfs(sc.getNumConferences())
            del self.subcategories[sc.getId()]
            # plain dict mutation is invisible to ZODB; flag the change
            self._p_changed = True
            sc.setOwner(None)
    def newSubCategory(self, protection):
        """Create, register and attach a new subcategory; return it.

        The new category is given the requested *protection* level and an
        order value one past the current last subcategory.
        """
        cm = CategoryManager()
        sc = Category()
        cm.add(sc)
        # set the protection
        sc.setProtection(protection)
        Catalog.getIdx('categ_conf_sd').add_category(sc.getId())
        signals.category.created.send(sc, parent=self)
        self._addSubCategory(sc)
        sc.setOrder(self.getSubCategoryList()[-1].getOrder() + 1)
        return sc
    def _incNumConfs(self, num=1):
        """Increases the number of conferences for the current category in a given number.
            WARNING: Only Categories must use this method!!!
            Propagates recursively to the parent category.
        """
        self._numConferences = self.getNumConferences()
        self._numConferences += num
        if self.getOwner() is not None:
            self.getOwner()._incNumConfs(num)
    def _decNumConfs(self, num=1):
        """Decreases the number of conferences for the current category in a given number.
            WARNING: Only Categories must use this method!!!
            Propagates recursively to the parent category.
        """
        self._numConferences = self.getNumConferences()
        self._numConferences -= num
        if self.getOwner() is not None:
            self.getOwner()._decNumConfs(num)
    def _addConference(self, newConf):
        """Attach a registered conference to this category and index it.

        Raises if the category already holds subcategories (events and
        subcategories are mutually exclusive) or if the event has no id yet.
        """
        if len(self.subcategories) > 0:
            raise MaKaCError(_("Cannot add event: the current category already contains some sub-categories"), _("Category"))
        if newConf.getId() == "":
            raise MaKaCError(_("Cannot add to a category an event which is not registered"), _("Category"))
        self.conferences.insert(newConf)
        newConf.addOwner(self)
        self._incNumConfs(1)
        self.indexConf(newConf)
    def getAccessKey(self):
        """Categories have no access keys; always return ''."""
        return ""
    def getModifKey(self):
        """Categories have no modification keys; always return ''."""
        return ""
    def indexConf(self, conf):
        """Index *conf* in the category index, then let the event index itself.

        Specific for category changes; delegates to Conference.indexConf()
        for the date-related indexes.
        """
        catIdx = indexes.IndexesHolder().getIndex('category')
        catIdx.indexConf(conf)
        conf.indexConf()
    def unindexConf(self, conf):
        """Reverse of indexConf(): drop *conf* from category and date indexes."""
        catIdx = indexes.IndexesHolder().getIndex('category')
        catIdx.unindexConf(conf)
        conf.unindexConf()
    def newConference(self, creator):
        """Create a conference owned by *creator*, attach it here and return it."""
        conf = Conference()
        ConferenceHolder().add(conf, creator)
        self._addConference(conf)
        signals.event.created.send(conf, parent=self)
        return conf
    def removeConference(self, conf, notify=True, delete=False):
        """Detach *conf* from this category; optionally delete it entirely.

        No-op if the conference is not contained here.  The event is
        unindexed first so index entries referencing this category go away.
        """
        if not (conf in self.conferences):
            return
        self.unindexConf(conf)
        self.conferences.remove(conf)
        if delete:
            conf.delete()
        conf.removeOwner(self, notify)
        self._decNumConfs(1)
    def getSubCategoryList(self):
        """Return subcategories sorted by (order, name) as Category objects.

        Builds sortable string keys "%04s<name>-<id>"; dashes are stripped
        from the name so the id can be recovered by splitting on the single
        remaining dash.  Assumes ids themselves contain no dash -- they are
        numeric in practice (TODO confirm).
        """
        subcategs = self.subcategories.values()
        cl = []
        for categ in subcategs:
            cl.append("%04s%s-%s" % (categ.getOrder(), categ.getName().replace("-", ""), categ.getId()))
        cl.sort()
        res = []
        for c in cl:
            id = c.split("-")[1]
            res.append(self.subcategories[id])
        return res
    def iteritems(self, *args):
        """Delegate to the underlying conferences container's iteritems()."""
        return self.conferences.iteritems(*args)
    def itervalues(self, *args):
        """Delegate to the underlying conferences container's itervalues()."""
        return self.conferences.itervalues(*args)
def getConferenceList(self, sortType=1):
"""returns the list of conferences included in the current category.
Thanks to the used structure the list is sorted by date.
We can choose other sorting types:
sortType=1--> By date
sortType=2--> Alphabetically
sortType=3--> Alphabetically - Reversed
"""
res = sorted(self.conferences, cmp=Conference._cmpByDate)
if sortType == 2:
res.sort(Conference._cmpTitle)
elif sortType == 3:
res.sort(Conference._cmpTitle)
res = reversed(res)
return res
    def iterConferences(self):
        """Return an iterable over the conferences directly in this category."""
        return self.conferences
    def iterAllConferences(self):
        """Yield every conference in this category and, recursively, in all
        subcategories (depth-first, this category's events first).
        """
        for conf in self.conferences:
            yield conf
        for subcateg in self.subcategories.itervalues():
            for conf in subcateg.iterAllConferences():
                yield conf
def getAllConferenceList(self):
"""returns the list of all conferences included in the current category
and in all its subcategories"""
res = self.getConferenceList()
subcategs = self.getSubCategoryList()
if subcategs != []:
for subcateg in subcategs:
res.extend(subcateg.getAllConferenceList())
return res
    def getRelativeEvent(self, which, conf=None):
        """Return an event from this category's start-date index.

        which -- 'first'/'last' for the chronologically extreme events, or
                 'next'/'prev' for the neighbours of *conf* (required then).
        Raises AttributeError on a missing *conf* or unknown *which*.
        """
        index = Catalog.getIdx('categ_conf_sd').getCategory(self.getId())
        if which == 'first':
            return list(index[index.minKey()])[0]
        elif which == 'last':
            return list(index[index.maxKey()])[-1]
        elif which in ('next', 'prev'):
            categIter = index.itervalues()
            if conf:
                prev = None
                # walk until we hit *conf*; the iterator is then positioned
                # just past it, so next() yields the following event
                for c in categIter:
                    if c == conf:
                        break
                    prev = c
                nextEvt = next(categIter, None)
                if which == 'next':
                    return nextEvt
                else:
                    return prev
            else:
                raise AttributeError("'conf' parameter missing")
        else:
            raise AttributeError("Unknown argument value: '%s'" % which)
    def _setNumConferences(self):
        """Initialise the cached conference counter from current contents."""
        self._numConferences = 0
        if self.conferences:
            self._incNumConfs(len(self.conferences))
        else:
            for sc in self.getSubCategoryList():
                self._incNumConfs(sc.getNumConferences())
    def getNumConferences(self):
        """returns the total number of conferences contained in the current
            category and all its sub-categories (if any)"""
        # This cached counter speeds up counting considerably, but gives
        # inaccurate results for conferences that live in several categories
        # (such a conference is counted once per parent category).  It also
        # generates more ZODB conflict errors; isolating the counter in a
        # separate object would reduce those.
        try:
            # lazy initialisation for objects persisted before the counter existed
            if self._numConferences:
                pass
        except AttributeError:
            self._setNumConferences()
        return self._numConferences
    def _getRepository(self):
        """Return the main local file repository, creating it on first use."""
        dbRoot = DBMgr.getInstance().getDBConnection().root()
        try:
            fr = dbRoot["local_repositories"]["main"]
        except KeyError, e:
            # first access: bootstrap the repositories container in the DB root
            fr = fileRepository.MaterialLocalRepository()
            dbRoot["local_repositories"] = OOBTree()
            dbRoot["local_repositories"]["main"] = fr
        return fr
    def removeResource(self, res):
        """No-op; categories hold no removable resources."""
        pass
    def setIcon(self, iconFile):
        """Archive *iconFile* as this category's icon, replacing any old one."""
        iconFile.setOwner(self)
        iconFile.setId("icon")
        iconFile.archive(self._getRepository())
        # -1 == public: icons are always accessible
        iconFile.setProtection(-1)
        if self.getIcon() is not None:
            self._icon.delete()
        self._icon = iconFile
        self.notifyModification()
    def getIcon(self):
        """Return the icon resource, or None; lazily adds the attribute."""
        try:
            if self._icon:
                pass
        except AttributeError, e:
            self._icon = None
        return self._icon
    def getIconURL(self):
        """Return the icon's URL, or '' when no icon is set."""
        if self.getIcon() is None:
            return ""
        return self._icon.getURL()
    def removeIcon(self):
        """Delete the current icon, if any, and mark the category modified."""
        if self.getIcon() is None:
            return
        self._icon.delete()
        self._icon = None
        self.notifyModification()
    def recoverIcon(self, icon):
        """Restore *icon* from the trash can as this category's icon."""
        icon.setOwner(self)
        if self.getIcon() is not None:
            self._icon.delete()
        self._icon = icon
        icon.recover()
        self.notifyModification()
    def getManagerList(self):
        """Return the principals with modification rights on this category."""
        return self.__ac.getModifierList()
    def grantModification(self, prin):
        """Grant *prin* modification rights; link avatars as 'manager'."""
        self.__ac.grantModification(prin)
        if isinstance(prin, AvatarUserWrapper):
            prin.linkTo(self, "manager")
    def revokeModification(self, prin):
        """Revoke *prin*'s modification rights; unlink avatar 'manager' link."""
        self.__ac.revokeModification(prin)
        if isinstance(prin, AvatarUserWrapper):
            prin.unlinkTo(self, "manager")
    def canModify(self, aw_or_user):
        """Tell whether an access wrapper or user may modify this category."""
        if hasattr(aw_or_user, 'getUser'):
            # unwrap an AccessWrapper down to its user
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user)
    def canUserModify(self, av):
        """Tell whether *av* may modify this category (rights are inherited
        from parent categories)."""
        inherited = 0
        if self.getOwner() is not None:
            inherited = self.getOwner().canUserModify(av)
        return inherited or self.__ac.canModify(av)
    def getAllowedToAccessList(self):
        """Return the principals explicitly granted access to this category."""
        return self.__ac.getAccessList()
    def canKeyAccess(self, aw):
        # Categories don't allow access keys
        return False
    def isProtected(self):
        """Tell whether the category is access-protected (possibly inherited)."""
        return self.__ac.isProtected()
    def getAccessProtectionLevel(self):
        """Return the raw protection level from the access controller."""
        return self.__ac.getAccessProtectionLevel()
    def isItselfProtected(self):
        """Tell whether protection is set on this category itself (not inherited)."""
        return self.__ac.isItselfProtected()
    def hasAnyProtection(self):
        """Tell whether any protection (own, domain, or inherited) applies."""
        if self.__ac.isProtected() or len(self.getDomainList()) > 0:
            return True
        if self.getAccessProtectionLevel() == -1:  # PUBLIC overrides parents
            return False
        if self.getOwner() is not None:
            return self.getOwner().hasAnyProtection()
        return False
    def setProtection(self, private):
        """
        Allows to change the category's access protection.
        Emits protection_changed only when the effective state changes.
        """
        oldProtection = 1 if self.isProtected() else -1
        self.__ac.setProtection(private)
        if oldProtection != private:
            signals.category.protection_changed.send(self, old=oldProtection, new=private)
    def hasProtectedOwner(self):
        """Tell whether some ancestor category is protected."""
        return self.__ac._getFatherProtection()
def isAllowedToAccess(self, av):
"""Says whether an avatar can access a category independently of it is
or not protected or domain filtered
"""
if self.__ac.canUserAccess(av) or self.canUserModify(av):
return True
if not self.isItselfProtected() and self.getOwner():
return self.getOwner().isAllowedToAccess(av)
    def canView(self, aw):
        """Tell whether *aw* may view this category or anything inside it.

        True if the category itself is accessible, or any contained event or
        subcategory is viewable (recursive).
        """
        if self.canAccess(aw):
            return True
        for conf in self.getConferenceList():
            if conf.canView(aw):
                return True
        for subcateg in self.getSubCategoryList():
            if subcateg.canView(aw):
                return True
        return False
    def canAccess(self, aw):
        """Tell whether access wrapper *aw* may access this category."""
        if not self.hasAnyProtection():
            return True
        if not self.isProtected():
            # domain checking only triggered if the category is PUBLIC
            return self.canIPAccess(request.remote_addr) or \
                self.isAllowedToCreateConference(aw.getUser()) or \
                self.isAllowedToAccess(aw.getUser())
        return self.isAllowedToCreateConference(aw.getUser()) or \
            self.isAllowedToAccess(aw.getUser())
    def grantAccess(self, prin):
        """Grant *prin* access; link avatars with an 'access' link."""
        self.__ac.grantAccess(prin)
        if isinstance(prin, AvatarUserWrapper):
            prin.linkTo(self, "access")
    def revokeAccess(self, prin):
        """Revoke *prin*'s access; remove avatar 'access' link."""
        self.__ac.revokeAccess(prin)
        if isinstance(prin, AvatarUserWrapper):
            prin.unlinkTo(self, "access")
    def isConferenceCreationRestricted(self):
        """Tell whether event creation here is limited to authorized users."""
        return self.__confCreationRestricted
    def restrictConferenceCreation(self):
        """Limit event creation to explicitly authorized users."""
        self.__confCreationRestricted = 1
    def allowConferenceCreation(self):
        """Allow anybody (subject to access rights) to create events here."""
        self.__confCreationRestricted = 0
    def grantConferenceCreation(self, prin):
        """Authorize *prin* (avatar or group) to create events here."""
        if prin not in self.__confCreators:
            self.__confCreators.append(prin)
            if isinstance(prin, AvatarUserWrapper):
                prin.linkTo(self, "creator")
            # list mutation is invisible to ZODB; flag the change
            self._p_changed = 1
    def revokeConferenceCreation(self, prin):
        """Withdraw *prin*'s event-creation authorization."""
        if prin in self.__confCreators:
            self.__confCreators.remove(prin)
            if isinstance(prin, AvatarUserWrapper):
                prin.unlinkTo(self, "creator")
            # list mutation is invisible to ZODB; flag the change
            self._p_changed = 1
    def getConferenceCreatorList(self):
        """Return the principals authorized to create events here."""
        return self.__confCreators
    def isAllowedToCreateConference(self, av):
        """Tell whether *av* may create events here (managers always may)."""
        if self.canUserModify(av):
            return 1
        # Avatar is directly in the list
        if av in self.__confCreators:
            return 1
        # Otherwise, if it is a member of one of the groups in the list...
        for group in self.__confCreators:
            if isinstance(group, GroupWrapper):
                if group.containsUser(av):
                    return 1
        return 0
    def canCreateConference(self, av):
        """Tell whether *av* may create an event here, honoring restriction."""
        if not self.isConferenceCreationRestricted():
            return 1
        return self.isAllowedToCreateConference(av)
    def requireDomain(self, dom):
        """Restrict access to network domain *dom* and emit the signal."""
        self.__ac.requireDomain(dom)
        signals.category.domain_access_granted.send(self, domain=dom)
    def freeDomain(self, dom):
        """Drop the network-domain restriction *dom* and emit the signal."""
        self.__ac.freeDomain(dom)
        signals.category.domain_access_revoked.send(self, domain=dom)
    def getDomainList(self):
        """Return the network domains required for access."""
        return self.__ac.getRequiredDomainList()
    def getStatistics(self):
        """Return the statistics dict, lazily creating it for old objects."""
        try:
            if self._statistics:
                pass
        except AttributeError, e:
            self._statistics = {}
        return self._statistics
    def notifyModification(self, raiseEvent=True):
        """Method called to notify the current category has been modified.

        Emits ``category.data_changed`` (unless *raiseEvent* is False) and
        flags the persistent object as dirty.
        """
        if raiseEvent:
            signals.category.data_changed.send(self)
        self._p_changed = 1
class CustomLocation(Persistent):
    """Free-text event location (name, address, room) with dict round-trip.

    getValues()/setValues() convert to/from a plain dict with keys
    'name', 'address' and 'room'.
    """

    def __init__(self, **locationData):
        self.name = ""
        self.address = ""
        self.room = ""
        # the constructor used to accept keyword data but silently discard
        # it; honor CustomLocation(name=..., address=..., room=...) instead
        if locationData:
            self.setValues(locationData)

    def setValues(self, data):
        """Bulk-set all fields from *data*; missing keys default to ''."""
        self.setName(data.get("name", ""))
        self.setAddress(data.get("address", ""))
        self.setRoom(data.get("room", ""))

    def getValues(self):
        """Return the fields as a plain dict (inverse of setValues)."""
        d = {}
        d["name"] = self.getName()
        d["address"] = self.getAddress()
        d["room"] = self.getRoom()
        return d

    def clone(self):
        """Return an independent copy of this location."""
        newCL = CustomLocation()
        newCL.setValues(self.getValues())
        return newCL

    def setName(self, newName):
        self.name = newName

    def getName(self):
        return self.name

    def setAddress(self, newAddress):
        self.address = newAddress

    def getAddress(self):
        return self.address

    def setRoom(self, newRoom):
        self.room = newRoom

    def getRoom(self):
        return self.room
class CustomRoom(Persistent):
    """Free-text event room with an optional full name resolved from the
    room-booking mapping.  Round-trips via getValues()/setValues().
    """
    def __init__(self):
        self.name = ""
        # resolved long room name, or None when unknown
        self.fullName = None
    def setValues(self, data):
        """Bulk-set from a dict with keys 'name' and (optional) 'fullName'."""
        self.setName(data.get("name", ""))
        self.setFullName(data.get("fullName"))
    def getValues(self):
        """Return the fields as a plain dict (inverse of setValues)."""
        d = {}
        d["name"] = self.getName()
        d["fullName"] = self.getFullName()
        return d
    def getId(self):
        """Custom rooms have no booking id; return the constant 'Custom'."""
        return "Custom"
    def clone(self):
        """Return an independent copy of this room."""
        newCR = CustomRoom()
        newCR.setValues(self.getValues())
        return newCR
    def setName(self, newName):
        self.name = newName.strip()
    def getName(self):
        return self.name
    def retrieveFullName(self, location):
        """Refresh self.fullName from the room mapping for *location*.

        No-op when *location* is falsy; stores None when the (location,
        name) pair is not found in the mapping.
        """
        if not location:
            return
        key = fix_broken_string(location, True), fix_broken_string(self.name, True)
        room = _get_room_mapping().get(key)
        full_name = room.full_name if room else None
        # only write (and dirty the object) when the value actually changed
        if getattr(self, 'fullName', None) != full_name:
            self.fullName = full_name
    def setFullName(self, newFullName):
        self.fullName = newFullName
    def getFullName(self):
        # lazy attribute creation for objects persisted before 'fullName'
        if not hasattr(self, 'fullName'):
            self.fullName = None
        return self.fullName
class ConferenceParticipation(Persistent, Fossilizable):
    """A person taking part in an event (chair, speaker, ...).

    Holds plain contact data and offers bulk get/set helpers plus
    avatar-based initialisation.  The @Updates decorators propagate field
    changes to the listed subclasses' bookkeeping.
    """

    fossilizes(IConferenceParticipationFossil, IConferenceParticipationMinimalFossil)

    def __init__(self):
        self._firstName = ""
        self._surName = ""
        self._email = ""
        self._affiliation = ""
        self._address = ""
        self._phone = ""
        self._title = ""
        self._fax = ""

    def _notifyModification(self):
        # overridden by subclasses that must propagate changes to an owner
        pass

    def setValues(self, data):
        """Bulk-set all fields from *data*; missing keys default to ''.

        Accepts both the correct key "affiliation" and the historical
        misspelling "affilation" (which getValues still emits so stored
        dicts keep round-tripping).
        """
        self.setFirstName(data.get("firstName", ""))
        self.setFamilyName(data.get("familyName", ""))
        self.setAffiliation(data.get("affiliation", data.get("affilation", "")))
        self.setAddress(data.get("address", ""))
        self.setEmail(data.get("email", ""))
        self.setFax(data.get("fax", ""))
        self.setTitle(data.get("title", ""))
        self.setPhone(data.get("phone", ""))
        self._notifyModification()

    def getValues(self):
        """Return all fields as a dict (inverse of setValues).

        NOTE: deliberately keeps the legacy misspelled "affilation" key for
        compatibility with stored data and existing consumers.
        """
        data = {}
        data["firstName"] = self.getFirstName()
        data["familyName"] = self.getFamilyName()
        data["affilation"] = self.getAffiliation()
        data["address"] = self.getAddress()
        data["email"] = self.getEmail()
        data["fax"] = self.getFax()
        data["title"] = self.getTitle()
        data["phone"] = self.getPhone()
        return data

    def setId(self, newId):
        self._id = newId

    def getId(self):
        return self._id

    def setDataFromAvatar(self, av):
        """Copy contact data from Avatar *av*; no-op when av is None."""
        if av is None:
            return
        self.setFirstName(av.getName())
        self.setFamilyName(av.getSurName())
        self.setEmail(av.getEmail())
        self.setAffiliation(av.getOrganisation())
        self.setAddress(av.getAddress())
        self.setPhone(av.getTelephone())
        self.setTitle(av.getTitle())
        self.setFax(av.getFax())
        self._notifyModification()

    def setDataFromOtherCP(self, cp):
        """Copy contact data from another ConferenceParticipation *cp*."""
        if cp is None:
            return
        self.setFirstName(cp.getFirstName())
        self.setFamilyName(cp.getFamilyName())
        self.setEmail(cp.getEmail())
        self.setAffiliation(cp.getAffiliation())
        self.setAddress(cp.getAddress())
        self.setPhone(cp.getPhone())
        self.setTitle(cp.getTitle())
        self.setFax(cp.getFax())
        self._notifyModification()

    def delete(self):
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    @Updates (['MaKaC.conference.ConferenceParticipation',
               'MaKaC.conference.SessionChair',
               'MaKaC.conference.SlotChair'], 'firstName')
    def setFirstName(self, newName):
        tmp = newName.strip()
        if tmp == self._firstName:
            return
        self._firstName = tmp
        self._notifyModification()

    def getFirstName(self):
        return self._firstName

    @Updates (['MaKaC.conference.ConferenceParticipation',
               'MaKaC.conference.SessionChair',
               'MaKaC.conference.SlotChair'], 'familyName')
    def setFamilyName(self, newName):
        tmp = newName.strip()
        if tmp == self._surName:
            return
        self._surName = tmp
        self._notifyModification()

    def getFamilyName(self):
        return self._surName

    @Updates (['MaKaC.conference.ConferenceParticipation',
               'MaKaC.conference.SessionChair',
               'MaKaC.conference.SlotChair'], 'email')
    def setEmail(self, newMail):
        tmp = newMail.strip()
        if tmp == self._email:
            return
        # reuse the already-stripped value instead of stripping twice
        self._email = tmp
        self._notifyModification()

    def getEmail(self):
        return self._email

    @Updates (['MaKaC.conference.ConferenceParticipation',
               'MaKaC.conference.SessionChair',
               'MaKaC.conference.SlotChair'], 'affiliation')
    def setAffiliation(self, newAffil):
        self._affiliation = newAffil.strip()
        self._notifyModification()

    def getAffiliation(self):
        return self._affiliation

    @Updates (['MaKaC.conference.ConferenceParticipation',
               'MaKaC.conference.SessionChair',
               'MaKaC.conference.SlotChair'], 'address')
    def setAddress(self, newAddr):
        self._address = newAddr.strip()
        self._notifyModification()

    def getAddress(self):
        return self._address

    @Updates (['MaKaC.conference.ConferenceParticipation',
               'MaKaC.conference.SessionChair',
               'MaKaC.conference.SlotChair'], 'phone')
    def setPhone(self, newPhone):
        self._phone = newPhone.strip()
        self._notifyModification()

    def getPhone(self):
        return self._phone

    @Updates (['MaKaC.conference.ConferenceParticipation',
               'MaKaC.conference.SessionChair',
               'MaKaC.conference.SlotChair'], 'title')
    def setTitle(self, newTitle):
        self._title = newTitle.strip()
        self._notifyModification()

    def getTitle(self):
        return self._title

    @Updates (['MaKaC.conference.ConferenceParticipation',
               'MaKaC.conference.SessionChair',
               'MaKaC.conference.SlotChair'], 'fax')
    def setFax(self, newFax):
        self._fax = newFax.strip()
        self._notifyModification()

    def getFax(self):
        return self._fax

    def getFullName(self):
        """Return "Title FamilyName, FirstName", omitting empty parts."""
        res = self.getFamilyName()
        if self.getFirstName() != "":
            if res.strip() != "":
                res = "%s, %s" % (res, self.getFirstName())
            else:
                res = self.getFirstName()
        if self.getTitle() != "":
            res = "%s %s" % (self.getTitle(), res)
        return res

    def getFullNameNoTitle(self):
        """Return "FamilyName, FirstName", omitting empty parts."""
        res = self.getFamilyName()
        if self.getFirstName() != "":
            if res.strip() != "":
                res = "%s, %s" % (res, self.getFirstName())
            else:
                res = self.getFirstName()
        return res

    def getDirectFullName(self):
        """Return "Title FirstName FamilyName", omitting empty parts."""
        res = "%s %s" % (self.getFirstName(), self.getFamilyName())
        res = res.strip()
        if self.getTitle() != "":
            res = "%s %s" % (self.getTitle(), res)
        return res

    def getAbrName(self):
        """Return "FamilyName, F." with the first name abbreviated."""
        res = self.getFamilyName()
        if self.getFirstName():
            if res:
                res = "%s, " % res
            res = "%s%s." % (res, safe_upper(safe_slice(self.getFirstName(), 0, 1)))
        return res

    @staticmethod
    def _cmpFamilyName(cp1, cp2):
        """Case-insensitive comparison on 'FamilyName FirstName'."""
        o1 = "%s %s" % (cp1.getFamilyName(), cp1.getFirstName())
        o2 = "%s %s" % (cp2.getFamilyName(), cp2.getFirstName())
        o1 = o1.lower().strip()
        o2 = o2.lower().strip()
        return cmp(o1, o2)
class ConferenceChair(ConferenceParticipation, Fossilizable):
    """A chairperson of a conference; participation data plus the owning
    conference and an id within it."""
    fossilizes(IConferenceParticipationFossil)
    def __init__(self):
        self._conf = None
        self._id = ""
        ConferenceParticipation.__init__(self)
    def _notifyModification(self):
        # propagate field changes to the owning conference, if attached
        if self._conf != None:
            self._conf.notifyModification()
    def clone(self):
        """Return an unattached copy with the same participation data."""
        newCC = ConferenceChair()
        newCC.setValues(self.getValues())
        return newCC
    def getConference(self):
        return self._conf
    def getId(self):
        return self._id
    def includeInConference(self, conf, id):
        """Attach this chair to *conf* under *id* (no-op if unchanged).

        NOTE(review): the comparison uses id.strip() but the raw *id* is
        stored -- confirm callers never pass padded ids.
        """
        if self.getConference() == conf and self.getId() == id.strip():
            return
        self._conf = conf
        self._id = id
    def delete(self):
        """Detach from the conference and move to the trash can."""
        self._conf = None
        ConferenceParticipation.delete(self)
    def getLocator(self):
        """Return the conference locator extended with chairId, or None
        when not attached to a conference."""
        if self.getConference() is None:
            return None
        loc = self.getConference().getLocator()
        loc["chairId"] = self.getId()
        return loc
class SubmitterIndex(Persistent):
    """Index for contribution submitters.

    Maps avatar ids (and, for users who have not registered yet, e-mail
    addresses) to the list of contributions they have submission
    privileges over, so the conference can answer "does this user submit
    anywhere?" efficiently.  The owner is responsible for keeping the
    index up to date (notifying submitter additions and removals).
    """

    def __init__(self):
        self._idx = OOBTree()
        self._idxEmail = OOBTree()

    def _getIdxEmail(self):
        # lazy creation for objects persisted before the e-mail index
        # existed; only AttributeError means "not there yet"
        try:
            return self._idxEmail
        except AttributeError:
            self._idxEmail = OOBTree()
            return self._idxEmail

    def getContributions(self, av):
        """Return the contributions *av* has submission privileges over."""
        if av is None:
            return []
        ret = self._idx.get(av.getId(), [])
        if not ret:
            # the user may have been indexed by e-mail before registering
            self._moveEmailtoId(av)
            ret = self._idx.get(av.getId(), [])
        return ret

    def index(self, av, contrib):
        """Register *av* as a submitter of *contrib*."""
        if av is None or contrib is None:
            return
        if av.getId() not in self._idx:
            l = []
            self._idx[av.getId()] = l
        else:
            l = self._idx[av.getId()]
        if contrib not in l:
            l.append(contrib)
            # reassign so the BTree notices the mutated list
            self._idx[av.getId()] = l

    def indexEmail(self, email, contrib):
        """Register a not-yet-registered submitter of *contrib* by e-mail."""
        if not email or not contrib:
            return
        if email not in self._getIdxEmail():
            l = [contrib]
            self._getIdxEmail()[email] = l
        else:
            l = self._getIdxEmail()[email]
            if contrib not in l:
                l.append(contrib)
                self._getIdxEmail()[email] = l

    def unindex(self, av, contrib):
        """Remove *contrib* from *av*'s submission list, if present."""
        if av is None or contrib is None:
            return
        l = self._idx.get(av.getId(), [])
        if contrib in l:
            l.remove(contrib)
            self._idx[av.getId()] = l

    def unindexEmail(self, email, contrib):
        """Remove *contrib* from the e-mail entry, dropping empty entries."""
        if not email or not contrib:
            return
        if email in self._getIdxEmail():
            l = self._getIdxEmail()[email]
            if contrib in l:
                l.remove(contrib)
            if l == []:
                del self._getIdxEmail()[email]
            else:
                self._getIdxEmail()[email] = l

    def _moveEmailtoId(self, av):
        """Migrate an e-mail-keyed entry to *av*'s id once registered."""
        id = av.getId()
        email = av.getEmail()
        if id not in self._idx:
            if email in self._getIdxEmail():
                self._idx[id] = self._getIdxEmail()[email]
                del self._getIdxEmail()[email]
class ReportNumberHolder(Persistent):
    """Stores report numbers, grouped by reporting system, for its owner.

    self._reports maps system name -> list of report-number strings
    (older objects may hold a bare string; the list methods upgrade it).
    """

    def __init__(self, owner):
        self._owner = owner
        self._reports = {}

    def getOwner(self):
        return self._owner

    def addReportNumber(self, system, number):
        """Add *number* under *system* if the system is known or configured."""
        if system in self.getReportNumberKeys() or system in Config.getInstance().getReportNumberSystems().keys():
            try:
                if number not in self._reports[system]:
                    self._reports[system].append(number)
            except KeyError:
                # first number for this system
                self._reports[system] = [number]
            self.notifyModification()

    def removeReportNumber(self, system, number):
        """Remove *number* from *system*, if present."""
        if self.hasReportNumbersBySystem(system):
            if number in self._reports[system]:
                self._reports[system].remove(number)
                self.notifyModification()

    def removeReportNumberById(self, id):
        """Best-effort removal by position in listReportNumbers().

        Invalid or out-of-range ids are silently ignored (legacy contract).
        """
        try:
            rn = self.listReportNumbers()[int(id)]
        except (IndexError, ValueError, TypeError):
            return
        self.removeReportNumber(rn[0], rn[1])

    def hasReportNumbersBySystem(self, system):
        return system in self._reports

    def getReportNumbersBySystem(self, system):
        """Return the number list for *system*, or None when absent."""
        if self.hasReportNumbersBySystem(system):
            return self._reports[system]
        return None

    def getReportNumberKeys(self):
        return self._reports.keys()

    def listReportNumbersOnKey(self, key):
        """Return [key, number] pairs for one system."""
        reports = []
        if key in self._reports.keys():
            # compatibility with previous versions that stored a bare string
            if type(self._reports[key]) is str:
                self._reports[key] = [self._reports[key]]
            for number in self._reports[key]:
                reports.append([key, number])
        return reports

    def hasReportNumberOnSystem(self, system, number):
        if self.hasReportNumbersBySystem(system):
            if number in self._reports[system]:
                return True
        return False

    def listReportNumbers(self):
        """Return all [system, number] pairs, sorted by system name."""
        reports = []
        keys = self._reports.keys()
        keys.sort()
        for key in keys:
            # compatibility with previous versions that stored a bare string
            if type(self._reports[key]) is str:
                self._reports[key] = [self._reports[key]]
            for number in self._reports[key]:
                reports.append([key, number])
        return reports

    def clone(self, owner):
        """Return a copy of this holder attached to *owner*."""
        newR = ReportNumberHolder(owner)
        for key in self._reports.keys():
            for number in self._reports[key]:
                newR.addReportNumber(key, number)
        return newR

    def notifyModification(self):
        self._p_changed = 1
        if self.getOwner() != None:
            self.getOwner().notifyModification()
class Conference(CommonObjectBase, Locatable):
"""This class represents the real world conferences themselves. Objects of
this class will contain basic data about the confence and will provide
access to other objects representing certain parts of the conferences
(ex: contributions, sessions, ...).
"""
fossilizes(IConferenceFossil, IConferenceMinimalFossil, IConferenceEventInfoFossil)
    def __init__(self, id=''):
        """Initialise an empty conference with the given id.

        Sets up all persistent containers (sessions, contributions,
        materials, registration, review, ...) plus per-collection Counter
        objects that hand out unique child ids.
        """
        self.id = id
        self.title = ""
        self.description = ""
        self.places = []
        self.rooms = []
        ###################################
        # Fermi timezone awareness        #
        ###################################
        self.startDate = nowutc()
        self.endDate = nowutc()
        self.timezone = ""
        ###################################
        # Fermi timezone awareness(end)   #
        ###################################
        self._screenStartDate = None
        self._screenEndDate = None
        self.contactInfo =""
        self.chairmanText = ""
        self.chairmans = []
        self._chairGen=Counter()
        self._chairs=[]
        self.sessions = {}
        self.__sessionGenerator = Counter() # Provides session unique
        # identifiers for this conference
        self.contributions = {}
        self.__contribGenerator = Counter() # Provides contribution unique
        # identifiers for this conference
        self.programDescription = ""
        self.program = []
        self.__programGenerator = Counter() # Provides track unique
        # identifiers for this conference
        self.__ac = AccessController(self)
        self.materials = {}
        self.__materialGenerator = Counter() # Provides material unique
        # identifiers for this conference
        self.paper = None
        self.slides = None
        self.video = None
        self.poster = None
        self.__schedule=None
        self.__owners = []
        self._modificationDS = self._creationDS = nowutc()
        self.abstractMgr = review.AbstractMgr(self)
        self._logo = None
        self._trackCoordinators = TCIndex() #index for the track coordinators
        self._supportInfo = SupportInfo(self, "Support")
        self._contribTypes = {}
        self.___contribTypeGenerator = Counter()
        self._authorIdx=AuthorIndex()
        self._speakerIdx=AuthorIndex()
        self._primAuthIdx=_PrimAuthIdx(self)
        self._sessionCoordinators=SCIndex()
        self._sessionCoordinatorRights = []
        self._submitterIdx=SubmitterIndex()
        self._boa=BOAConfig(self)
        self._registrationForm = registration.RegistrationForm(self)
        self._registrants = {} #key=registrantId; value=Registrant
        self._bookings = {}
        self._registrantGenerator = Counter()
        self._accessKey=""
        self._modifKey=""
        self._closed = False
        self._visibility = 999
        self._pendingQueuesMgr=pendingQueues.ConfPendingQueuesMgr(self)
        self._sections = []
        self._participation = Participation(self)
        self._reportNumberHolder=ReportNumberHolder(self)
        self._enableSessionSlots = False
        self._enableSessions = False
        self._autoSolveConflict = True
        self.__badgeTemplateManager = BadgeTemplateManager(self)
        self.__posterTemplateManager = PosterTemplateManager(self)
        self._keywords = ""
        self._confPaperReview = ConferencePaperReview(self)
        self._confAbstractReview = ConferenceAbstractReview(self)
        self._orgText = ""
        self._comments = ""
        self._sortUrlTag = ""
        self._observers = []
    @return_ascii
    def __repr__(self):
        """Debug representation: id, title and start date."""
        return '<Conference({0}, {1}, {2})>'.format(self.getId(), self.getTitle(), self.getStartDate())
    @property
    def all_manager_emails(self):
        """Returns the emails of all managers (user ACL entries only)."""
        # We ignore email principals here. They never signed up in indico anyway...
        return {p.principal.email for p in self.as_event.acl_entries if p.type == PrincipalType.user}
    @property
    @memoize_request
    def as_event(self):
        """Returns the :class:`.Event` for this object

        :rtype: indico.modules.events.models.events.Event
        """
        from indico.modules.events.models.events import Event
        query = Event.find(id=int(self.id))
        # this is pretty ugly, but the api sends queries in a loop and we can't
        # really avoid this for now.  so let's at least not query things we
        # clearly don't need
        if has_request_context() and request.blueprint == 'api':
            acl_user_strategy = joinedload('acl_entries').defaultload('user')
            # remote group membership checks will trigger a load on _all_emails
            # but not all events use this so there's no need to eager-load them
            acl_user_strategy.noload('_primary_email')
            acl_user_strategy.noload('_affiliation')
            query = query.options(acl_user_strategy)
        return query.one()
    @property
    @memoize_request
    def note(self):
        """Return the EventNote attached to this event, or None."""
        from indico.modules.events.notes.models.notes import EventNote
        return EventNote.get_for_linked_object(self)
    @property
    @memoize_request
    def scheduled_notes(self):
        """Return notes of nested objects, excluding the event's own note."""
        from indico.modules.events.notes.util import get_nested_notes
        return set(get_nested_notes(self)) - {self.note}
    @property
    def tz(self):
        """Return the display timezone name for this event."""
        from MaKaC.common.timezoneUtils import DisplayTZ
        return DisplayTZ(conf=self).getDisplayTZ()
    @unify_user_args
    def log(self, *args, **kwargs):
        """Forward a log entry to the SQLAlchemy Event's log."""
        self.as_event.log(*args, **kwargs)
    @memoize_request
    def has_feature(self, feature):
        """Tell whether the event has the given feature enabled."""
        return self.as_event.has_feature(feature)
    @staticmethod
    def _cmpByDate(self, toCmp):
        """cmp-style comparator by start date, tie-broken by __cmp__.

        NOTE(review): despite the name, the first parameter is an ordinary
        argument -- this is a @staticmethod used as a sort comparator.
        """
        res = cmp(self.getStartDate(), toCmp.getStartDate())
        if res != 0:
            return res
        else:
            return cmp(self, toCmp)
    def __cmp__(self, toCmp):
        """Order conferences by id; fall back to hash order for other types."""
        if isinstance(toCmp, Conference):
            return cmp(self.getId(), toCmp.getId())
        else:
            return cmp(hash(self), hash(toCmp))
    def __eq__(self, toCmp):
        # identity equality: two Conference objects are equal only if same object
        return self is toCmp
    def __ne__(self, toCmp):
        # inverse of the identity-based __eq__ above
        return not(self is toCmp)
    def setUrlTag(self, tag):
        """Set the tag used for URL sorting."""
        self._sortUrlTag = tag
def getUrlTag(self):
try:
return self._sortUrlTag
except:
self._sortUrlTag = ""
return self._sortUrlTag
def setComments(self,comm=""):
self._comments = comm.strip()
    def getComments(self):
        """Return the internal comments, lazily initialising old objects."""
        try:
            if self._comments:
                pass
        except AttributeError,e:
            self.setComments()
        return self._comments
    def getConfPaperReview(self):
        """Return the paper-review manager, creating it for legacy objects."""
        if not hasattr(self, "_confPaperReview"):
            self._confPaperReview = ConferencePaperReview(self)
        return self._confPaperReview
    def getConfAbstractReview(self):
        """Return the abstract-review manager, creating it for legacy objects."""
        if not hasattr(self, "_confAbstractReview"):
            self._confAbstractReview = ConferenceAbstractReview(self)
        return self._confAbstractReview
def getOrgText( self ):
try:
return self._orgText
except:
self.setOrgText()
return ""
def setOrgText( self, org="" ):
self._orgText = org
    def cleanCache( self ):
        """Invalidate the cached schedule JSON, at most once per request."""
        if not ContextManager.get('clean%s'%self.getUniqueId(), False):
            ScheduleToJson.cleanConferenceCache(self)
            ContextManager.set('clean%s'%self.getUniqueId(), True)
    def updateNonInheritingChildren(self, elem, delete=False):
        """Forward non-inheriting-children bookkeeping to the access controller."""
        self.getAccessController().updateNonInheritingChildren(elem, delete)
def getKeywords(self):
try:
return self._keywords
except:
self._keywords = ""
return ""
    def setKeywords(self, keywords):
        """Set the keywords string."""
        self._keywords = keywords
# Room booking related ===================================================
    def getRoomBookingList(self):
        """Returns list of bookings for this conference."""
        # In case anyone wonders why this method is still here: Various fossils expect/use it.
        return self.as_event.reservations.options(noload('created_by_user'), noload('booked_for_user')).all()
# ========================================================================
    def getParticipation(self):
        """Return the Participation manager, creating it for legacy objects."""
        try :
            if self._participation :
                pass
        except AttributeError :
            self._participation = Participation(self)
        return self._participation
    def getType( self ):
        """Return the event type id ('conference' unless a web factory says
        otherwise, e.g. 'meeting' or 'simple_event')."""
        import MaKaC.webinterface.webFactoryRegistry as webFactoryRegistry
        wr = webFactoryRegistry.WebFactoryRegistry()
        wf = wr.getFactory(self)
        if wf != None:
            type = wf.getId()
        else:
            type = "conference"
        return type
    def getVerboseType( self ):
        # Like getType, but returns "Lecture" instead of "simple_type"
        type = self.getType()
        if type == "simple_event":
            type = "lecture"
        return type.capitalize()
def getEnableSessionSlots(self):
#try :
# if self._enableSessionSlots :
# pass
#except AttributeError :
# self._enableSessionSlots = True
#if self.getType() == "conference":
# return True
#return self._enableSessionSlots
return True
    def getEnableSessions(self):
        """Tell whether sessions are enabled (always True for conferences)."""
        try :
            if self._enableSessions :
                pass
        except AttributeError :
            self._enableSessions = True
        if self.getType() == "conference":
            return True
        return self._enableSessions
    def enableSessionSlots(self):
        """Turn on the (legacy) session-slots flag."""
        self._enableSessionSlots = True
    def disableSessionSlots(self):
        """Turn off the (legacy) session-slots flag."""
        self._enableSessionSlots = False
    def enableSessions(self):
        """Turn on the sessions flag."""
        self._enableSessions = True
    def disableSessions(self):
        """Turn off the sessions flag."""
        self._enableSessions = False
    def setValues(self, confData):
        """
        Sets SOME values of the current conference object from a dictionary
        containing the following key-value pairs:
            visibility-(str)
            title-(str)
            description-(str)
            supportEmail-(str)
            contactInfo-(str)
            locationName-(str) => name of the location, if not specified
                    it will be set to the conference location name.
            locationAddress-(str)
            roomName-(str) => name of the room, if not specified it will
                    be set to the conference room name.
        Please, note that this method sets SOME values which means that if
        needed it can be completed to set more values. Also note that if
        the given dictionary doesn't contain all the values, the missing
        ones will be set to the default values.
        """
        self.setVisibility(confData.get("visibility", "999"))
        self.setTitle(confData.get("title", _("NO TITLE ASSIGNED")))
        self.setDescription(confData.get("description", ""))
        self.getSupportInfo().setEmail(confData.get("supportEmail", ""))
        self.setContactInfo(confData.get("contactInfo", ""))
        # An empty/blank location name clears the location entirely.
        if confData.get("locationName", "").strip() == "":
            self.setLocation(None)
        else:
            #if the location name is defined we must set a new location (or
            #   modify the existing one) for the conference
            loc = self.getLocation()
            if not loc:
                loc = CustomLocation()
            self.setLocation(loc)
            loc.setName(confData["locationName"])
            loc.setAddress(confData.get("locationAddress", ""))
        #same as for the location
        if confData.get("roomName", "").strip() == "":
            self.setRoom(None)
        else:
            room = self.getRoom()
            if not room:
                room = CustomRoom()
            self.setRoom(room)
            room.setName(confData["roomName"])
        self.notifyModification()
def getVisibility ( self ):
try:
return int(self._visibility)
except:
self._visibility = 999
return 999
def getFullVisibility( self ):
return max(0,min(self.getVisibility(), self.getOwnerList()[0].getVisibility()))
    def setVisibility( self, visibility=999 ):
        """Set visibility depth and reindex the event in all category
        indexes that depend on it."""
        self._visibility = int(visibility)
        catIdx = indexes.IndexesHolder().getIndex('category')
        catIdx.reindexConf(self)
        catDateIdx = indexes.IndexesHolder().getIndex('categoryDate')
        catDateAllIdx = indexes.IndexesHolder().getIndex('categoryDateAll')
        catDateIdx.reindexConf(self)
        catDateAllIdx.reindexConf(self)
def isClosed( self ):
try:
return self._closed
except:
self._closed = False
return False
    def setClosed( self, closed=True ):
        """Mark the event as closed (or reopen it with closed=False)."""
        self._closed = closed
    def indexConf( self ):
        """Insert the event into the calendar, category-date and title
        indexes plus the categ/start-date catalog."""
        # called when event dates change
        # see also Category.indexConf()
        calIdx = indexes.IndexesHolder().getIndex('calendar')
        calIdx.indexConf(self)
        catDateIdx = indexes.IndexesHolder().getIndex('categoryDate')
        catDateAllIdx = indexes.IndexesHolder().getIndex('categoryDateAll')
        catDateIdx.indexConf(self)
        catDateAllIdx.indexConf(self)
        nameIdx = indexes.IndexesHolder().getIndex('conferenceTitle')
        nameIdx.index(self)
        Catalog.getIdx('categ_conf_sd').index_obj(self)
    def unindexConf( self ):
        """Remove the event from every index touched by indexConf()."""
        calIdx = indexes.IndexesHolder().getIndex('calendar')
        calIdx.unindexConf(self)
        catDateIdx = indexes.IndexesHolder().getIndex('categoryDate')
        catDateAllIdx = indexes.IndexesHolder().getIndex('categoryDateAll')
        catDateIdx.unindexConf(self)
        catDateAllIdx.unindexConf(self)
        nameIdx = indexes.IndexesHolder().getIndex('conferenceTitle')
        nameIdx.unindex(self)
        Catalog.getIdx('categ_conf_sd').unindex_obj(self)
    def __generateNewContribTypeId( self ):
        """Return a new unique identifier for this conference's
        contribution types.
        """
        # NOTE: the triple leading underscore is name-mangled within the
        # class; the bare except initialises the counter on legacy objects.
        try:
            return str(self.___contribTypeGenerator.newCount())
        except:
            self.___contribTypeGenerator = Counter()
            return str(self.___contribTypeGenerator.newCount())
    def addContribType(self, ct):
        """Register a ContributionType, assigning it a fresh id if it has
        none; no-op when already registered."""
        try:
            if self._contribTypes:
                pass
        except:
            self._contribTypes = {}
        if ct in self._contribTypes.values():
            return
        id = ct.getId()
        if id == "":
            id = self.__generateNewContribTypeId()
            ct.setId(id)
        self._contribTypes[id] = ct
        self.notifyModification()
    def newContribType(self, name, description):
        """Create, register and return a new ContributionType."""
        ct = ContributionType(name, description, self)
        self.addContribType(ct)
        return ct
def getContribTypeList(self):
try:
return self._contribTypes.values()
except:
self._contribTypes = {}
self.notifyModification()
return self._contribTypes.values()
def getContribTypeById(self, id):
try:
if self._contribTypes:
pass
except:
self._contribTypes = {}
self.notifyModification()
if id in self._contribTypes.keys():
return self._contribTypes[id]
return None
    def removeContribType(self, ct):
        """Unregister a contribution type, clearing it from any contribution
        that used it, then delete it."""
        try:
            if self._contribTypes:
                pass
        except:
            self._contribTypes = {}
        if not ct in self._contribTypes.values():
            return
        del self._contribTypes[ct.getId()]
        # dict mutation must be flagged manually for ZODB persistence
        self._p_changed = True
        for cont in self.getContributionList():
            if cont.getType() == ct:
                cont.setType(None)
        ct.delete()
        self.notifyModification()
    def recoverContribType(self, ct):
        """Re-attach a contribution type restored from the trash can."""
        ct.setConference(self)
        self.addContribType(ct)
        ct.recover()
    def _getRepository( self ):
        """Return the main local file repository, creating it (and its
        OOBTree container) in the ZODB root on first use."""
        dbRoot = DBMgr.getInstance().getDBConnection().root()
        try:
            fr = dbRoot["local_repositories"]["main"]
        except KeyError, e:
            fr = fileRepository.MaterialLocalRepository()
            dbRoot["local_repositories"] = OOBTree()
            dbRoot["local_repositories"]["main"] = fr
        return fr
    def removeResource( self, res ):
        """Intentional no-op: conferences do not own removable resources,
        but the material-owner interface requires the method."""
        pass
    def getURL(self):
        """Return the short event URL, preferring the custom URL tag over
        the numeric event id."""
        cid = self.getUrlTag()
        if not cid:
            cid = self.getId()
        return Config.getInstance().getShortEventURL() + cid
    def setLogo( self, logoFile ):
        """Archive `logoFile` as the event logo, deleting any previous one."""
        logoFile.setOwner( self )
        logoFile.setId( "logo" )
        logoFile.archive( self._getRepository() )
        if self._logo != None:
            self._logo.delete()
        self._logo = logoFile
        self.notifyModification()
    def getLogo( self ):
        """Return the archived logo file object (or None)."""
        return self._logo
def getLogoURL( self ):
try:
if self._logo == None:
return ""
return self._logo.getURL()
except AttributeError:
self._logo = None
return ""
    def removeLogo(self):
        """Delete the archived logo, if any."""
        if self._logo is None:
            return
        self._logo.delete()
        self._logo = None
        self.notifyModification()
    def recoverLogo(self, logo):
        """Restore a logo from the trash can, replacing any current one."""
        logo.setOwner(self)
        if self._logo != None:
            self._logo.delete()
        self._logo = logo
        logo.recover()
        self.notifyModification()
    def getSession(self):
        """Owner-chain protocol: a conference has no parent session."""
        return None
    def getContribution(self):
        """Owner-chain protocol: a conference has no parent contribution."""
        return None
    def getSubContribution(self):
        """Owner-chain protocol: a conference has no parent subcontribution."""
        return None
    def getAbstractMgr(self):
        """Return the abstract (call-for-abstracts) manager of this event."""
        return self.abstractMgr
    def notifyModification( self, date = None, raiseEvent = True):
        """Method called to notify the current conference has been modified.

        NOTE(review): the `date` parameter is accepted but never used here.
        """
        self.setModificationDate()
        # Only signal events that already have an id (fully created).
        if raiseEvent and self.id:
            signals.event.data_changed.send(self, attr=None, old=None, new=None)
        self.cleanCache()
        # force ZODB to persist the object
        self._p_changed=1
    def getModificationDate( self ):
        """Returns the date in which the conference was last modified"""
        return self._modificationDS
    def getAdjustedModificationDate( self, tz ):
        """Returns the last-modification date converted to timezone `tz`."""
        return self._modificationDS.astimezone(timezone(tz))
    def getCreationDate( self ):
        """Returns the date in which the conference was created"""
        return self._creationDS
    def getAdjustedCreationDate( self, tz ):
        """Returns the creation date converted to timezone `tz`."""
        return self._creationDS.astimezone(timezone(tz))
    def getId( self ):
        """returns (string) the unique identifier of the conference"""
        return self.id
def getUniqueId( self ):
"""returns (string) the unique identiffier of the item"""
"""used mainly in the web session access key table"""
return "a%s" % self.id
    def setId(self, newId):
        """changes the current unique identifier of the conference to the
            one which is specified"""
        self.id = str(newId)
    def getLocator( self ):
        """Gives back (Locator) a globaly unique identification encapsulated in
            a Locator object for the conference instance """
        d = Locator()
        d["confId"] = self.getId()
        return d
def getOwner( self ):
if self.getOwnerList() == []:
return None
return self.getOwnerList()[0]
    def getOwnerList( self ):
        """Return the list of owner categories of this event."""
        return self.__owners
    def getOwnerPath( self ):
        """Return the chain of owner categories from the direct owner up to
        (but excluding) the root category (id "0")."""
        l=[]
        owner = self.getOwnerList()[0]
        while owner != None and owner.getId() != "0":
            l.append(owner)
            owner = owner.getOwner()
        return l
def getOwnerById( self, key ):
"""Returns one specific category which contains the conference.
Params:
- key: The "id" of the category.
"""
for owner in self.__owners:
if key == owner.getId():
return owner
return None
    def addOwner( self, newOwner ):
        """Append a category to the owner list (ignores None)."""
        if newOwner == None:
            return
        self.__owners.append( newOwner )
        self.notifyModification()
    def removeOwner( self, owner, notify=True ):
        """Detach an owner category (both directions); `notify=False` lets
        bulk operations skip the modification signal."""
        if not (owner in self.__owners):
            return
        self.__owners.remove( owner )
        owner.removeConference( self )
        if notify:
            self.notifyModification()
    def getCategoriesPath(self):
        """Return the category path of the first owner, wrapped in a list."""
        return [self.getOwnerList()[0].getCategoryPath()]
    def notifyContributions(self):
        """Emit deletion signals for every contribution (and its
        subcontributions) of this event."""
        for c in self.getContributionList():
            # take care of subcontributions
            for sc in c.getSubContributionList():
                signals.event.subcontribution_deleted.send(sc, parent=c)
            signals.event.contribution_deleted.send(c, parent=self)
    def delete(self, user=None):
        """deletes the conference from the system.

        Signals deletion, detaches the event from registry/categories,
        cleans redis links and short URLs, and parks it in the trash can.
        """
        signals.event.deleted.send(self, user=user)
        self.notifyContributions()
        # will have to remove it from all the owners (categories) and the
        # conference registry
        ConferenceHolder().remove(self)
        for owner in self.__owners:
            owner.removeConference(self, notify=False)
        # Remove all links in redis
        if redis_write_client:
            avatar_links.delete_event(self)
        # Remote short URL mappings
        ShortURLMapper().remove(self)
        TrashCanManager().add(self)
    def getConference( self ):
        """Owner-chain protocol: the conference of a conference is itself."""
        return self
    def getObservers(self):
        """Return the observer list, lazily created for legacy objects."""
        if not hasattr(self, "_observers"):
            self._observers = []
        return self._observers
    def setDates( self, sDate, eDate=None, check=1, moveEntries=0):
        """
        Set the start/end date for a conference

        Validates the range, optionally shifts all timetable entries by the
        same delta (moveEntries=1), reindexes the event and signals the
        change. Raises FormValuesError / TimingError on invalid input.
        """
        oldStartDate = self.getStartDate()
        oldEndDate = self.getEndDate()
        # do some checks first
        if sDate > eDate:
            # obvious case
            raise FormValuesError(_("Start date cannot be after the end date"), _("Event"))
        elif sDate == oldStartDate and eDate == oldEndDate:
            # if there's nothing to do (yet another obvious case)
            return
        # if we reached this point, it means either the start or
        # the end date (or both) changed
        # If only the end date was changed, moveEntries = 0
        if sDate == oldStartDate:
            moveEntries = 0
        # Pre-check for moveEntries
        if moveEntries == 1:
            # in case the entries are to be simply shifted
            # we should make sure the interval is big enough
            # just store the old values for later
            oldInterval = oldEndDate - oldStartDate
            newInterval = eDate - sDate
            entries = self.getSchedule().getEntries()
            if oldInterval > newInterval and entries:
                eventInterval = entries[-1].getEndDate() - entries[0].getStartDate()
                diff = entries[0].getStartDate() - oldStartDate
                if sDate + diff + eventInterval > eDate:
                    raise TimingError(
                        _("The start/end dates were not changed since the selected "
                          "timespan is not large enough to accomodate the contained "
                          "timetable entries and spacings."),
                        explanation=_("You should try using a larger timespan."))
        # so, we really need to try changing something
        self.unindexConf()
        # set the dates
        self.setStartDate(sDate, check=0, moveEntries = moveEntries, index=False, notifyObservers = False)
        self.setEndDate(eDate, check=0, index=False, notifyObservers = False)
        # sanity check
        self._checkInnerSchedule()
        # reindex the conference
        self.indexConf()
        # notify observers
        old_data = (oldStartDate, oldEndDate)
        new_data = (self.getStartDate(), self.getEndDate())
        if old_data != new_data:
            signals.event.data_changed.send(self, attr='dates', old=old_data, new=new_data)
    def _checkInnerSchedule( self ):
        """Sanity-check the timetable after a date change."""
        self.getSchedule().checkSanity()
    def setStartDate(self, sDate, check = 1, moveEntries = 0, index = True, notifyObservers = True):
        """ Changes the current conference starting date/time to the one specified by the parameters.

            `sDate` must be timezone-aware. check=1 validates against the end
            date; moveEntries shifts all timetable entries by the same delta;
            index/notifyObservers let batch callers (setDates) defer
            reindexing and signalling.
        """
        if not sDate.tzname():
            raise MaKaCError("date should be timezone aware")
        if sDate == self.getStartDate():
            return
        ###################################
        # Fermi timezone awareness        #
        ###################################
        # BTree-backed indexes only accept dates inside this range.
        if not indexes.BTREE_MIN_UTC_DATE <= sDate <= indexes.BTREE_MAX_UTC_DATE:
            raise FormValuesError(_("The start date must be between {} and {}.").format(
                format_datetime(indexes.BTREE_MIN_UTC_DATE),
                format_datetime(indexes.BTREE_MAX_UTC_DATE)))
        ###################################
        # Fermi timezone awareness        #
        ###################################
        if check != 0:
            self.verifyStartDate(sDate)
        oldSdate = self.getStartDate()
        diff = sDate - oldSdate
        if index:
            self.unindexConf()
        self.startDate  = sDate
        if moveEntries and diff is not None:
            # If the start date changed, we move entries inside the timetable
            self.getSchedule()._startDate=None
            self.getSchedule()._endDate=None
            #if oldSdate.date() != sDate.date():
            #    entries = self.getSchedule().getEntries()[:]
            #else:
            #    entries = self.getSchedule().getEntriesOnDay(sDate.astimezone(timezone(self.getTimezone())))[:]
            entries = self.getSchedule().getEntries()[:]
            self.getSchedule().moveEntriesBelow(diff, entries, check=check)
        #datetime object is non-mutable so we must "force" the modification
        #   otherwise ZODB won't be able to notice the change
        self.notifyModification()
        if index:
            self.indexConf()
        # Update redis link timestamp
        if redis_write_client:
            avatar_links.update_event_time(self)
        #if everything went well, we notify the observers that the start date has changed
        if notifyObservers:
            if oldSdate != sDate:
                signals.event.data_changed.send(self, attr='start_date', old=oldSdate, new=sDate)
    def verifyStartDate(self, sdate, check=1):
        """Raise MaKaCError when `sdate` falls after the current end date.
        NOTE(review): the `check` parameter is accepted but unused."""
        if sdate>self.getEndDate():
            raise MaKaCError( _("End date cannot be before the Start date"), _("Event"))
    def setStartTime(self, hours=0, minutes=0, notifyObservers = True):
        """ Changes the current conference starting time (not date) to the one specified by the parameters.

            NOTE(review): the rebuilt datetime carries no tzinfo, while
            setStartDate() requires aware dates — confirm callers rely on this.
        """
        sdate = self.getStartDate()
        self.startDate = datetime( sdate.year, sdate.month, sdate.day,
                                                    int(hours), int(minutes) )
        self.verifyStartDate(self.startDate)
        self.notifyModification()
    def getStartDate(self):
        """returns (datetime) the starting date of the conference"""
        return self.startDate
    def getUnixStartDate(self):
        """Return the start date as a unix timestamp (int)."""
        return datetimeToUnixTimeInt(self.startDate)
###################################
# Fermi timezone awareness #
###################################
    def getAdjustedStartDate(self,tz=None):
        """Return the start date in timezone `tz` (default: the event's own
        timezone; unknown names fall back to UTC)."""
        if not tz:
            tz = self.getTimezone()
        if tz not in all_timezones:
            tz = 'UTC'
        return self.getStartDate().astimezone(timezone(tz))
###################################
# Fermi timezone awareness(end) #
###################################
    def setScreenStartDate(self, date):
        """Set the displayed ("screen") start date override; storing None
        (also when equal to the real start date) means no override."""
        if date == self.getStartDate():
            date = None
        self._screenStartDate = date
        self.notifyModification()
def getScreenStartDate(self):
try:
date = self._screenStartDate
except:
date = self._screenStartDate = None
if date != None:
return date
else:
return self.getStartDate()
    def getAdjustedScreenStartDate(self, tz=None):
        """Return the screen start date in timezone `tz` (default: event tz)."""
        if not tz:
            tz = self.getTimezone()
        return self.getScreenStartDate().astimezone(timezone(tz))
    def calculateDayStartTime(self, day):
        """returns (date) the start date of the conference on a given day
        day is a tz aware datetime"""
        # On the event's first day the event start time wins; otherwise ask
        # the timetable for the earliest entry of that day.
        if self.getStartDate().astimezone(day.tzinfo).date() == day.date():
            return self.getStartDate().astimezone(day.tzinfo)
        return self.getSchedule().calculateDayStartDate(day)
    def verifyEndDate(self, edate):
        """Raise TimingError when `edate` precedes the start date or would
        leave timetable entries outside the event."""
        if edate<self.getStartDate():
            raise TimingError( _("End date cannot be before the start date"), _("Event"))
        if self.getSchedule().hasEntriesAfter(edate):
            raise TimingError(_("Cannot change end date to %s: some entries in the timetable would be outside this date (%s)") % (edate,self.getSchedule().getEntries()[-1].getStartDate()), _("Event"))
    def setEndDate(self, eDate, check = 1, index = True, notifyObservers = True):
        """ Changes the current conference end date/time to the one specified by the parameters.

            `eDate` must be timezone-aware; index/notifyObservers let batch
            callers (setDates) defer reindexing and signalling.
        """
        if not eDate.tzname():
            raise MaKaCError("date should be timezone aware")
        if eDate == self.getEndDate():
            return
        # BTree-backed indexes only accept dates inside this range.
        if not indexes.BTREE_MIN_UTC_DATE <= eDate <= indexes.BTREE_MAX_UTC_DATE:
            raise FormValuesError(_("The end date must be between {} and {}.").format(
                format_datetime(indexes.BTREE_MIN_UTC_DATE),
                format_datetime(indexes.BTREE_MAX_UTC_DATE)))
        if check != 0:
            self.verifyEndDate(eDate)
        if index:
            self.unindexConf()
        oldEdate = self.endDate
        self.endDate  = eDate
        #datetime object is non-mutable so we must "force" the modification
        #   otherwise ZODB won't be able to notice the change
        self.notifyModification()
        if index:
            self.indexConf()
        #if everything went well, we notify the observers that the start date has changed
        if notifyObservers:
            if oldEdate != eDate:
                signals.event.data_changed.send(self, attr='end_date', old=oldEdate, new=eDate)
    def setEndTime(self, hours = 0, minutes = 0, notifyObservers = True):
        """ Changes the current conference end time (not date) to the one specified by the parameters.

            NOTE(review): like setStartTime, this drops tzinfo — confirm.
        """
        edate = self.getEndDate()
        self.endDate = datetime( edate.year, edate.month, edate.day, int(hours), int(minutes) )
        self.verifyEndDate(self.endDate)
        self.notifyModification()
    def getEndDate(self):
        """returns (datetime) the ending date of the conference"""
        return self.endDate
##################################
# Fermi timezone awareness #
##################################
    def getAdjustedEndDate(self,tz=None):
        """Return the end date in timezone `tz` (default: the event's own
        timezone; unknown names fall back to UTC)."""
        if not tz:
            tz = self.getTimezone()
        if tz not in all_timezones:
            tz = 'UTC'
        return self.getEndDate().astimezone(timezone(tz))
##################################
# Fermi timezone awareness(end) #
##################################
    def setScreenEndDate(self, date):
        """Set the displayed ("screen") end date override; storing None
        (also when equal to the real end date) means no override."""
        if date == self.getEndDate():
            date = None
        self._screenEndDate = date
        self.notifyModification()
def getScreenEndDate(self):
try:
date = self._screenEndDate
except:
date = self._screenEndDate = None
if date != None:
return date
else:
return self.getEndDate()
    def getAdjustedScreenEndDate(self, tz=None):
        """Return the screen end date in timezone `tz` (default: event tz)."""
        if not tz:
            tz = self.getTimezone()
        return self.getScreenEndDate().astimezone(timezone(tz))
    def isEndDateAutoCal( self ):
        """Says whether the end date has been explicitely set for the session
            or it must be calculated automatically
        """
        return self._endDateAutoCal
####################################
# Fermi timezone awareness #
####################################
def setTimezone(self, tz):
try:
oldTimezone = self.timezone
except AttributeError:
oldTimezone = tz
self.timezone = tz
def getTimezone(self):
try:
return self.timezone
except:
return 'UTC'
    def moveToTimezone(self, tz):
        """Change the event timezone keeping the *local* wall-clock times:
        the adjusted start/end dates are re-localised in `tz` and stored
        back in UTC (timetable entries are shifted along)."""
        if self.getTimezone() == tz:
            return
        sd=self.getAdjustedStartDate()
        ed=self.getAdjustedEndDate()
        self.setTimezone(tz)
        try:
            # localize() may raise for non-existent local times (DST gaps)
            sDate = timezone(tz).localize(datetime(sd.year, \
                                 sd.month, \
                                 sd.day, \
                                 sd.hour, \
                                 sd.minute))
            eDate = timezone(tz).localize(datetime(ed.year, \
                                 ed.month, \
                                 ed.day, \
                                 ed.hour, \
                                 ed.minute))
        except ValueError,e:
            raise MaKaCError("Error moving the timezone: %s"%e)
        self.setDates( sDate.astimezone(timezone('UTC')), \
                       eDate.astimezone(timezone('UTC')),
                       moveEntries=1)
####################################
# Fermi timezone awareness(end) #
####################################
    def getTitle(self):
        """returns (String) the title of the conference"""
        return self.title
    def setTitle(self, title):
        """changes the current title of the conference to the one specified"""
        oldTitle = self.title
        self.title = title
        self.notifyModification()
        # keep the title index in sync (unindex under old title, reindex)
        nameIdx = indexes.IndexesHolder().getIndex('conferenceTitle')
        nameIdx.unindex(self)
        nameIdx.index(self)
        if oldTitle != title:
            signals.event.data_changed.send(self, attr='title', old=oldTitle, new=title)
    def getDescription(self):
        """returns (String) the description of the conference"""
        return self.description
    def setDescription(self, desc):
        """changes the current description of the conference"""
        oldDescription = self.description
        self.description = desc
        if oldDescription != desc:
            signals.event.data_changed.send(self, attr='description', old=oldDescription, new=desc)
        self.notifyModification()
    def getSupportInfo(self):
        """Return the SupportInfo object, lazily created for legacy objects."""
        if not hasattr(self, "_supportInfo"):
            self._supportInfo = SupportInfo(self, "Support")
        return self._supportInfo
    def setSupportInfo(self, supportInfo):
        """Replace the SupportInfo object of this event."""
        self._supportInfo = supportInfo
    def getChairmanText( self ):
        """Return the free-text chairman field, initialising it to "" on
        legacy objects."""
        try:
            if self.chairmanText:
                pass
        except AttributeError, e:
            self.chairmanText = ""
        return self.chairmanText
    def setChairmanText( self, newText ):
        """Set the free-text chairman field (stripped)."""
        self.chairmanText = newText.strip()
    def appendChairmanText( self, newText ):
        """Append `newText` to the free-text chairman field.

        NOTE(review): this also resets _chairGen/_chairs, wiping structured
        chair objects — looks like a leftover; confirm before relying on it.
        """
        self.setChairmanText( "%s, %s"%(self.getChairmanText(), newText.strip()) )
        self._chairGen=Counter()
        self._chairs=[]
    def _resetChairs(self):
        """Lazily migrate legacy avatar-based `chairmans` into
        ConferenceChair objects stored in _chairs (no-op once populated)."""
        try:
            if self._chairs:
                return
        except AttributeError:
            self._chairs=[]
            for oc in self.chairmans:
                newChair=ConferenceChair()
                newChair.setDataFromAvatar(oc)
                self._addChair(newChair)
    def getChairList(self):
        """Method returning a list of the conference chairmans (Avatars)
        """
        self._resetChairs()
        return self._chairs
    def _addChair(self,newChair):
        """Append a chair, skipping duplicates by e-mail, assigning an id
        from the chair counter when needed and linking avatar users."""
        for chair in self._chairs:
            # duplicate detection is by non-empty e-mail only
            if newChair.getEmail() != "" and newChair.getEmail() == chair.getEmail():
                return
        try:
            if self._chairGen:
                pass
        except AttributeError:
            self._chairGen=Counter()
        id = newChair.getId()
        if id == "":
            id=int(self._chairGen.newCount())
        if isinstance(newChair,ConferenceChair):
            newChair.includeInConference(self,id)
        self._chairs.append(newChair)
        if isinstance(newChair, AvatarUserWrapper):
            newChair.linkTo(self, "chair")
        self.notifyModification()
    def addChair(self,newChair):
        """includes the specified user in the list of conference
            chairs"""
        self._resetChairs()
        self._addChair(newChair)
    def removeChair(self,chair):
        """removes the specified user from the list of conference
            chairs"""
        self._resetChairs()
        if chair not in self._chairs:
            return
        self._chairs.remove(chair)
        if isinstance(chair, AvatarUserWrapper):
            chair.unlinkTo(self, "chair")
        chair.delete()
        self.notifyModification()
    def recoverChair(self, ch):
        """Re-attach a chair restored from the trash can."""
        self.addChair(ch)
        ch.recover()
def getChairById(self,id):
id=int(id)
for chair in self._chairs:
if chair.getId()==id:
return chair
return None
    def getAllSessionsConvenerList(self) :
        """Return {dedup-key: set(conveners)} across all sessions and their
        slots, keyed by e-mail + lowercased first/family name."""
        dictionary = {}
        for session in self.getSessionList() :
            for convener in session.getConvenerList() :
                key = convener.getEmail()+" "+convener.getFirstName().lower()+" "+convener.getFamilyName().lower()
                dictionary.setdefault(key, set()).add(convener)
            for slot in session.getSlotList():
                for convener in slot.getConvenerList() :
                    key = convener.getEmail()+" "+convener.getFirstName().lower()+" "+convener.getFamilyName().lower()
                    dictionary.setdefault(key, set()).add(convener)
        return dictionary
    def getContactInfo(self):
        """Return the free-text contact information."""
        return self.contactInfo
    def setContactInfo(self, contactInfo):
        """Set the free-text contact information."""
        self.contactInfo = contactInfo
        self.notifyModification()
    def getLocationParent( self ):
        """
        Returns the object from which the room/location
        information should be inherited.
        For Conferences, it's None, since they can't inherit
        from anywhere else.
        """
        return None
    def getLocation( self ):
        """Return the event's own location (no inheritance possible)."""
        return self.getOwnLocation()
def getAddress( self ):
if self.getOwnLocation():
return self.getOwnLocation().getAddress()
else:
return None
    def getRoom( self ):
        """Return the event's own room (no inheritance possible)."""
        return self.getOwnRoom()
    def getLocationList(self):
        """Method returning a list of "location" objects which contain the
            information about the different places the conference is gonna
            happen
        """
        return self.places
def getFavoriteRooms(self):
roomList = []
roomList.extend(self.getRoomList())
#roomList.extend(map(lambda x: x._getName(), self.getBookedRooms()))
return roomList
    def addLocation(self, newPlace):
        """Append a location object to the event's places list."""
        self.places.append( newPlace )
        self.notifyModification()
    def setAccessKey(self, accessKey=""):
        """sets the access key of the conference"""
        self._accessKey = accessKey
        self.notifyModification()
def getAccessKey(self):
try:
return self._accessKey
except AttributeError:
self._accessKey = ""
return self._accessKey
    def setModifKey(self, modifKey=""):
        """sets the modification key of the conference"""
        self._modifKey = modifKey
        self.notifyModification()
def getModifKey(self):
try:
return self._modifKey
except AttributeError:
self._modifKey = ""
return self._modifKey
    def __generateNewSessionId( self ):
        """Returns a new unique identifier for the current conference sessions
        """
        # __sessionGenerator is name-mangled within the class
        return str(self.__sessionGenerator.newCount())
    def addSession(self, new_session, check=2, session_id=None):
        """Adds a new session object to the conference taking care of assigning
        a new unique id to it
        """
        """check parameter:
            0: no check at all
            1: check and raise error in case of problem
            2: check and adapt the owner dates"""
        if self.hasSession(new_session):
            return
        if self.getSchedule().isOutside(new_session):
            if check == 1:
                raise MaKaCError(_("Cannot add this session (Start:%s - End:%s) "
                                   "Outside of the event's time table(Start:%s - End:%s)").format(
                                       new_session.getStartDate(),
                                       new_session.getEndDate(),
                                       self.getSchedule().getStartDate(),
                                       self.getSchedule().getEndDate()),
                                 "Event")
            elif check == 2:
                # grow the event so the session fits
                if self.getSchedule().getStartDate() > new_session.getStartDate():
                    self.setStartDate(new_session.getStartDate())
                if self.getSchedule().getEndDate() < new_session.getEndDate():
                    self.setEndDate(new_session.getEndDate())
        if session_id is not None:
            session_id = session_id
            # Keep ID counter up to date
            self.__sessionGenerator.sync(session_id)
        else:
            session_id = self.__generateNewSessionId()
        self.sessions[session_id] = new_session
        new_session.includeInConference(self, session_id)
        # keep the session coordinator index updated
        for sc in new_session.getCoordinatorList():
            self.addSessionCoordinator(new_session, sc)
        self.notifyModification()
def hasSession(self,session):
if session != None and session.getConference()==self and \
self.sessions.has_key(session.getId()):
return True
return False
    def removeSession(self,session, deleteContributions=False):
        """Detach and delete a session; optionally delete its contributions
        as well (attachments always go)."""
        if self.hasSession(session):
            for sc in session.getCoordinatorList():
                self.removeSessionCoordinator(session,sc)
            if deleteContributions:
                for contrib in session.getContributionList():
                    contrib.delete()
            session.remove_attachments()
            del self.sessions[session.getId()]
            # flag dict mutation for ZODB
            self._p_changed = True
            session.delete()
            self.notifyModification()
    def recoverSession(self, session, check, isCancelled):
        """Re-attach a session restored from the trash can, keeping its id."""
        self.addSession(session, check, session.getId())
        session.recover(isCancelled)
def getSessionById( self, sessionId ):
"""Returns the session from the conference list corresponding to the
unique session id specified
"""
return self.sessions.get(sessionId,None)
def getRoomList(self):
roomList =[]
for session in self.sessions.values():
if session.getRoom()!=None:
roomname = session.getRoom().getName()
if roomname not in roomList:
roomList.append(roomname)
return roomList
    def getSessionList( self ):
        """Returns a list of the conference session objects
        """
        return self.sessions.values()
    def getSessionListSorted( self ):
        """Return the sessions ordered by their first scheduled slot
        (deduplicated, timetable order)."""
        res=[]
        for entry in self.getSchedule().getEntries():
            if isinstance(entry,LinkedTimeSchEntry) and \
                            isinstance(entry.getOwner(),SessionSlot):
                session=entry.getOwner().getSession()
                if session not in res:
                    res.append(session)
        return res
    def getSessionSlotList(self):
        """Return every slot of every session as a flat list."""
        return [slot for session in self.sessions.values() for slot in session.getSlotList()]
    def getNumberOfSessions(self):
        """Return the number of sessions in this event."""
        return len(self.sessions)
    def _generateNewContributionId(self):
        """Returns a new unique identifier for the current conference
            contributions
        """
        return str(self.__contribGenerator.newCount())
    def genNewAbstractId(self):
        """Return a new abstract id (shares the contribution id counter so
        abstracts and contributions never collide)."""
        return str(self.__contribGenerator.newCount())
    def syncContribCounter(self):
        """Advance the contribution counter past the legacy abstract counter
        and return its current value."""
        self.__contribGenerator.sync(self.getAbstractMgr()._getOldAbstractCounter())
        return self.__contribGenerator._getCount()
    def addContribution(self, newContrib, contrib_id=None):
        """Adds a new contribution object to the conference taking care of
            assigning a new unique id to it

        Withdrawn contributions are rejected; author/speaker/submitter
        indexes are kept in sync and a creation signal is emitted.
        """
        if self.hasContribution(newContrib):
            return
        if isinstance(newContrib.getCurrentStatus(),ContribStatusWithdrawn):
            raise MaKaCError( _("Cannot add a contribution which has been withdrawn"), _("Event"))
        if contrib_id is None or contrib_id == '':
            contribId=self._generateNewContributionId()
            while self.contributions.has_key(contribId):
                contribId=self._generateNewContributionId()
        else:
            contribId = str(contrib_id)
            # keep the counter ahead of explicitly-assigned ids
            self.__contribGenerator.sync(contribId)
            if self.contributions.has_key(contribId):
                raise MaKaCError( _("Cannot add this contribution id:(%s) as it has already been used")%contribId, _("Event"))
        newContrib.includeInConference(self,contribId)
        self.contributions[contribId]=newContrib
        for auth in newContrib.getAuthorList():
            self.indexAuthor(auth)
        for spk in newContrib.getSpeakerList():
            self.indexSpeaker(spk)
        for sub in newContrib.getSubmitterList():
            self.addContribSubmitter(newContrib,sub)
        signals.event.contribution_created.send(newContrib, parent=self)
        self.notifyModification()
def hasContribution(self,contrib):
return contrib.getConference()==self and \
self.contributions.has_key(contrib.getId())
    def removeContribution( self, contrib, callDelete=True ):
        """Detach a contribution, unregistering its submitters, authors and
        speakers; deletes it unless callDelete=False."""
        if not self.contributions.has_key( contrib.getId() ):
            return
        # iterate over copies: the remove* calls mutate the lists
        for sub in contrib.getSubmitterList()[:]:
            self.removeContribSubmitter(contrib,sub)
        for auth in contrib.getPrimaryAuthorList()[:]:
            contrib.removePrimaryAuthor(auth)
        for auth in contrib.getCoAuthorList()[:]:
            contrib.removeCoAuthor(auth)
        for spk in contrib.getSpeakerList()[:]:
            contrib.removeSpeaker(spk)
        del self.contributions[ contrib.getId() ]
        # flag dict mutation for ZODB
        self._p_changed = True
        if callDelete:
            contrib.delete()
        #else:
        #    contrib.unindex()
        self.notifyModification()
    def recoverContribution(self, contrib):
        """Re-attach a contribution restored from the trash can."""
        self.addContribution(contrib, contrib.getId())
        contrib.recover()
    # Note: this kind of factories should never be used as they only allow to
    #   create a single type of contributions
    def newContribution( self, id=None ):
        """Creates and returns a new contribution object already added to the
            conference list (all its data is set to the default)
        """
        c = Contribution()
        self.addContribution( c, id )
        return c
def getOwnContributionById( self, id ):
"""Returns the contribution from the conference list corresponding to
the unique contribution id specified
"""
if self.contributions.has_key( id ):
return self.contributions[ id ]
return None
def getContributionById( self, id ):
"""Returns the contribution corresponding to the id specified
"""
return self.contributions.get(str(id).strip(),None)
    def getContributionList(self):
        """Returns a list of the conference contribution objects
        """
        return self.contributions.values()
    def iterContributions(self):
        """Return a lazy iterator over the contributions (Py2 itervalues)."""
        return self.contributions.itervalues()
    def getContributionListWithoutSessions(self):
        """Returns a list of the conference contribution objects which do not have a session
        """
        return [c for c in self.contributions.values() if not c.getSession()]
    def getContributionListSorted(self, includeWithdrawn=True, key="id"):
        """Returns a list of the conference contribution objects, sorted by key provided

        `key` is an attribute name of the contribution objects.
        """
        contributions = self.contributions.values()
        if not includeWithdrawn:
            contributions = filter(lambda c: not isinstance(c.getCurrentStatus(), ContribStatusWithdrawn), contributions)
        contributions.sort(key = lambda c: getattr(c, key))
        return contributions
def getNumberOfContributions(self, only_scheduled=False):
if only_scheduled:
return len(filter(lambda c: c.isScheduled(), self.contributions.itervalues()))
else:
return len(self.contributions)
    def hasSomethingOnWeekend(self, day):
        """Checks if the event has a session or contribution on the weekend indicated by `day`.
        `day` must be either a saturday or a sunday"""
        # build the (saturday, sunday) pair regardless of which one was given
        if day.weekday() == 5:
            weekend = (day, day + timedelta(days=1))
        elif day.weekday() == 6:
            weekend = (day, day - timedelta(days=1))
        else:
            raise ValueError('day must be on a weekend')
        return (any(c.startDate.date() in weekend and not isinstance(c.getCurrentStatus(), ContribStatusWithdrawn)
                    for c in self.contributions.itervalues() if c.startDate is not None) or
                any(s.startDate.date() in weekend for s in self.sessions.itervalues() if s.startDate is not None))
def getProgramDescription(self):
try:
return self.programDescription
except:
self.programDescription = ""
return self.programDescription
    def setProgramDescription(self, txt):
        """Set the programme description text."""
        self.programDescription = txt
    def _generateNewTrackId( self ):
        """Return a new unique identifier for programme tracks."""
        return str(self.__programGenerator.newCount())
    def addTrack( self, newTrack ):
        """Append a track to the programme, assigning an id when it has
        none; no-op when already present."""
        #XXX: The conference program shoul be isolated in a separated object
        if newTrack in self.program:
            return
        trackId = newTrack.getId()
        if trackId == "not assigned":
            trackId = self._generateNewTrackId()
        self.program.append( newTrack )
        newTrack.setConference( self )
        newTrack.setId( trackId )
        self.notifyModification()
    def removeTrack( self, track ):
        """Delete a track and drop it from the programme.

        NOTE(review): membership is re-checked after delete() — presumably
        because delete() may already remove the track; confirm.
        """
        if track in self.program:
            track.delete()
            if track in self.program:
                self.program.remove( track )
            self.notifyModification()
def recoverTrack(self, track):
self.addTrack(track)
track.recover()
def newTrack( self ):
"""
"""
t = Track()
self.addTrack( t )
return t
def getTrackById( self, id ):
"""
"""
for track in self.program:
if track.getId() == id.strip():
return track
return None
def getTrackList( self ):
"""
"""
return self.program
def isLastTrack(self,track):
"""
"""
return self.getTrackPos(track)==(len(self.program)-1)
def isFirstTrack(self,track):
"""
"""
return self.getTrackPos(track)==0
def getTrackPos(self,track):
"""
"""
return self.program.index(track)
def moveTrack(self,track,newPos):
"""
"""
self.program.remove(track)
self.program.insert(newPos,track)
self.notifyModification()
def moveUpTrack(self,track):
"""
"""
if self.isFirstTrack(track):
return
newPos=self.getTrackPos(track)-1
self.moveTrack(track,newPos)
def moveDownTrack(self,track):
"""
"""
if self.isLastTrack(track):
return
newPos = self.getTrackPos(track) + 1
self.moveTrack(track, newPos)
def _cmpTracks(self, t1, t2):
o1 = self.program.index(t1)
o2 = self.program.index(t2)
return cmp(o1, o2)
def sortTrackList(self, l):
"""Sorts out a list of tracks according to the current programme order.
"""
if len(l) == 0:
return []
elif len(l) == 1:
return [l[0]]
else:
res = []
for i in l:
res.append(i)
res.sort(self._cmpTracks)
return res
    def requireDomain(self, dom):
        # Restrict access to the given network domain and notify listeners.
        self.__ac.requireDomain(dom)
        signals.event.domain_access_granted.send(self, domain=dom)
    def freeDomain(self, dom):
        # Lift the restriction for the given network domain and notify listeners.
        self.__ac.freeDomain(dom)
        signals.event.domain_access_revoked.send(self, domain=dom)
    def getDomainList(self):
        # Domains from which the event may be accessed (empty = no restriction).
        return self.__ac.getRequiredDomainList()
    def isProtected(self):
        """Tells whether a conference is protected for accessing or not
        """
        return self.__ac.isProtected()
    def getAccessProtectionLevel( self ):
        # Delegated to the access controller; -1 means explicitly public
        # (see hasAnyProtection / setProtection below).
        return self.__ac.getAccessProtectionLevel()
    def isItselfProtected( self ):
        # NOTE(review): delegated; appears to check protection set directly on
        # this event rather than inherited — confirm in AccessController.
        return self.__ac.isItselfProtected()
    def hasAnyProtection( self ):
        """Tells whether a conference has any kind of protection over it:
        access or domain protection.
        """
        if self.isProtected():
            return True
        if self.getDomainList():
            return True
        if self.getAccessProtectionLevel() == -1:
            # Explicitly public: do not inherit protection from the owners.
            return False
        for owner in self.getOwnerList():
            if owner.hasAnyProtection():
                return True
        return False
    def hasProtectedOwner( self ):
        return self.__ac._getFatherProtection()
    def setProtection( self, private ):
        """
        Allows to change the conference access protection
        """
        # 1 = protected, -1 = public (same encoding as the protection level).
        oldValue = 1 if self.isProtected() else -1
        self.getAccessController().setProtection( private )
        if oldValue != private:
            # notify listeners
            signals.event.protection_changed.send(self, old=oldValue, new=private)
    def grantAccess( self, prin ):
        # Grant access to a principal; avatars also get a back-link to the event.
        self.__ac.grantAccess( prin )
        if isinstance(prin, AvatarUserWrapper):
            prin.linkTo(self, "access")
    def revokeAccess( self, prin ):
        # Reverse of grantAccess.
        self.__ac.revokeAccess( prin )
        if isinstance(prin, AvatarUserWrapper):
            prin.unlinkTo(self, "access")
    def canView( self, aw ):
        """tells whether the specified access wrappers has access to the current
        object or any of its parts"""
        if self.canAccess( aw ):
            return True
        # Even without access to the event itself, access to any single
        # session or contribution is enough.
        for session in self.getSessionList():
            if session.canView( aw ):
                return True
        for contrib in self.getContributionList():
            if contrib.canView( aw ):
                return True
        return False
    def isAllowedToAccess( self, av):
        """tells if a user has privileges to access the current conference
        (independently that it is protected or not)
        """
        if not av:
            return False
        if (av in self.getChairList()) or (self.__ac.canUserAccess( av )) or (self.canUserModify( av )):
            return True
        # if the conference is not protected by itself
        if not self.isItselfProtected():
            # then inherit behavior from parent category
            for owner in self.getOwnerList():
                if owner.isAllowedToAccess( av ):
                    return True
        # track coordinators are also allowed to access the conference
        for track in self.getTrackList():
            if track.isCoordinator( av ):
                return True
        # paper reviewing team should be also allowed to access
        if self.getConfPaperReview().isInReviewingTeam(av):
            return True
        return False
    def canAccess( self, aw ):
        """Tells whether an access wrapper is allowed to access the current
        conference: when the conference is protected, only if the user is a
        chair or is granted to access the conference, when the client ip is
        not restricted.
        """
        # Allow harvesters (Invenio, offline cache) to access
        # protected pages
        if has_request_context() and self.__ac.isHarvesterIP(request.remote_addr):
            return True
        if self.isProtected():
            if self.isAllowedToAccess(aw.getUser()):
                return True
            else:
                # Access keys and modification rights also unlock the event.
                return self.canKeyAccess(aw) or self.canModify(aw)
        else:
            # Domain control is triggered just for PUBLIC events
            return self.canIPAccess(request.remote_addr) or self.canModify(aw)
    def canKeyAccess(self, aw, key=None):
        # True when the explicit key (or one stored in the user's session)
        # matches the event's access key.
        accessKey = self.getAccessKey()
        if not accessKey:
            return False
        return key == accessKey or session.get('accessKeys', {}).get(self.getUniqueId()) == accessKey
    def canKeyModify(self):
        # True when the user's session carries the event's modification key.
        modifKey = self.getModifKey()
        if not modifKey:
            return False
        return session.get('modifKeys', {}).get(self.id) == modifKey
    @unify_user_args
    def canUserModify(self, user):
        return self.as_event.can_manage(user)
    def canModify(self, aw_or_user):
        """Tells whether an access wrapper is allowed to modify the current
        conference: only if the user is granted to modify the conference and
        he is accessing from an IP address which is not restricted.
        """
        # Normalise the argument: access wrappers and legacy avatars are
        # unwrapped down to the actual user object.
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        if isinstance(aw_or_user, AvatarUserWrapper):
            aw_or_user = aw_or_user.user
        return self.as_event.can_manage(aw_or_user, allow_key=True)
    def getManagerList(self):
        # ACL principals with a management role: groups first, then users,
        # each set sorted alphabetically.
        managers = sorted([x.principal for x in self.as_event.acl_entries if x.has_management_role()],
                          key=lambda x: (not x.is_group, x.name.lower()))
        return [x.as_legacy for x in managers]
    def getRegistrarList(self):
        # Principals holding the explicit 'registration' management role.
        registrars = sorted([x.principal for x in self.as_event.acl_entries if x.has_management_role('registration',
                                                                                                     explicit=True)],
                            key=lambda x: (not x.is_group, x.name.lower()))
        return [x.as_legacy for x in registrars]
    @unify_user_args
    def canManageRegistration(self, av):
        return self.as_event.can_manage(av, role='registration', allow_key=True)
    def getAllowedToAccessList( self ):
        return self.__ac.getAccessList()
    def addMaterial( self, newMat ):
        # Register a material under a freshly generated id and take ownership.
        newMat.setId( str(self.__materialGenerator.newCount()) )
        newMat.setOwner( self )
        self.materials[ newMat.getId() ] = newMat
        self.notifyModification()
    def _setSchedule( self, sch=None ):
        # (Re)build the conference schedule from the session slots; the
        # ``sch`` argument is accepted but ignored.
        self.__schedule=ConferenceSchedule(self)
        for session in self.getSessionList():
            for slot in session.getSlotList():
                self.__schedule.addEntry(slot.getConfSchEntry())
    def getSchedule( self ):
        # Lazily (re)create the schedule for legacy objects missing it.
        try:
            if not self.__schedule:
                self._setSchedule()
        except AttributeError, e:
            self._setSchedule()
        return self.__schedule
    def fit( self ):
        # Adjust the event dates to exactly wrap the scheduled entries.
        sch = self.getSchedule()
        sDate = sch.calculateStartDate()
        eDate = sch.calculateEndDate()
        self.setStartDate(sDate)
        self.setEndDate(eDate)
    def fitSlotsOnDay( self, day ):
        # Fit every session slot scheduled on the given day to its content.
        for entry in self.getSchedule().getEntriesOnDay(day) :
            if isinstance(entry.getOwner(), SessionSlot) :
                entry.getOwner().fit()
    def getDefaultStyle(self):
        # Event-specific timetable theme, falling back to the server-wide
        # default for this event type.
        return (layout_settings.get(self, 'timetable_theme') or
                HelperMaKaCInfo.getMaKaCInfoInstance().getStyleManager().getDefaultStyleForEventType(self.getType()))
    def clone( self, startDate, options, eventManager=None, userPerformingClone = None ):
        """Create and return a copy of this conference starting at ``startDate``.

        :param startDate: naive datetime expressed in the event's own timezone
        :param options: dict of flags selecting which parts to clone; keys
            used below: "managing", "sessions", "tracks", "keys", "access",
            "registration", "abstracts", "contributions", "participants"
        :param eventManager: forwarded to the participation cloning
        :param userPerformingClone: unused; kept for interface compatibility
        """
        # startDate must be in the timezone of the event (to avoid problems with daylight-saving times)
        cat = self.getOwnerList()[0]
        managing = options.get("managing",None)
        if managing is not None:
            creator = managing
        else:
            creator = self.as_event.creator
        conf = cat.newConference(creator)
        if managing is not None:
            with conf.as_event.logging_disabled:
                conf.as_event.update_principal(managing.user, full_access=True)
        conf.setTitle(self.getTitle())
        conf.setDescription(self.getDescription())
        conf.setTimezone(self.getTimezone())
        for loc in self.getLocationList():
            if loc is not None:
                conf.addLocation(loc.clone())
        if self.getRoom() is not None:
            conf.setRoom(self.getRoom().clone())
        # Shift all dates by the offset between the old and the new start.
        startDate = timezone(self.getTimezone()).localize(startDate).astimezone(timezone('UTC'))
        timeDelta = startDate - self.getStartDate()
        endDate = self.getEndDate() + timeDelta
        conf.setDates( startDate, endDate, moveEntries=1 )
        conf.setContactInfo(self.getContactInfo())
        conf.setChairmanText(self.getChairmanText())
        conf.setVisibility(self.getVisibility())
        conf.setSupportInfo(self.getSupportInfo().clone(self))
        conf.setReportNumberHolder(self.getReportNumberHolder().clone(self))
        for ch in self.getChairList():
            conf.addChair(ch.clone())
        # Record old -> new unique-id mappings for later cross-reference fixups.
        ContextManager.setdefault("clone.unique_id_map", {})[self.getUniqueId()] = conf.getUniqueId()
        # Contribution Types' List (main details of the conference)
        for t in self.getContribTypeList() :
            conf.addContribType(t.clone(conf))
        if options.get("sessions", False):
            for entry in self.getSchedule().getEntries():
                if isinstance(entry,BreakTimeSchEntry):
                    conf.getSchedule().addEntry(entry.clone(conf))
        db_root = DBMgr.getInstance().getDBConnection().root()
        if db_root.has_key( "webfactoryregistry" ):
            confRegistry = db_root["webfactoryregistry"]
        else:
            confRegistry = OOBTree.OOBTree()
            db_root["webfactoryregistry"] = confRegistry
        meeting=False
        # if the event is a meeting or a lecture
        if confRegistry.get(str(self.getId()), None) is not None :
            meeting=True
            confRegistry[str(conf.getId())] = confRegistry[str(self.getId())]
        # if it's a conference, no web factory is needed
        # Tracks in a conference
        if options.get("tracks",False) :
            for tr in self.getTrackList() :
                conf.addTrack(tr.clone(conf))
        # Meetings' and conferences' sessions cloning
        if options.get("sessions",False) :
            for s in self.getSessionList() :
                newSes = s.clone(timeDelta, conf, options, session_id=s.getId())
                ContextManager.setdefault("clone.unique_id_map", {})[s.getUniqueId()] = newSes.getUniqueId()
                conf.addSession(newSes)
        # access and modification keys
        if options.get("keys", False) :
            conf.setAccessKey(self.getAccessKey())
            conf.setModifKey(self.getModifKey())
        # Access Control cloning
        if options.get("access", False):
            conf.setProtection(self.getAccessController()._getAccessProtection())
            for entry in self.as_event.acl_entries:
                conf.as_event.update_principal(entry.principal, read_access=entry.read_access,
                                               full_access=entry.full_access, roles=entry.roles, quiet=True)
            for user in self.getAllowedToAccessList():
                conf.grantAccess(user)
            for right in self.getSessionCoordinatorRights():
                conf.addSessionCoordinatorRight(right)
            for domain in self.getDomainList():
                conf.requireDomain(domain)
        # conference's registration form
        if options.get("registration",False) :
            conf.setRegistrationForm(self.getRegistrationForm().clone(conf))
        #conference's abstracts
        if options.get("abstracts",False) :
            conf.abstractMgr = self.abstractMgr.clone(conf)
        # Meetings' and conferences' contributions cloning
        if options.get("contributions",False) :
            sch = conf.getSchedule()
            for cont in self.getContributionList():
                if cont.getSession() is None :
                    if not meeting:
                        nc = cont.clone(conf, options, timeDelta)
                        conf.addContribution(nc)
                        if cont.isScheduled() :
                            sch.addEntry(nc.getSchEntry())
                        ContextManager.setdefault("clone.unique_id_map", {})[cont.getUniqueId()] = nc.getUniqueId()
                    elif cont.isScheduled():
                        # meetings...only scheduled
                        nc = cont.clone(conf, options, timeDelta)
                        conf.addContribution(nc)
                        sch.addEntry(nc.getSchEntry())
                        ContextManager.setdefault("clone.unique_id_map", {})[cont.getUniqueId()] = nc.getUniqueId()
        # Participants' module settings and list cloning
        if options.get("participants",False) :
            self.getParticipation().clone(conf, options, eventManager)
        conf.notifyModification()
        # Copy the list of enabled features
        features_event_settings.set_multi(conf, features_event_settings.get_all(self))
        # Run the new modular cloning system
        EventCloner.clone_event(self, conf)
        return conf
    def getCoordinatedTracks( self, av ):
        """Returns a list with the tracks for which a user is coordinator.
        """
        # Lazily create the coordinator index on legacy objects.
        try:
            if self._trackCoordinators:
                pass
        except AttributeError:
            self._trackCoordinators = TCIndex()
            self.notifyModification()
        return self._trackCoordinators.getTracks( av )
    def addTrackCoordinator( self, track, av ):
        """Makes a user become coordinator for a track.
        """
        try:
            if self._trackCoordinators:
                pass
        except AttributeError:
            self._trackCoordinators = TCIndex()
            self.notifyModification()
        # Only tracks that belong to this conference's programme are indexed.
        if track in self.program:
            track.addCoordinator( av )
            self._trackCoordinators.indexCoordinator( av, track )
            self.notifyModification()
    def removeTrackCoordinator( self, track, av ):
        """Removes a user as coordinator for a track.
        """
        try:
            if self._trackCoordinators:
                pass
        except AttributeError:
            self._trackCoordinators = TCIndex()
            self.notifyModification()
        if track in self.program:
            track.removeCoordinator( av )
            self._trackCoordinators.unindexCoordinator( av, track )
            self.notifyModification()
    def _rebuildAuthorIndex(self):
        # Recompute the author index from all non-withdrawn contributions.
        self._authorIdx=AuthorIndex()
        for contrib in self.getContributionList():
            if not isinstance(contrib.getCurrentStatus(),ContribStatusWithdrawn):
                for auth in contrib.getAuthorList():
                    self._authorIdx.index(auth)
    def getAuthorIndex(self):
        # Lazily build the index on first use (legacy objects).
        try:
            if self._authorIdx:
                pass
        except AttributeError:
            self._rebuildAuthorIndex()
        return self._authorIdx
    def indexAuthor(self,auth):
        # Index the author (and, when applicable, primary author) of a
        # non-withdrawn contribution.
        c=auth.getContribution()
        if c.isAuthor(auth):
            if not isinstance(c.getCurrentStatus(),ContribStatusWithdrawn):
                self.getAuthorIndex().index(auth)
            if c.isPrimaryAuthor(auth):
                self._getPrimAuthIndex().index(auth)
    def unindexAuthor(self,auth):
        # Reverse of indexAuthor; the withdrawn status is not checked here.
        c=auth.getContribution()
        if c.isAuthor(auth):
            self.getAuthorIndex().unindex(auth)
            if c.isPrimaryAuthor(auth):
                self._getPrimAuthIndex().unindex(auth)
    def _rebuildSpeakerIndex(self):
        # Recompute the speaker index from the contributions and their
        # subcontributions, skipping withdrawn contributions.
        self._speakerIdx=AuthorIndex()
        for contrib in self.getContributionList():
            if not isinstance(contrib.getCurrentStatus(),ContribStatusWithdrawn):
                for auth in contrib.getSpeakerList():
                    self._speakerIdx.index(auth)
                for subcontrib in contrib.getSubContributionList():
                    for auth in subcontrib.getSpeakerList():
                        self._speakerIdx.index(auth)
    def getSpeakerIndex(self):
        # Lazily build the speaker index on first use (legacy objects).
        try:
            if self._speakerIdx:
                pass
        except AttributeError:
            self._rebuildSpeakerIndex()
        return self._speakerIdx
    def indexSpeaker(self,auth):
        c=auth.getContribution()
        if not isinstance(c.getCurrentStatus(),ContribStatusWithdrawn):
            self.getSpeakerIndex().index(auth)
    def unindexSpeaker(self,auth):
        c=auth.getContribution()
        if c and not isinstance(c.getCurrentStatus(),ContribStatusWithdrawn):
            self.getSpeakerIndex().unindex(auth)
    def getRegistrationForm(self):
        # Lazily create the registration form (legacy objects may lack it).
        try:
            if self._registrationForm is None:
                self._registrationForm = registration.RegistrationForm(self)
        except AttributeError,e:
            self._registrationForm = registration.RegistrationForm(self)
        return self._registrationForm
    def setRegistrationForm(self,rf):
        self._registrationForm = rf
        rf.setConference(self)
    def removeRegistrationForm(self):
        # Delete and detach the current form; the end state is always None.
        try:
            self._registrationForm.delete()
            self._registrationForm.setConference(None)
            self._registrationForm = None
        except AttributeError:
            self._registrationForm = None
    def recoverRegistrationForm(self, rf):
        # Re-attach a deleted form and restore it from the trash can.
        self.setRegistrationForm(rf)
        rf.recover()
    ## Videoconference bookings related
    def getBookings(self):
        # Mapping of booking id -> booking, lazily created.
        try:
            if self._bookings:
                pass
        except AttributeError, e:
            self._bookings = {}
            self.notifyModification()
        return self._bookings
    def getBookingsList(self, sort = False):
        bl = self.getBookings().values()
        if sort:
            bl.sort()
        return bl
    def _getBookingGenerator(self):
        # Counter used to allocate new booking ids (lazily created).
        try:
            return self._bookingGenerator
        except AttributeError, e:
            self._bookingGenerator = Counter()
            return self._bookingGenerator
    def getNewBookingId(self):
        return str(self._getBookingGenerator().newCount())
    def addBooking(self, bp):
        # Assign an id only when the booking does not have one yet.
        if (bp.getId() == ""):
            bp.setId(self.getNewBookingId())
        self.getBookings()[bp.getId()] = bp
        self.notifyModification()
    def hasBooking(self,booking):
        return booking.getConference()==self and \
               self.getBookings().has_key(booking.getId())
    def removeBooking(self, booking):
        if self.hasBooking(booking):
            deletion= booking.deleteBooking()
            # NOTE(review): the booking is only unregistered when the status
            # code is not 1 — confirm the meaning of that code in the
            # booking class.
            if deletion[0] != 1:
                del self.getBookings()[booking.getId()]
                self.notifyModification()
            return deletion
    def getBookingByType(self, type):
        if self.getBookings().has_key(type):
            return self.getBookings()[type]
        return None
    def getBookingById(self, id):
        if self.getBookings().has_key(id):
            return self.getBookings()[id]
        return None
    ## End of Videoconference bookings related
    def getRegistrants(self):
        # Mapping of registrant id -> registrant, lazily created.
        try:
            if self._registrants:
                pass
        except AttributeError, e:
            self._registrants = {}
            self.notifyModification()
        return self._registrants
    def getRegistrantsByEmail(self, email=None):
        """
        Returns the index of registrants by email OR a specific registrant if an email address
        is passed as argument.
        """
        try:
            if self._registrantsByEmail:
                pass
        except AttributeError, e:
            self._registrantsByEmail = self._createRegistrantsByEmail()
            self.notifyModification()
        if email:
            return self._registrantsByEmail.get(email)
        return self._registrantsByEmail
    def _createRegistrantsByEmail(self):
        # Build the email -> registrant map; on duplicate addresses the
        # registrant iterated last wins.
        dicByEmail = {}
        for r in self.getRegistrantsList():
            dicByEmail[r.getEmail()] = r
        return dicByEmail
    def getRegistrantsList(self, sort = False):
        rl = self.getRegistrants().values()
        if sort:
            rl.sort(registration.Registrant._cmpFamilyName)
        return rl
    def _getRegistrantGenerator(self):
        # Counter used to allocate new registrant ids (lazily created).
        try:
            return self._registrantGenerator
        except AttributeError, e:
            self._registrantGenerator = Counter()
            return self._registrantGenerator
    def addRegistrant(self, rp, user):
        # Register the registrant under a fresh id and notify listeners.
        rp.setId( str(self._getRegistrantGenerator().newCount()) )
        rp.setOwner( self )
        self.getRegistrants()[rp.getId()] = rp
        signals.event.registrant_changed.send(self, user=user, registrant=rp, action='added')
        self.notifyModification()
    def updateRegistrantIndexByEmail(self, rp, newEmail):
        # Keep the by-email index in sync when a registrant's address changes.
        oldEmail = rp.getEmail()
        if oldEmail != newEmail:
            if self.getRegistrantsByEmail().has_key(oldEmail):
                del self.getRegistrantsByEmail()[oldEmail]
            self.getRegistrantsByEmail()[newEmail] = rp
            self.notifyModification()
    def hasRegistrant(self,rp):
        return rp.getConference()==self and \
               self.getRegistrants().has_key(rp.getId())
    def hasRegistrantByEmail(self, email):
        # Return true if there is someone with the email of the param "email"
        return self.getRegistrantsByEmail().has_key(email)
    def removeRegistrant(self, id):
        # Drop the registrant from both indexes, notify the registration
        # form and listeners, and move the object to the trash can.
        part = self.getRegistrants()[id]
        self._registrationForm.notifyRegistrantRemoval(self.getRegistrants()[id])
        del self.getRegistrantsByEmail()[self.getRegistrantById(id).getEmail()]
        del self.getRegistrants()[id]
        signals.event.registrant_changed.send(self, user=part.getAvatar(), registrant=part, action='removed')
        TrashCanManager().add(part)
        self.notifyModification()
    def getRegistrantById(self, id):
        if self.getRegistrants().has_key(id):
            return self.getRegistrants()[id]
        return None
    def _getPrimAuthIndex(self):
        # Lazily built index of primary authors.
        try:
            if self._primAuthIdx:
                pass
        except AttributeError:
            self._primAuthIdx=_PrimAuthIdx(self)
        return self._primAuthIdx
    def getContribsMatchingAuth(self,query,onlyPrimary=True):
        # An empty query matches every contribution.
        if str(query).strip()=="":
            return self.getContributionList()
        res=self._getPrimAuthIndex().match(query)
        return [self.getContributionById(id) for id in res]
    def getCoordinatedSessions( self, av ):
        """Returns a list with the sessions for which a user is coordinator.
        """
        try:
            if self._sessionCoordinators:
                pass
        except AttributeError:
            self._sessionCoordinators = SCIndex()
        sessions = self._sessionCoordinators.getSessions( av )
        # Sessions where the user is only a *pending* coordinator (matched
        # by email) are included as well.
        for session in self.getSessionList():
            if session not in sessions and av != None:
                for email in av.getEmails():
                    if email in session.getCoordinatorEmailList():
                        sessions.append(session)
                        break
        return sessions
    def getManagedSession( self, av ):
        # Sessions managed by the user, including pending (email-matched)
        # managers.
        ls = []
        for session in self.getSessionList():
            pending = False
            if av != None:
                for email in av.getEmails():
                    if email in session.getAccessController().getModificationEmail():
                        pending = True
                        break
            if av in session.getManagerList() or pending:
                ls.append(session)
        return ls
    def addSessionCoordinator(self,session,av):
        """Makes a user become coordinator for a session.
        """
        try:
            if self._sessionCoordinators:
                pass
        except AttributeError:
            self._sessionCoordinators = SCIndex()
        # Only sessions belonging to this conference are indexed.
        if self.sessions.has_key(session.getId()):
            session.addCoordinator(av)
            self._sessionCoordinators.index(av,session)
            session._addCoordinatorEmail(av.getEmail())
    def removeSessionCoordinator( self, session, av ):
        """Removes a user as coordinator for a session.
        """
        try:
            if self._sessionCoordinators:
                pass
        except AttributeError:
            self._sessionCoordinators = SCIndex()
        if self.sessions.has_key(session.getId()):
            session.removeCoordinator( av )
            self._sessionCoordinators.unindex(av,session)
            session.removeCoordinatorEmail(av.getEmail())
    def _getSubmitterIdx(self):
        # Lazily created index of contribution submitters.
        try:
            return self._submitterIdx
        except AttributeError:
            self._submitterIdx=SubmitterIndex()
            return self._submitterIdx
    def addContribSubmitter(self,contrib,av):
        self._getSubmitterIdx().index(av,contrib)
    def removeContribSubmitter(self,contrib,av):
        self._getSubmitterIdx().unindex(av,contrib)
    def getContribsForSubmitter(self,av):
        return self._getSubmitterIdx().getContributions(av)
    def getBOAConfig(self):
        # Book-of-abstracts configuration, lazily created.
        try:
            if self._boa:
                pass
        except AttributeError:
            self._boa=BOAConfig(self)
        return self._boa
    def getSessionCoordinatorRights(self):
        # List of right ids granted to session coordinators (lazily created).
        try:
            if self._sessionCoordinatorRights:
                pass
        except AttributeError, e:
            self._sessionCoordinatorRights = []
            self.notifyModification()
        return self._sessionCoordinatorRights
    def hasSessionCoordinatorRight(self, right):
        return right in self.getSessionCoordinatorRights()
    def addSessionCoordinatorRight(self, right):
        # Only rights known to SessionCoordinatorRights may be granted.
        if SessionCoordinatorRights().hasRight(right) and not self.hasSessionCoordinatorRight(right):
            self._sessionCoordinatorRights.append(right)
            self.notifyModification()
    def removeSessionCoordinatorRight(self, right):
        if SessionCoordinatorRights().hasRight(right) and self.hasSessionCoordinatorRight(right):
            self._sessionCoordinatorRights.remove(right)
            self.notifyModification()
    def hasEnabledSection(self, section):
        # This hack is there since there is no more enable/disable boxes
        # in the conference managment area corresponding to those features.
        # Until the managment area is improved to get a more user-friendly
        # way of enabling/disabling those features, we always make them
        # available for the time being, but we keep the previous code for
        # further improvements
        return True
    def getPendingQueuesMgr(self):
        # Manager of the pending-users queues, lazily created.
        try:
            if self._pendingQueuesMgr:
                pass
        except AttributeError, e:
            self._pendingQueuesMgr=pendingQueues.ConfPendingQueuesMgr(self)
        return self._pendingQueuesMgr
    def getAccessController(self):
        return self.__ac
    def _cmpTitle( c1, c2 ):
        # cmp-style comparison of two conferences by normalised title.
        o1 = c1.getTitle().lower().strip()
        o2 = c2.getTitle().lower().strip()
        return cmp( o1, o2 )
    _cmpTitle=staticmethod(_cmpTitle)
    def getReportNumberHolder(self):
        # Holder of the event's report numbers, lazily created.
        try:
            if self._reportNumberHolder:
                pass
        except AttributeError, e:
            self._reportNumberHolder=ReportNumberHolder(self)
        return self._reportNumberHolder
    def setReportNumberHolder(self, rnh):
        self._reportNumberHolder=rnh
    def getBadgeTemplateManager(self):
        # Manager of the badge templates, lazily created.
        try:
            if self.__badgeTemplateManager:
                pass
        except AttributeError:
            self.__badgeTemplateManager = BadgeTemplateManager(self)
        return self.__badgeTemplateManager
    def setBadgeTemplateManager(self, badgeTemplateManager):
        self.__badgeTemplateManager = badgeTemplateManager
    def getPosterTemplateManager(self):
        # Manager of the poster templates, lazily created.
        try:
            if self.__posterTemplateManager:
                pass
        except AttributeError:
            self.__posterTemplateManager = PosterTemplateManager(self)
        return self.__posterTemplateManager
    def setPosterTemplateManager(self, posterTemplateManager):
        self.__posterTemplateManager = posterTemplateManager
class DefaultConference(Conference):
    """Singleton-style 'default' conference that only stores the default
    templates for posters and badges.
    """
    def __init__(self):
        Conference.__init__(self, id='default')
    def indexConf(self):
        # The default conference must never show up in the event index.
        pass
    def notifyModification(self, *args, **kwargs):
        # Modification tracking is irrelevant for the default conference.
        pass
class ConferenceHolder( ObjectHolder ):
    """Specialised ObjectHolder dealing with conference objects. It gives a
        common entry point and provides simple methods to access and
        maintain the collection of stored conferences (DB).
    """
    idxName = "conferences"
    counterName = "CONFERENCE"
    def _newId(self):
        # Event ids come from the SQL Event table (see add()), never from ZODB.
        raise RuntimeError('Tried to get new event id from zodb')
    @unify_user_args
    def add(self, conf, creator):
        # Create the SQL Event counterpart first and register the legacy
        # conference under the same id.
        from indico.modules.events import Event
        event = Event(creator=creator)
        db.session.add(event)
        db.session.flush()
        conf.setId(event.id)
        if conf.id in self._getIdx():
            raise RuntimeError('{} is already in ConferenceHolder'.format(conf.id))
        ObjectHolder.add(self, conf)
        # Grant the creator full management access without logging it.
        with event.logging_disabled:
            event.update_principal(creator, full_access=True)
        db.session.flush()
    def getById(self, id, quiet=False):
        """Return the conference with the given id.

        Legacy-style ids are translated through LegacyEventMapping.  When no
        event is found, NotFoundError is raised unless ``quiet`` is set, in
        which case None is returned.
        """
        if id == 'default':
            return CategoryManager().getDefaultConference()
        id = str(id)
        if is_legacy_id(id):
            mapping = LegacyEventMapping.find_first(legacy_event_id=id)
            id = str(mapping.event_id) if mapping is not None else None
        event = self._getIdx().get(id) if id is not None else None
        if event is None and not quiet:
            raise NotFoundError(_("The event with id '{}' does not exist or has been deleted").format(id),
                                title=_("Event not found"))
        return event
class SessionChair(ConferenceParticipation):
    """Participation record for a person convening/chairing a session."""
    def __init__(self):
        self._session=None
        self._id=""
        ConferenceParticipation.__init__(self)
    def _notifyModification( self ):
        if self._session != None:
            self._session.notifyModification()
    def clone(self):
        # The clone carries the personal data but no session/id binding.
        chair = SessionChair()
        chair.setValues(self.getValues())
        return chair
    def getSession(self):
        return self._session
    def getConference(self):
        s=self.getSession()
        if s is None:
            return None
        return s.getConference()
    def includeInSession(self,session,id):
        # Bind this chair to a session under the given id; no-op when
        # already bound with the same id.
        if self.getSession()==session and self.getId()==id.strip():
            return
        self._session=session
        self._id=id
    def delete( self ):
        self._session=None
        ConferenceParticipation.delete(self)
    def getLocator(self):
        # Locator of the parent session, extended with this convener's id.
        if self.getSession() is None:
            return None
        loc=self.getSession().getLocator()
        loc["convId"]=self.getId()
        return loc
    def isSessionManager(self):
        # pendings managers
        if self.getEmail() in self._session.getAccessController().getModificationEmail():
            return True
        # managers list
        for manager in self._session.getManagerList():
            if self.getEmail() == manager.getEmail():
                return True
        return False
    def isSessionCoordinator(self):
        # pendings coordinators
        if self.getEmail() in self._session.getConference().getPendingQueuesMgr().getPendingCoordinatorsKeys():
            return True
        # coordinator list
        for coord in self._session.getCoordinatorList():
            if self.getEmail() == coord.getEmail():
                return True
        return False
class SlotChair(ConferenceParticipation):
    """Participation record for a person chairing a particular session slot."""
    def __init__(self):
        self._slot = None
        self._id = ""
        ConferenceParticipation.__init__(self)
    def _notifyModification(self):
        if self._slot != None:
            self._slot.notifyModification()
    def clone(self):
        # The copy carries the personal data but no slot/id binding.
        duplicate = SlotChair()
        duplicate.setValues(self.getValues())
        return duplicate
    def getSlot(self):
        return self._slot
    def getSession(self):
        slot = self.getSlot()
        return None if slot is None else slot.getSession()
    def getConference(self):
        slot = self.getSlot()
        return None if slot is None else slot.getConference()
    def includeInSlot(self,slot,id):
        # Bind this chair to a slot under the given id; no-op when already
        # bound with the same id.
        already_bound = self.getSlot() == slot and self.getId() == id.strip()
        if already_bound:
            return
        self._slot = slot
        self._id = id
    def delete(self):
        self._slot = None
        ConferenceParticipation.delete(self)
    def getLocator(self):
        # Locator of the parent slot, extended with this convener's id.
        slot = self.getSlot()
        if slot is None:
            return None
        locator = slot.getLocator()
        locator["convId"] = self.getId()
        return locator
class SessionCoordinatorRights:
    """Registry of the rights that can be granted to session coordinators.

    Maps a right id to its human-readable description.
    """
    def __init__(self):
        self._rights = {"modifContribs": "Modify the contributions",
                        "unrestrictedSessionTT": "Unrestricted session timetable management"
                        }
    def hasRight(self, r):
        """Return True when ``r`` is a known right id."""
        # ``in`` replaces the deprecated dict.has_key() (removed in Python 3).
        return r in self._rights
    def getRights(self):
        """Return the underlying id -> description mapping."""
        return self._rights
    def getRightList(self, sort=False):
        """Return the right descriptions, optionally sorted alphabetically."""
        l = self._rights.values()
        if sort:
            l = sorted(l)
        return l
    def getRightKeys(self):
        """Return the known right ids."""
        return self._rights.keys()
    def getRight(self, id):
        """Return the description for ``id``, or None when unknown."""
        # dict.get avoids the double lookup of the old has_key + [] pattern.
        return self._rights.get(id)
class SCIndex(Persistent):
    """Index for conference session coordinators.
        This class allows to index conference session coordinators so the owner
        can answer optimally to the query if a user is coordinating
        any conference session.
        It is implemented by simply using a BTree where the Avatar id is used
        as key (because it is unique and non variable) and a list of
        coordinated sessions is kept as values. It is the responsability of the
        index owner (conference) to keep it up-to-date i.e. notify session
        coordinator additions and removals.
    """
    def __init__( self ):
        self._idx=OOBTree()
    def getSessions(self,av):
        """Gives a list with the sessions a user is coordinating.
        """
        if av == None:
            return []
        return self._idx.get(av.getId(),[])
    def index(self,av,session):
        """Registers in the index a coordinator of a session.
        """
        if av == None or session == None:
            return
        if not self._idx.has_key(av.getId()):
            l=[]
            self._idx[av.getId()]=l
        else:
            l=self._idx[av.getId()]
        if session not in l:
            l.append(session)
            self.notifyModification()
    def unindex(self,av,session):
        # Remove the session from the user's list, when present.
        if av==None or session==None:
            return
        l=self._idx.get(av.getId(),[])
        if session in l:
            l.remove(session)
            self.notifyModification()
    def notifyModification(self):
        # The plain lists inside the BTree are mutated in place; mark the
        # persistent object as changed so ZODB stores the update.
        self._idx._p_changed=1
class Session(CommonObjectBase, Locatable):
"""This class implements a conference session, being the different parts
in which the conference can be divided and the contributions can be
organised in. The class contains necessary attributes to store session
basic data and provides the operations related to sessions. In
principle, a session has no sense to exist without being related to a
conference but it is allowed for flexibility.
"""
fossilizes(ISessionFossil)
    def __init__(self, **sessionData):
        """Class constructor. Initialise the class attributes to the default
        values.

        Params:
            sessionData -- (Dict) Contains the data the session object has to
                be initialised to.
        """
        self.conference=None
        self.id="not assigned"
        self.title=""
        self.description=""
        #################################
        # Fermi timezone awareness      #
        #################################
        self.startDate = nowutc()
        #################################
        # Fermi timezone awareness(end) #
        #################################
        self.duration=timedelta(minutes=1)
        self.places=[]
        self.rooms=[]
        self.conveners=[] # This attribute must not be used and should disappear someday
        self._conveners=[]
        self._convenerGen=Counter()
        self.convenerText=""
        self.contributions={}
        self._contributionDuration=timedelta(minutes=20)  # default length for new contributions
        self.__ac=AccessController(self)
        self.materials={}
        self.__materialGenerator=Counter()
        self._comments = ""
        self.slots={}
        self.__slotGenerator=Counter()
        self._setSchedule()
        self._coordinators=OOBTree()
        self._coordinatorsEmail = []  # pending coordinators, matched by email
        self._code=""
        self._color="#e3f2d3"  # default timetable background colour
        self._textColor="#202020"  # default timetable text colour
        self._textColorToLinks=False
        self._ttType=SlotSchTypeFactory.getDefaultId()
        self._closed = False
        self._registrationSession = None
        self._creationDS = nowutc()
        self._modificationDS = nowutc()
        self._keywords = ""
@return_ascii
def __repr__(self):
event_id = self.conference.getId() if self.conference else None
return '<Session({}, {}, {})>'.format(self.getId(), self.getTitle(), event_id)
def __cmp__(self, other):
if type(self) is not type(other):
# This is actually dangerous and the ZODB manual says not to do this
# because it relies on memory order. However, this branch should never
# be taken anyway since we do not store different types in the same set
# or use them as keys.
return cmp(hash(self), hash(other))
if self.getConference() == other.getConference():
return cmp(self.getId(), other.getId())
return cmp(self.getConference(), other.getConference())
@property
@memoize_request
def note(self):
from indico.modules.events.notes.models.notes import EventNote
return EventNote.get_for_linked_object(self)
def getVerboseType(self):
return 'Session'
def getTimezone( self ):
return self.getConference().getTimezone()
def updateNonInheritingChildren(self, elem, delete=False, propagate=True):
self.getAccessController().updateNonInheritingChildren(elem, delete)
if propagate == True:
self.notify_protection_to_owner(elem, delete)
def notify_protection_to_owner(self, elem, delete=False):
""" This methods notifies the owner that the protection has been changed,
so it can update its list of non inheriting children """
self.getOwner().updateNonInheritingChildren(elem, delete)
def getKeywords(self):
try:
return self._keywords
except:
self._keywords = ""
return ""
    def setKeywords(self, keywords):
        # Plain setter; deliberately no modification notification.
        self._keywords = keywords
    def notifyModification( self, raiseEvent = True, date = None, cleanCache = True ):
        """Method called to notify the current session has been modified.

        Updates the modification date of the session and of the parent event,
        and (unless cleanCache is False) invalidates every slot's cache.
        `raiseEvent` is accepted for backwards compatibility; it is unused here.
        """
        self.setModificationDate(date)
        parent = self.getConference()
        if parent:
            parent.setModificationDate(date)
        if cleanCache:
            for slot in self.getSlotList():
                slot.cleanCache()
        self._p_changed=1
def getModificationDate( self ):
"""Returns the date in which the session was last modified"""
try:
return self._modificationDS
except:
self._modificationDS = nowutc()
return self._modificationDS
def getCreationDate( self ):
"""Returns the date in which the session was created"""
try:
return self._creationDS
except:
self._creationDS = nowutc()
return self._creationDS
    def getLogInfo(self):
        """Build a plain dict describing the session for the event log."""
        data = {}
        data["subject"] = self.title
        data["session id"] = self.id
        data["session code"] = self._code
        data["title"] = self.title
        data["description"] = self.description
        data["start date"] = format_datetime(self.startDate, locale='en_GB', timezone=self.getConference().timezone)
        data["duration"] = format_human_timedelta(self.duration)
        # NOTE: only the *last* place/room survives since the same key is reused.
        for p in self.places :
            data["place"] = p.getName()
        for r in self.rooms :
            data["room"] = r.getName()
        for sc in self.getConvenerList() :
            data["convener %s"%sc.getId()] = sc.getFullName()
        for co in self.getCoordinatorList() :
            data["coordinators %s"%co.getId()] = co.getFullName()
        return data
    def getEnableSessionSlots(self):
        # Delegates to the event; defaults to True if anything goes wrong
        # (deliberate best-effort -- the bare except is kept for compatibility).
        try:
            return self.getConference().getEnableSessionSlots()
        except:
            return True
    def cmpSessionByTitle(session1, session2):
        # Python 2 comparator for sorting sessions alphabetically by title.
        return cmp(session1.getTitle(), session2.getTitle())
    cmpSessionByTitle = staticmethod(cmpSessionByTitle)
    def hasRegistrationSession(self):
        return self.getRegistrationSession() is not None
    def getRegistrationSession(self):
        # Lazy migration of objects persisted before _registrationSession existed.
        try:
            if self._registrationSession:
                pass
        except AttributeError, e:
            self._registrationSession = None
        return self._registrationSession
    def setRegistrationSession(self, rs):
        self._registrationSession = rs
    def isClosed( self ):
        """A session is closed if its event is closed or it was closed itself."""
        if self.getConference().isClosed():
            return True
        try:
            return self._closed
        except:
            self._closed = False
            return False
    def setClosed( self, closed=True ):
        self._closed = closed
        # Closing does not touch the timetable, so the slot cache stays valid.
        self.notifyModification(cleanCache = False)
    def includeInConference(self,conf,newId):
        """Attach this session (and its slot entries) to *conf* under *newId*."""
        self.conference=conf
        self.id=newId
        for slot in self.getSlotList():
            conf.getSchedule().addEntry(slot.getConfSchEntry(),2)
        self.getConference().addSession(self)
        self.notifyModification()
    def delete(self):
        """Tear the session down: conveners, coordinators, contributions, slots,
        the optional registration session, then trash the session itself."""
        while len(self.getConvenerList()) > 0:
            self.removeConvener(self.getConvenerList()[0])
        for c in self.getCoordinatorList()[:]:
            self.removeCoordinator(c)
        while len(self.contributions.values())>0:
            self.removeContribution(self.contributions.values()[0])
        while len(self.slots.values())>0:
            self._removeSlot(self.slots.values()[0])
        if self.getConference() is not None:
            self.getConference().removeSession(self)
            if self.hasRegistrationSession():
                self.getConference().getRegistrationForm().getSessionsForm().removeSession(self.getId())
                self.getRegistrationSession().setRegistrationForm(None)
                TrashCanManager().add(self.getRegistrationSession())
        self.notify_protection_to_owner(self, delete=True)
        self.conference=None
        TrashCanManager().add(self)
    def recover(self, isCancelled):
        """Undo delete(): restore the registration session (unless the event was
        cancelled) and pull this session out of the trash can."""
        if self.hasRegistrationSession():
            if not isCancelled:
                self.getRegistrationSession().setRegistrationForm(self.getConference().getRegistrationForm())
                self.getConference().getRegistrationForm().getSessionsForm().addSession(self.getRegistrationSession())
            TrashCanManager().remove(self.getRegistrationSession())
        TrashCanManager().remove(self)
def getLocator( self ):
"""Gives back a globaly unique identification encapsulated in a Locator
object for the session instance
"""
if self.conference == None:
return Locator()
lconf = self.conference.getLocator()
lconf["sessionId"] = self.getId()
return lconf
    def getConference( self ):
        return self.conference
    def getSession( self ):
        # A session is its own "session" (mirrors the contribution API).
        return self
    def getOwner( self ):
        # The owner of a session is the conference it belongs to.
        return self.getConference()
    def getId( self ):
        return self.id
    def getUniqueId( self ):
        """returns (string) the unique identiffier of the item"""
        """used mainly in the web session access key table"""
        return "%ss%s" % (self.getConference().getUniqueId(),self.id)
    def getModifKey( self ):
        # Modification key is inherited from the event.
        return self.getConference().getModifKey()
    def getAccessKey( self ):
        # Access key is inherited from the event.
        return self.getConference().getAccessKey()
def getContribDuration(self):
try:
return self._contributionDuration
except:
self._contributionDuration = timedelta(minutes=20)
return self._contributionDuration
    def setContribDuration(self, hour=0, min=20, dur=None):
        """Set the default contribution duration.

        Pass a ready-made timedelta as *dur*, or hours/minutes separately.
        (The parameter name `min` shadows the builtin but is part of the
        public signature, so it is kept.)
        """
        if dur is not None:
            self._contributionDuration=dur
        else:
            self._contributionDuration = timedelta(hours=hour,minutes=min)
    def fit(self):
        """Shrink/stretch the session dates to exactly cover its slots."""
        #if not self.getConference().getEnableSessionSlots():
        #    self.getSlotList()[0].fit()
        self.setStartDate(self.getMinSlotStartDate(),0,0)
        self.setEndDate(self.getMaxSlotEndDate(),0)
    def addSlot(self,newSlot):
        """Register *newSlot*, fit the session dates around it and schedule it
        in both the session and the event timetables."""
        id = newSlot.getId()
        if id == "not assigned":
            newSlot.setId(str(self.__slotGenerator.newCount()))
        self.slots[newSlot.getId()]=newSlot
        self.fit()
        self.getSchedule().addEntry(newSlot.getSessionSchEntry(),2)
        if self.getConference() is not None:
            self.getConference().getSchedule().addEntry(newSlot.getConfSchEntry(),2)
        self.notifyModification()
    def _removeSlot(self,slot):
        # Low-level removal: no "last slot" guard and no logging (see removeSlot).
        del self.slots[slot.getId()]
        self._p_changed = True
        self.getSchedule().removeEntry(slot.getSessionSchEntry())
        if self.getConference() is not None:
            self.getConference().getSchedule().removeEntry(slot.getConfSchEntry())
        slot.delete()
    def removeSlot(self, slot, force=False):
        """Remove a slot; unless *force*, refuse to delete the last one."""
        if self.slots.has_key(slot.getId()):
            if len(self.slots)==1 and not force:
                raise MaKaCError( _("A session must have at least one slot"), _("Session"))
            msg = u'Deleted session block: {}'.format(to_unicode(slot.getTitle() or slot.getSession().getTitle()))
            self.getConference().log(EventLogRealm.management, EventLogKind.negative, u'Timetable',
                                     msg, session.user, data=slot.getLogInfo())
            self._removeSlot(slot)
            self.fit()
            self.notifyModification()
    def recoverSlot(self, slot):
        # Restore a previously removed slot from the trash can.
        self.addSlot(slot)
        slot.recover()
    def getSlotById(self,slotId):
        return self.slots.get(slotId,None)
    def getSlotList(self):
        return self.slots.values()
    def getSortedSlotList(self):
        # Slots ordered by start date.
        sl = self.getSlotList()
        sl.sort(key=lambda s: s.getStartDate())
        return sl
def getMinSlotStartTime(self):
min = (25,61)
for slot in self.getSlotList():
if slot.isMoreThanDay():
return (0,0)
shour = slot.getStartDate().hour
smin = slot.getStartDate().minute
if (shour, smin) < min:
min = (shour, smin)
return min
def getMaxSlotEndTime(self):
max = (-1,-1)
for slot in self.getSlotList():
if slot.isMoreThanDay():
return (23, 59)
endDate = slot.getEndDate()
if (endDate.hour, endDate.minute) > max:
newEndDate = endDate - timedelta(0, 0, 0)
max = (newEndDate.hour, newEndDate.minute)
return max
def getMinSlotStartDate(self):
slotList = self.getSlotList()
if len(slotList)==0:
return self.getStartDate()
else:
sDate = self.getEndDate()
for slot in slotList:
if slot.getStartDate() < sDate:
sDate = slot.getStartDate()
return sDate
def getMaxSlotEndDate(self):
slotList = self.getSlotList()
if len(slotList)==0:
return self.getEndDate()
else:
eDate = self.getStartDate()
for slot in slotList:
if slot.getEndDate() > eDate:
eDate = slot.getEndDate()
return eDate
def _getCorrectColor(self, color):
if not color.startswith("#"):
color = "#%s"%color
m = re.match("^#[0-9A-Fa-f]{6}$", color)
if m:
return color
return None
    def _getCorrectBgColor(self, color):
        # Validate *color*; fall back to the current background colour if invalid.
        color=self._getCorrectColor(color)
        if color is None:
            return self._color
        return color
    def _getCorrectTextColor(self, color):
        # Validate *color*; fall back to the current text colour if invalid.
        color=self._getCorrectColor(color)
        if color is None:
            return self._textColor
        return color
    def setValues( self, sessionData,check=2,moveEntries=0 ):
        """Sets all the values of the current session object from a dictionary
            containing the following key-value pairs:
                title-(str)
                description-(str)
                locationName-(str) => name of the location, if not specified
                        it will be set to the conference location name.
                locationAddress-(str)
                roomName-(str) => name of the room, if not specified it will
                        be set to the conference room name.
                sDate - (datetime) => starting date of the session, if not
                        specified it will be set to now.
                eDate - (datetime) => ending date of the session, if not
                        specified the end date will be set to the start one
                durHour - (int) => hours of duration for each entry in the
                        session by default.
                durMin - (int) => minutes of duration for each entry in the
                        session by default.
                _conveners - (str)
            check parameter:
                0: no check at all
                1: check and raise error in case of problem
                2: check and adapt the owner dates
           Please, note that this method sets ALL values which means that if
            the given dictionary doesn't contain any of the keys the value
            will set to a default value.
        """
        self.setTitle( sessionData.get("title", "NO TITLE ASSIGNED") )
        self.setDescription( sessionData.get("description", "") )
        code = sessionData.get("code", "")
        # The code defaults to the session id (or a placeholder pre-assignment).
        if code.strip() == "":
            if self.getId()=="not assigned":
                self.setCode("no code")
            else:
                self.setCode(self.getId())
        else:
            self.setCode(code)
        bgcolor = sessionData.get("backgroundColor", "")
        if bgcolor.strip() != "":
            self.setColor(self._getCorrectBgColor(bgcolor))
        textcolor = sessionData.get("textColor", "")
        if textcolor.strip() != "":
            if sessionData.has_key("autotextcolor"):
                # Derive a readable text colour from the background automatically.
                self.setTextColor(utils.getTextColorFromBackgroundColor(self.getColor()))
            else:
                self.setTextColor(self._getCorrectTextColor(textcolor))
        self.setTextColorToLinks(sessionData.has_key("textcolortolinks"))
        if "locationName" in sessionData:
            loc = self.getOwnLocation()
            if not loc:
                loc = CustomLocation()
            self.setLocation( loc )
            loc.setName( sessionData["locationName"] )
            loc.setAddress( sessionData.get("locationAddress", "") )
        else:
            # No location key at all means: inherit from the conference.
            self.setLocation(None)
        #same as for the location
        if "roomName" in sessionData:
            room = self.getOwnRoom()
            if not room:
                room = CustomRoom()
            self.setRoom( room )
            room.setName( sessionData["roomName"] )
        else:
            self.setRoom(None)
        if sessionData.get("sDate",None) is not None:
            self.setStartDate(sessionData["sDate"],check,moveEntries=moveEntries)
        if sessionData.get("eDate",None) is not None:
            self.setEndDate(sessionData["eDate"],check)
        self._checkInnerSchedule()
        if sessionData.get("contribDuration","")!="":
            self._contributionDuration = sessionData.get("contribDuration")
        else:
            self._contributionDuration = timedelta(hours=int(sessionData.get("durHour",0)), minutes=int(sessionData.get("durMin",20)))
        self.notifyModification()
    def move(self, sDate):
        """
        Move a session from the old start date to a new start date, and
        it moves all the entries of the session as well, without date validations.
        """
        if sDate is not None:
            oldStartDate=self.startDate
            self.startDate=copy.copy(sDate)
            diff=self.startDate-oldStartDate
            # Check date to not be prior conference start date and to not surpass conference end date
            # The schedule is returning the datetime object as timezone aware relative to the conference
            # timezone. Must adjust the startdate accordingly for comparison. JMF
            conftz = self.getConference().getTimezone()  # NOTE(review): unused local, kept untouched in this doc-only pass
            if self.getStartDate() < self.getConference().getSchedule().getStartDate() or \
                    self.getEndDate() > self.getConference().getSchedule().getEndDate():
                raise MaKaCError( _("Impossible to move the session because it would be out of the conference dates"))
            # Shift every slot entry by the same delta as the session itself.
            for entry in self.getSchedule().getEntries():
                if isinstance(entry,LinkedTimeSchEntry) and \
                        isinstance(entry.getOwner(), SessionSlot):
                    e = entry.getOwner()
                    e.move(e.getStartDate() + diff)
            self.getSchedule().reSchedule()
            self.getConference().getSchedule().reSchedule()
            self.notifyModification()
    def clone(self, deltaTime, conf, options, session_id=None):
        """Deep-copy this session into *conf*, shifted by *deltaTime*.

        *options* toggles optional parts (e.g. "access" clones the ACL);
        slot unique-id mappings are recorded in the ContextManager for later
        reference fix-ups.
        """
        ses = Session()
        conf.addSession(ses, check=0, session_id=session_id)
        ses.setTitle(self.getTitle())
        ses.setDescription(self.getDescription())
        startDate = self.getStartDate() + deltaTime
        ses.setStartDate(startDate, check=1)
        ses.setDuration(dur=self.getDuration())
        if self.getOwnLocation() is not None:
            ses.addLocation(self.getOwnLocation().clone())
        if self.getOwnRoom() is not None:
            ses.setRoom(self.getOwnRoom().clone())
        ses.setColor(self.getColor())
        ses.setTextColor(self.getTextColor())
        ses.setTextColorToLinks(self.isTextColorToLinks())
        ses.setCode(self.getCode())
        ses.setContribDuration(dur=self.getContribDuration())
        ses.setScheduleType(self.getScheduleType())
        ses.setComments(self.getComments())
        # Access Control cloning
        if options.get("access", False) :
            ses.setProtection(self.getAccessController()._getAccessProtection())
            for mgr in self.getManagerList() :
                ses.grantModification(mgr)
            for user in self.getAllowedToAccessList() :
                ses.grantAccess(user)
            for domain in self.getDomainList():
                ses.requireDomain(domain)
            for coord in self.getCoordinatorList():
                ses.addCoordinator(coord)
        #slots in timeschedule
        for slot in self.getSlotList() :
            newslot = slot.clone(ses, options)
            ses.addSlot(newslot)
            ContextManager.setdefault("clone.unique_id_map", {})[slot.getUniqueId()] = newslot.getUniqueId()
        ses.notifyModification()
        return ses
    def setTitle( self, newTitle ):
        self.title = newTitle
        self.notifyModification()
    def getTitle( self ):
        return self.title
    def setDescription(self, newDescription ):
        self.description = newDescription
        self.notifyModification()
    def getDescription(self):
        return self.description
    def getCode(self):
        # Lazy migration: old objects get their id as default code.
        try:
            if self._code:
                pass
        except AttributeError:
            self._code=self.id
        return self._code
    def setCode(self,newCode):
        self._code=str(newCode).strip()
    def getColor(self):
        # Background colour, lazily defaulting to the historical pale green.
        try:
            if self._color:
                pass
        except AttributeError:
            self._color="#e3f2d3"
        return self._color
    getBgColor=getColor
    def setColor(self,newColor):
        self._color=str(newColor).strip()
        self.notifyModification()
    setBgColor=setColor
    def getTextColor(self):
        try:
            if self._textColor:
                pass
        except AttributeError:
            self._textColor="#202020"
        return self._textColor
    def setTextColor(self,newColor):
        self._textColor=str(newColor).strip()
        self.notifyModification()
    def isTextColorToLinks(self):
        # NOTE(review): __init__ sets `_textColorToLinks` (plural) while this
        # accessor pair uses `_textColorToLink`; the lazy default below papers
        # over the mismatch, so the value set in __init__ is effectively dead.
        try:
            if self._textColorToLink:
                pass
        except AttributeError:
            self._textColorToLink=False
        return self._textColorToLink
    def setTextColorToLinks(self, v):
        self._textColorToLink=v
        self.notifyModification()
    def getStartDate(self):
        return self.startDate
    def getAdjustedStartDate(self,tz=None):
        # Start date converted to *tz* (default: event timezone; bad tz -> UTC).
        if not tz:
            tz = self.getConference().getTimezone()
        if tz not in all_timezones:
            tz = 'UTC'
        return self.startDate.astimezone(timezone(tz))
    def verifyStartDate(self, sdate, check=2):
        """check parameter:
            0: no check at all
            1: check and raise error in case of problem (default)
            2: check and adapt the owner dates
        """
        conf=self.getConference()
        if conf is not None and sdate < conf.getSchedule().getStartDate():
            if check==1:
                raise ParentTimingError( _("The session starting date cannot be prior to the event starting date"), _("Session"))
            elif check==2:
                # Record the automatic fix and stretch the event instead of failing.
                ContextManager.get('autoOps').append((self, "OWNER_START_DATE_EXTENDED",
                                                      conf, sdate.astimezone(timezone(conf.getTimezone()))))
                conf.setStartDate(sdate,check=0,moveEntries=0)
    def setStartDate(self,newDate,check=2,moveEntries=0):
        """
        moveEntries parameter:
        0: do not move inner slots
        1: move
        2: do not move but check that session is not out of the conference dates
        """
        if not newDate.tzname():
            raise MaKaCError("date should be timezone aware")
        if check != 0:
            self.verifyStartDate(newDate,check)
        oldSdate = self.getStartDate()
        try:
            tz = str(self.getStartDate().tzinfo)
        except:
            tz = 'UTC'
        diff = newDate - oldSdate
        self.startDate=copy.copy(newDate)
        if moveEntries == 1 and diff is not None and diff != timedelta(0):
            # If the start date changed, we move entries inside the timetable
            newDateTz = newDate.astimezone(timezone(tz))
            if oldSdate.astimezone(timezone(tz)).date() != newDateTz.date():
                entries = self.getSchedule().getEntries()[:]
            else:
                entries = self.getSchedule().getEntriesOnDay(newDateTz)[:]
            self.getSchedule().moveEntriesBelow(diff, entries)
        # Legacy single-slot mode: keep the lone slot in lockstep with the session.
        if moveEntries != 0 and self.getConference() and \
                not self.getConference().getEnableSessionSlots() and \
                self.getSlotList() != [] and \
                self.getSlotList()[0].getStartDate() != newDate:
            self.getSlotList()[0].startDate = newDate
        if check == 1:
            self._checkInnerSchedule()
        self.notifyModification()
    def _checkInnerSchedule( self ):
        # Delegate sanity checking to the inner schedule.
        self.getSchedule().checkSanity()
    def getEndDate(self):
        # The end date is always derived: start + duration.
        return self.startDate+self.duration
    ####################################
    # Fermi timezone awareness         #
    ####################################
    def getAdjustedEndDate(self,tz=None):
        return self.getAdjustedStartDate(tz) + self.duration
    ####################################
    # Fermi timezone awareness(end)    #
    ####################################
    def verifyEndDate(self, edate,check=1):
        """check parameter:
            0: no check at all
            1: check and raise error in case of problem
            2: check and adapt the owner dates
        """
        try:
            tz = timezone(self.getConference().getTimezone())
        except:
            tz = timezone('UTC')
        # compare end date with start date
        if edate<=self.getStartDate():
            if check == 1:
                raise MaKaCError( _("End date cannot be prior to the Start date"), _("Session"))
            if check == 2:
                self.setStartDate(edate)
        # check conference dates
        if (self.getConference()):
            conf=self.getConference()
            confStartDate = conf.getSchedule().getStartDate()
            confEndDate = conf.getSchedule().getEndDate()
            if conf is not None and (edate>confEndDate or edate<=confStartDate):
                if check==1:
                    raise ParentTimingError( _("The end date has to be between the event dates (%s - %s)")%\
                            (confStartDate.astimezone(tz).strftime('%Y-%m-%d %H:%M'),\
                            confEndDate.astimezone(tz).strftime('%Y-%m-%d %H:%M')),\
                            _("Session"))
                if check==2:
                    # Stretch the event in whichever direction is needed,
                    # recording the automatic operation for the UI.
                    if edate>confEndDate:
                        ContextManager.get('autoOps').append((self, "OWNER_END_DATE_EXTENDED",
                                                              self.getConference(),
                                                              edate.astimezone(tz)))
                        self.getConference().setEndDate(edate)
                    if edate<=confStartDate:
                        ContextManager.get('autoOps').append((self, "OWNER_START_DATE_EXTENDED",
                                                              self.getConference(),
                                                              edate.astimezone(tz)))
                        self.getConference().setStartDate(edate)
        # check inner schedule
        if len(self.getSlotList()) != 0 and self.getSlotList()[-1].getSchedule().hasEntriesAfter(edate):
            raise TimingError( _("Cannot change end date: some entries in the session schedule end after the new date"), _("Session"))
    def setEndDate(self,newDate,check=2):
        if not newDate.tzname():
            raise MaKaCError("date should be timezone aware")
        if check != 0:
            self.verifyEndDate(newDate,check)
        self.duration=newDate-self.getStartDate()
        # A session is not always linked to a conference (for eg. at creation time)
        #if self.getConference() and not self.getConference().getEnableSessionSlots() and self.getSlotList()[0].getEndDate() != newDate:
        #    self.getSlotList()[0].duration = self.duration
        self.notifyModification()
    def setDates(self, sDate, eDate, check=1, moveEntries=0):
        """Set both dates at once, validating that the interval is positive."""
        if eDate <= sDate:
            tz = timezone(self.getConference().getTimezone())
            raise FormValuesError(_("The end date ({}) cannot be prior to the start date ({})").format(
                eDate.astimezone(tz).strftime('%Y-%m-%d %H:%M'), sDate.astimezone(tz).strftime('%Y-%m-%d %H:%M')),
                _("Session"))
        self.setStartDate(sDate, check, moveEntries)
        self.setEndDate(eDate, check)
        self._checkInnerSchedule()
    def getDuration(self):
        return self.duration
def setDuration(self, hours=0, minutes=15, dur=0):
if dur == 0:
dur = timedelta(hours=int(hours), minutes=int(minutes))
if dur.seconds <= 0:
raise FormValuesError(_("The duration cannot be less than zero"), _("Session"))
self.duration = dur
self.verifyEndDate(self.getEndDate())
self.notifyModification()
    def getStartOnDay(self, day, tz=None):
        """Earliest entry start on *day* (event timezone by default).

        Returns None when the day is outside the session, 08:00 when the day
        has no entries, and never earlier than the session start.
        """
        if not tz:
            tz = self.getConference().getTimezone()
        if type(day) is datetime:
            day = day.astimezone(timezone(tz))
        if day.date() < self.getStartDate().astimezone(timezone(tz)).date() or day.date() > self.getEndDate().astimezone(timezone(tz)).date() :
            return None
        minTime = self.getEndDate()
        for e in self.getSchedule().getEntriesOnDay(day) :
            if e.getStartDate() < minTime :
                minTime = e.getStartDate()
        if minTime == self.getEndDate() :
            minTime = day.replace(hour=8, minute=0)#datetime.combine(day,time(hour=8, minute=0))
        if minTime < self.getStartDate() :
            return self.getStartDate()
        return minTime
    def getEndOnDay(self, day, tz=None):
        """Latest entry end on *day*: None outside the session, 19:00 for an
        empty day, and never later than the session end."""
        if not tz:
            tz = self.getConference().getTimezone()
        if type(day) is datetime:
            day = day.astimezone(timezone(tz))
        if day.date() < self.getStartDate().astimezone(timezone(tz)).date() or day.date() > self.getEndDate().astimezone(timezone(tz)).date() :
            return None
        maxTime = self.getStartDate();
        for e in self.getSchedule().getEntriesOnDay(day) :
            if e.getEndDate() > maxTime :
                maxTime = e.getEndDate()
        if maxTime == self.getStartDate() :
            maxTime = day.replace(hour=19, minute=0)#datetime.combine(day,time(19,0))
        if maxTime > self.getEndDate() :
            return self.getEndDate()
        return maxTime
    def getLocationParent( self ):
        """
        Returns the object from which the room/location
        information should be inherited
        """
        return self.getConference()
    def getLocationList(self):
        """Method returning a list of "location" objects which contain the
        information about the different places the conference is gonna
        happen
        """
        return self.places
    def addLocation(self, newPlace):
        self.places.append( newPlace )
        self.notifyModification()
    def _resetConveners(self):
        # Lazy migration: build the SessionChair list from the legacy avatar
        # list the first time it is needed.
        try:
            if self._conveners:
                return
        except AttributeError:
            self._conveners=[]
        for oc in self.conveners:
            newConv=SessionChair()
            newConv.setDataFromAvatar(oc)
            self._addConvener(newConv)
    def getConvenerList(self):
        self._resetConveners()
        return self._conveners
    def getAllConvenerList(self):
        # Conveners of every slot, deduplicated in a set.
        convenerList = set()
        for slot in self.getSlotList():
            for convener in slot.getConvenerList():
                convenerList.add(convener)
        return convenerList
    def _addConvener(self,newConv):
        # Assign an id from the lazy per-session counter and register the convener.
        if newConv in self._conveners:
            return
        try:
            if self._convenerGen:
                pass
        except AttributeError:
            self._convenerGen=Counter()
        id = newConv.getId()
        if id == "":
            id=int(self._convenerGen.newCount())
        newConv.includeInSession(self,id)
        self._conveners.append(newConv)
        self.notifyModification()
def addConvener(self,newConv):
self._resetConveners()
self._addConvener(newConv)
if isinstance(newConv, AvatarUserWrapper):
conv.unlinkTo(self, "convener")
def removeConvener(self,conv):
self._resetConveners()
if conv not in self._conveners:
return
#--Pending queue: remove pending Convener waiting to became manager if anything
self.getConference().getPendingQueuesMgr().removePendingManager(conv)
#--
#--Pending queue: remove pending Convener waiting to became coordinator if anything
self.getConference().getPendingQueuesMgr().removePendingCoordinator(conv)
#--
self._conveners.remove(conv)
if isinstance(conv, AvatarUserWrapper):
conv.linkTo(self, "convener")
conv.delete()
self.notifyModification()
    def recoverConvener(self, con):
        # Re-add a previously removed convener and restore it from the trash.
        self.addConvener(con)
        con.recover()
def getConvenerById(self,id):
id=int(id)
for conv in self._conveners:
if conv.getId()==id:
return conv
return None
    def getConvenerText( self ):
        #to be removed
        try:
            if self.convenerText:
                pass
        except AttributeError, e:
            self.convenerText = ""
        return self.convenerText
    def setConvenerText( self, newText ):
        self.convenerText = newText.strip()
    def appendConvenerText( self, newText ):
        # Comma-append to the legacy free-text convener field.
        self.setConvenerText( "%s, %s"%(self.getConvenerText(), newText.strip()) )
    def addContribution(self, newContrib, contrib_id=None):
        """Registers the contribution passed as parameter within the session
        assigning it a unique id.
        """
        if self.hasContribution(newContrib):
            return
        self.getConference().addContribution(newContrib, contrib_id=contrib_id)
        self.contributions[newContrib.getId()]=newContrib
        newContrib.setSession(self)
        # Propagate protection bookkeeping for the new child and its children.
        self.updateNonInheritingChildren(newContrib)
        for child in newContrib.getAccessController().getNonInheritingChildren():
            self.updateNonInheritingChildren(child)
        self.notifyModification()
def hasContribution(self,contrib):
return contrib.getSession()==self and \
self.contributions.has_key(contrib.getId())
    def removeContribution(self,contrib):
        """Removes the indicated contribution from the session
        """
        if not self.hasContribution(contrib):
            return
        if contrib.isScheduled():
            # unschedule the contribution
            sch=contrib.getSchEntry().getSchedule()
            sch.removeEntry(contrib.getSchEntry())
        del self.contributions[contrib.getId()]
        self._p_changed = True
        # Drop protection bookkeeping for the removed child and its children;
        # no owner propagation since the session itself did not change.
        self.updateNonInheritingChildren(contrib, delete=True, propagate=False)
        for child in contrib.getAccessController().getNonInheritingChildren():
            self.updateNonInheritingChildren(child, delete=True, propagate=False)
        contrib.setSession(None)
        self.notifyModification()
    def newContribution( self, params = None, id=None ):
        # Create an empty Contribution, optionally initialise it and add it here.
        c = Contribution()
        if params:
            c.setValues(params)
        self.addContribution( c, id )
        return c
def getContributionById(self,id):
id=str(id).strip()
if self.contributions.has_key( id ):
return self.contributions[ id ]
return None
    def getContributionList( self ):
        return self.contributions.values()
    def getNumberOfContributions(self, only_scheduled=False):
        # Count contributions, optionally only those placed in the timetable.
        if only_scheduled:
            return len(filter(lambda c: c.isScheduled(), self.contributions.itervalues()))
        else:
            return len(self.contributions)
    def isProtected(self):
        # tells if a session is protected or not
        return (self.hasProtectedOwner() + self.getAccessProtectionLevel()) > 0
    def getAccessProtectionLevel( self ):
        return self.__ac.getAccessProtectionLevel()
    def isItselfProtected( self ):
        return self.__ac.isItselfProtected()
    def hasAnyProtection( self ):
        """Tells whether a session has any kind of protection over it:
        access or domain protection.
        """
        if self.__ac.isProtected():
            return True
        if self.getDomainList():
            return True
        if self.getAccessProtectionLevel() == -1:
            # explicitly PUBLIC: do not inherit the owner's protection
            return False
        return self.getOwner().hasAnyProtection()
def hasProtectedOwner( self ):
if self.getOwner() != None:
return self.getOwner().isProtected()
return False
    def setProtection( self, private ):
        self.__ac.setProtection( private )
        # Keep the owner's non-inheriting-children bookkeeping in sync.
        self.notify_protection_to_owner(self)
    def grantAccess( self, prin ):
        self.__ac.grantAccess( prin )
        if isinstance(prin, AvatarUserWrapper):
            prin.linkTo(self, "access")
    def revokeAccess( self, prin ):
        self.__ac.revokeAccess( prin )
        if isinstance(prin, AvatarUserWrapper):
            prin.unlinkTo(self, "access")
def canView( self, aw ):
"""tells whether the specified user has access to the current object
or any of its sub-objects
"""
if self.canAccess( aw ):
return True
for contrib in self.getContributionList():
if contrib.canView( aw ):
return True
return False
    def isAllowedToAccess( self, user ):
        # Coordinators, explicitly granted users, managers, and (when protection
        # is inherited) anyone allowed on the owner may access.
        if not user:
            return False
        if user in self.getCoordinatorList() or self.__ac.canUserAccess( user ) \
                or self.canUserModify( user ) or (not self.isItselfProtected() and self.getOwner().isAllowedToAccess(user)):
            return True
        return False
    def canAccess( self, aw ):
        # Allow harvesters (Invenio, offline cache) to access
        # protected pages
        if has_request_context() and self.__ac.isHarvesterIP(request.remote_addr):
            return True
        #####################################################
        # Managers have always access
        if self.canModify(aw):
            return True
        flag_allowed_to_access = self.isAllowedToAccess(aw.getUser())
        if not self.canIPAccess(request.remote_addr) and not self.canUserModify(aw.getUser()) and \
                not flag_allowed_to_access:
            return False
        if not self.isProtected():
            return True
        # Protected: explicit grant or a valid event access key is required.
        return flag_allowed_to_access or self.conference.canKeyAccess(aw)
    def grantModification(self, sb, sendEmail=True):
        """Grant management rights; *sb* may be an avatar or a chair stub.

        For chair stubs, try to match an activated Indico account by email;
        otherwise the right is granted by email and a pending-manager
        notification may be sent.
        """
        if isinstance(sb, SessionChair) or isinstance(sb, SlotChair):
            ah = AvatarHolder()
            results = ah.match({"email": sb.getEmail()}, exact=1)
            r = None
            for i in results:
                if sb.getEmail().lower().strip() in [j.lower().strip() for j in i.getEmails()]:
                    r = i
                    break
            if r is not None and r.isActivated():
                self.__ac.grantModification(r)
                r.linkTo(self, "manager")
            elif sb.getEmail() != "":
                modificationEmailGranted = self.__ac.grantModificationEmail(sb.getEmail())
                if modificationEmailGranted and sendEmail:
                    notif = pendingQueues._PendingManagerNotification( [sb] )
                    mail.GenericMailer.sendAndLog(notif, self.getConference(), 'Session')
        else:
            self.__ac.grantModification( sb )
            if isinstance(sb, AvatarUserWrapper):
                sb.linkTo(self, "manager")
    def revokeModification( self, prin ):
        self.__ac.revokeModification( prin )
        if isinstance(prin, AvatarUserWrapper):
            prin.unlinkTo(self, "manager")
    def canModify(self, aw_or_user):
        # Accepts either an AccessWrapper or a plain user object.
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user) or self.getConference().canKeyModify()
    def canUserModify( self, av ):
        """Tells whether a user is allowed to modify the current session:
        only if the user is granted to modify the session or the user
        can modify the corresponding conference.
        """
        return self.getConference().canUserModify( av ) or self.__ac.canModify( av )
    def getManagerList( self ):
        return self.__ac.getModifierList()
    def getAllowedToAccessList( self ):
        return self.__ac.getAccessList()
    def addMaterial( self, newMat ):
        # Register a material with a fresh id from the per-session counter.
        newMat.setId( str(self.__materialGenerator.newCount()) )
        newMat.setOwner( self )
        self.materials[ newMat.getId() ] = newMat
        self.notifyModification()
def _setSchedule(self):
self.__schedule=SessionSchedule(self)
sl=self.getSlotList()
for slot in self.getSlotList():
self.__schedule.addEntry(slot.getSchEntry())
    def getSchedule( self ):
        # Lazily (re)build the schedule if missing or of a stale class.
        try:
            if self.__schedule is None or not isinstance(self.__schedule,SessionSchedule):
                self._setSchedule()
        except AttributeError, e:
            self._setSchedule()
        return self.__schedule
    def getMasterSchedule( self ):
        # The enclosing (conference) schedule.
        return self.getOwner().getSchedule()
    def requireDomain( self, dom ):
        self.__ac.requireDomain( dom )
    def freeDomain( self, dom ):
        self.__ac.freeDomain( dom )
    def getDomainList( self ):
        return self.__ac.getRequiredDomainList()
    def setComments(self,comm):
        self._comments = comm.strip()
    def getComments(self):
        # Lazy migration for objects persisted before _comments existed.
        try:
            if self._comments:
                pass
        except AttributeError,e:
            self._comments=""
        return self._comments
    def _addCoordinator(self, av):
        # Register *av* as coordinator (keyed by user id) and tell the event.
        if av is None or self._coordinators.has_key(av.getId()):
            return
        self._coordinators[av.getId()]=av
        if self.getConference() is not None:
            self.getConference().addSessionCoordinator(self,av)
def getCoordinatorEmailList(self):
try:
return self._coordinatorsEmail
except:
self._coordinatorsEmail = []
return self._coordinatorsEmail
    def _addCoordinatorEmail(self, email):
        # Pending coordinator rights, keyed by email until the user exists.
        if email not in self.getCoordinatorEmailList():
            self.getCoordinatorEmailList().append(email)
    def removeCoordinatorEmail(self, email):
        if email in self.getCoordinatorEmailList():
            self.getCoordinatorEmailList().remove(email)
            self._p_changed = 1
    def addCoordinator( self, sb, sendEmail=True ):
        """Grants coordination privileges to user.

        Arguments:
            sb -- It can be either:
                (AvatarUserWrapper) the user to which
                coordination privileges must be granted.
                or:
                (MaKaC.conference.SessionChair) a non-existing which
                has to become indico user before to be granted with privileges.
        """
        try:
            if self._coordinators:
                pass
        except AttributeError, e:
            self._coordinators=OOBTree()
        if isinstance(sb, SessionChair):
            ah = AvatarHolder()
            results=ah.match({"email":sb.getEmail()}, exact=1)
            r=None
            for i in results:
                if sb.getEmail().lower().strip() in [j.lower().strip() for j in i.getEmails()]:
                    r=i
                    break
            if r is not None and r.isActivated():
                self._addCoordinator(r)
                r.linkTo(self, "coordinator")
            else:
                # No matching active account yet: queue until the user registers.
                self.getConference().getPendingQueuesMgr().addPendingCoordinator(sb)
        else:
            self._addCoordinator(sb)
            if isinstance(sb, AvatarUserWrapper):
                sb.linkTo(self, "coordinator")
    def removeCoordinator( self, av ):
        """Revokes coordination privileges to user.

        Arguments:
            av -- (AvatarUserWrapper) user for which coordination privileges
                must be revoked
        """
        try:
            if self._coordinators:
                pass
        except AttributeError, e:
            self._coordinators=OOBTree()
        if av is None or not self._coordinators.has_key(av.getId()):
            return
        del self._coordinators[av.getId()]
        self._p_changed = True
        if isinstance(av, AvatarUserWrapper):
            av.unlinkTo(self, "coordinator")
        if self.getConference() is not None:
            self.getConference().removeSessionCoordinator(self,av)
    def isCoordinator( self, av ):
        """Tells whether the specified user is a coordinator of the session.

        Arguments:
            av -- (AvatarUserWrapper) user to be checked

        Return value: (boolean)
        """
        try:
            if self._coordinators:
                pass
        except AttributeError, e:
            self._coordinators=OOBTree()
        if (av is not None) and self._coordinators.has_key(av.getId()):
            return True
        ret = False
        if isinstance(av, AvatarUserWrapper):
            # Promote a pending email-based grant to a real one on first sight.
            for email in av.getEmails():
                if email in self.getCoordinatorEmailList():
                    self.addCoordinator(av)
                    self.removeCoordinatorEmail(email)
                    ret = True
        return ret
def hasConvenerByEmail(self, email):
for convener in self.getConvenerList():
if email == convener.getEmail():
return True
return False
def getCoordinatorList( self ):
"""Return all users which have privileges to coordinate the session.
Return value: (list)
"""
try:
if self._coordinators:
pass
except AttributeError, e:
self._coordinators=OOBTree()
return self._coordinators.values()
def canCoordinate(self, aw_or_user, right=""):
"""Tells if a user has coordination privileges.
Only session coordinators have coordination privileges over a
session.
Params:
aw -- (MaKaC.accessControl.AccessWrapper) User access
information for which the coordination privileges must be
checked.
Return value: (boolean)
"""
if hasattr(aw_or_user, 'getUser'):
aw_or_user = aw_or_user.getUser()
if right != "":
return self.isCoordinator(aw_or_user) and self.getConference().hasSessionCoordinatorRight(right)
return self.isCoordinator(aw_or_user)
def getScheduleType(self):
try:
if self._ttType:
pass
except AttributeError:
self._ttType=SlotSchTypeFactory.getDefaultId()
return self._ttType
    def setScheduleType(self,t):
        """Change the schedule type of the session and propagate it to all
        its slots.  Unknown ids and no-op changes are silently ignored.
        """
        try:
            # lazy migration: fill in the default for legacy objects
            if self._ttType:
                pass
        except AttributeError:
            self._ttType=SlotSchTypeFactory.getDefaultId()
        t=str(t).strip().lower()
        if t not in SlotSchTypeFactory.getIdList() or t==self._ttType:
            return
        self._ttType=t
        # keep every slot's schedule implementation in sync
        for slot in self.getSlotList():
            slot.setScheduleType(t)
    def getAccessController(self):
        # Returns the AccessController stored by this class' constructor in
        # the name-mangled attribute __ac.
        return self.__ac
    def _cmpTitle( s1, s2 ):
        # Python 2 cmp-style comparator ordering two sessions by their
        # case-insensitive, whitespace-stripped titles.
        s1=s1.getTitle().lower().strip()
        s2=s2.getTitle().lower().strip()
        return cmp( s1, s2 )
    _cmpTitle=staticmethod(_cmpTitle)
class SessionSlot(Persistent, Fossilizable, Locatable):
    """A scheduled block ("slot") of a session: a contiguous time interval
    that owns its own timetable (schedule), location, room and convener
    list.  A slot is linked into two timetables at once (the session's and
    the conference's) through two LinkedTimeSchEntry objects.  Stored in
    the ZODB (Persistent).
    """
    fossilizes(ISessionSlotFossil)
    def __init__(self,session,**sessionSlotData):
        self.session = session
        self.id = "not assigned"
        self.title = ""
        self.startDate=None
        self.duration = timedelta(minutes=1)
        self.places = []
        self.rooms = []
        self._conveners = []
        self._convenerGen=Counter()
        self._schedule=SlotSchTypeFactory.getDefaultKlass()(self)
        # one timetable entry per view: session timetable and conference
        # timetable
        self._sessionSchEntry=LinkedTimeSchEntry(self)
        self._confSchEntry=LinkedTimeSchEntry(self)
        self._contributionDuration = None
    @property
    @memoize_request
    def note(self):
        # EventNote attached to the owning session (cached per request)
        from indico.modules.events.notes.models.notes import EventNote
        return EventNote.get_for_linked_object(self.session)
    def getTimezone( self ):
        return self.getConference().getTimezone()
    def getLogInfo(self):
        # Flat dict of loggable metadata about this slot (for the event log).
        data = {}
        data["id"] = self.id
        data["title"] = self.title
        data["session"] = self.session.getTitle()
        data["start date"] = format_datetime(self.startDate, locale='en_GB', timezone=self.getConference().timezone)
        data["duration"] = format_human_timedelta(self.duration)
        i = 0
        for p in self.places :
            data["place %s"%i] = p.getName()
            i+=1
        i = 0
        for r in self.rooms :
            data["room %s"%i] = r.getName()
            i+=1
        for c in self._conveners :
            data["convener %s"%c.getId()] = c.getFullName()
        return data
    def clone(self,session, options):
        """Return a copy of this slot attached to *session*.

        Dates are shifted by the difference between the start dates of the
        target and source conferences.  Timetable entries are copied only
        when options["contributions"] is true.
        """
        slot = SessionSlot(session)
        slot.session = session
        slot.setTitle(self.getTitle())
        timeDifference = session.getConference().getStartDate() - self.getSession().getConference().getStartDate()
        slot.setStartDate(self.getStartDate() + timeDifference)
        slot.setDuration(dur=self.getDuration(), check=2)
        #places
        if self.getOwnLocation() is not None:
            slot.setLocation(self.getOwnLocation().clone())
        #rooms
        if self.getOwnRoom() is not None:
            slot.setRoom(self.getOwnRoom().clone())
        #chairs = conveners
        for ch in self.getOwnConvenerList() :
            slot.addConvener(ch.clone())
        #populate the timetable
        if options.get("contributions", False) :
            for entry in self.getEntries() :
                if isinstance(entry, BreakTimeSchEntry) :
                    newentry = entry.clone(slot)
                    slot.getSchedule().addEntry(newentry,0)
                elif isinstance(entry, ContribSchEntry) :
                    contrib = entry.getOwner()
                    newcontrib = contrib.clone(session, options, timeDifference)
                    slot.getSchedule().addEntry(newcontrib.getSchEntry(),0)
                    # remember old-id -> new-id so later clone steps can
                    # remap references
                    ContextManager.setdefault("clone.unique_id_map", {})[contrib.getUniqueId()] = newcontrib.getUniqueId()
        slot.setContribDuration(0, 0, self.getContribDuration())
        slot.notifyModification(cleanCache = False)
        return slot
    def fit( self ):
        """
        sets the start date of the slot to the start date of the first son
        and the end date to the end date of the last son
        """
        sch = self.getSchedule()
        entries = sch.getEntries()
        if len(entries) > 0:
            self.setStartDate(entries[0].getStartDate(),0,0)
            self.setEndDate(sch.calculateEndDate(), check=0)
    def recalculateTimes( self, type, diff ):
        """
        recalculate and reschedule the contributions of the session slot with a time "diff" of separation.
        """
        if type=="duration":
            # keep each entry's start time; stretch its duration so that
            # consecutive entries are separated by exactly "diff"
            entries = self.getSchedule().getEntries()[:]
            i=0
            while i<len(entries):
                entry=entries[i]
                if i+1 == len(entries):
                    dur=self.getEndDate()-entry.getStartDate()
                else:
                    nextentry=entries[i+1]
                    dur=nextentry.getStartDate()-entry.getStartDate()-diff
                if dur<timedelta(0):
                    raise EntryTimingError( _("""With the time between entries you've chosen, the entry "%s" will have a duration less than zero minutes. Please, choose another time""")%entry.getTitle())
                entry.setDuration(dur=dur)
                i+=1
            if len(entries) != 0 and self.getEndDate() < entry.getEndDate():
                self.setEndDate(entry.getEndDate(),2)
        elif type=="startingTime":
            # keep durations; re-pack entries back to back with a gap of
            # "diff" between them, starting at the slot's start date
            st = self.getStartDate()
            entries = self.getSchedule().getEntries()[:]
            for entry in entries:
                entry.setStartDate(st,0,0)
                # add diff to last item end date if and only if the item is
                # not a break
                #if not isinstance(entry, BreakTimeSchEntry):
                #    st=entry.getEndDate()+diff
                #else:
                #    st=entry.getEndDate()
                st=entry.getEndDate()+diff
            if len(entries) != 0 and self.getEndDate() < st:
                self.setEndDate(st,2)
    def setValues(self,data,check=2, moveEntriesBelow=0):
        """Bulk update of the slot from a form-style dict.

        check parameter:
            0: no check at all
            1: check and raise error in case of problem
            2: check and adapt the owner dates
        """
        # In order to move the entries below, it is needed to know the diff (we have to move them)
        # and the list of entries to move. It is needed to take those data in advance because they
        # are going to be modified before the moving.
        if moveEntriesBelow == 1:
            oldStartDate=copy.copy(self.getStartDate())
            oldDuration=copy.copy(self.getDuration())
            i=self.getConfSchEntry().getSchedule().getEntries().index(self.getConfSchEntry())+1
            entriesList = self.getConfSchEntry().getSchedule().getEntries()[i:]
        self.title=data.get("title", "NO TITLE ASSIGNED")
        # Do we move all entries in the slot
        move = int(data.get("move",0))
        if "locationName" in data:
            loc = self.getOwnLocation()
            if not loc:
                loc = CustomLocation()
            self.setLocation( loc )
            loc.setName( data["locationName"] )
            loc.setAddress( data.get("locationAddress", "") )
        else:
            self.setLocation( None )
        if "roomName" in data:
            room = self.getOwnRoom()
            if not room:
                room = CustomRoom()
            self.setRoom( room )
            room.setName( data["roomName"] )
        else:
            self.setRoom( None )
        # dates may arrive either as datetime-like objects or as split
        # year/month/day/hour/minute form fields; both are interpreted in
        # the conference timezone and converted to UTC
        sDate = eDate = None
        confTZ = self.getOwner().getConference().getTimezone()
        if data.get("sDate",None) is not None:
            sd = data.get("sDate")
            sDate = timezone(confTZ).localize(datetime(sd.year,sd.month,sd.day,sd.hour,sd.minute))
        elif data.get("sYear","")!="" and data.get("sMonth","")!="" and \
                data.get("sDay","")!="" and data.get("sHour","")!="" and \
                data.get("sMinute","")!="":
            sDate = timezone(confTZ).localize(datetime(int(data["sYear"]),int(data["sMonth"]),
                    int(data["sDay"]),int(data["sHour"]),
                    int(data["sMinute"])))
        if data.get("eDate",None) is not None:
            ed = data.get("eDate")
            eDate = timezone(confTZ).localize(datetime(ed.year,ed.month,ed.day,ed.hour,ed.minute))
        elif data.get("eYear","")!="" and data.get("eMonth","")!="" and \
                data.get("eDay","")!="" and data.get("eHour","")!="" and \
                data.get("eMinute","")!="":
            eDate = timezone(confTZ).localize(datetime(int(data["eYear"]),int(data["eMonth"]),
                    int(data["eDay"]),int(data["eHour"]),
                    int(data["eMinute"])))
        if sDate != None and eDate != None:
            sDateUTC = sDate.astimezone(timezone('UTC'))
            eDateUTC = eDate.astimezone(timezone('UTC'))
            self.setDates(sDateUTC,eDateUTC,check,moveEntries=move)
        elif sDate != None:
            sDateUTC = sDate.astimezone(timezone('UTC'))
            self.setStartDate(sDateUTC,check,moveEntries=move)
        if data.get("durHours","")!="" and data.get("durMins","")!="":
            self.setDuration(hours=data["durHours"],minutes=data["durMins"],check=check)
        if data.get("contribDurHours","")!="" and data.get("contribDurMins","")!="":
            self.setContribDuration(int(data["contribDurHours"]),int(data["contribDurMins"]))
        elif data.get("contribDuration","")!="":
            self.setContribDuration(dur=data.get("contribDuration"))
        else:
            self.setContribDuration(None,None)
        conveners = data.get("conveners",None)
        if conveners is not None:
            # replace the convener list with fresh SlotChair copies
            self.clearConvenerList()
            for conv in conveners:
                sc = SlotChair()
                sc.setTitle(conv.getTitle())
                sc.setFirstName(conv.getFirstName())
                sc.setFamilyName(conv.getFamilyName())
                sc.setAffiliation(conv.getAffiliation())
                sc.setEmail(conv.getEmail())
                self.addConvener(sc)
        if moveEntriesBelow == 1:
            diff = (self.getStartDate() - oldStartDate) + (self.getDuration() - oldDuration)
            self.getSchedule().moveEntriesBelow(diff, entriesList)
        self._checkInnerSchedule()
        self.notifyModification()
    def _checkInnerSchedule( self ):
        self.getSchedule().checkSanity()
    def setContribDuration(self, hour=0, min=0, dur=None):
        # Default duration for contributions created inside this slot.
        # ``dur`` (a timedelta) wins over the hour/min pair; passing
        # hour=None and min=None clears the default.
        self._contributionDuration = None
        if dur is not None:
            self._contributionDuration=dur
        elif hour != None and min != None:
            self._contributionDuration = timedelta(hours=hour,minutes=min)
    def getContribDuration(self):
        """
        Duration by default for contributions within the slots.
        """
        try:
            if self._contributionDuration:
                pass
        except AttributeError, e:
            self._contributionDuration = None
        return self._contributionDuration
    def notifyModification( self, cleanCache = True, cleanCacheEntries = False):
        # Propagate the change to the owning session and mark this persistent
        # object dirty; optionally invalidate the cached timetable JSON.
        self.getSession().notifyModification(cleanCache = False)
        if cleanCache:
            self.cleanCache(cleanCacheEntries)
        self._p_changed = 1
    def cleanCache(self, cleanCacheEntries = False):
        # Invalidate this slot's cached schedule at most once per request
        # (guarded via a ContextManager flag).
        if not ContextManager.get('clean%s'%self.getUniqueId(), False):
            ScheduleToJson.cleanCache(self)
            ContextManager.set('clean%s'%self.getUniqueId(), True)
        if cleanCacheEntries:
            for entry in self.getSchedule().getEntries():
                entry.getOwner().cleanCache(cleanConference = False)
    def getLocator( self ):
        l=self.getSession().getLocator()
        l["slotId"]=self.getId()
        return l
    def getConference( self ):
        return self.getSession().getConference()
    def getSession(self):
        return self.session
    def getOwner(self):
        return self.session
    def getContributionList(self):
        return [e.getOwner() for e in ifilter(lambda e: isinstance(e, ContribSchEntry),
                                              self.getSchedule().getEntries())]
    def _setSchedule(self, klass):
        # Replace the schedule implementation, dropping all old entries.
        old_sch = self.getSchedule()
        self._schedule = klass(self)
        #after removing old entries, one could try to fit them into the new
        #   schedule, but there are several things to consider which are left
        #   for later implementation (breaks, entries not fitting in the
        #   slots,...)
        while len(old_sch.getEntries()) > 0:
            entry = old_sch.getEntries()[0]
            old_sch.removeEntry(entry)
        self.notifyModification()
    def getSchedule(self):
        return self._schedule
    def getMasterSchedule( self ):
        return self.getOwner().getSchedule()
    def getConfSchEntry( self ):
        try:
            if self._confSchEntry:
                pass
        except AttributeError:
            self._confSchEntry=LinkedTimeSchEntry(self)
        return self._confSchEntry
    def getSessionSchEntry( self ):
        try:
            if self._sessionSchEntry:
                pass
        except AttributeError:
            # legacy objects stored the entry under the old attribute name
            self._sessionSchEntry=self._schEntry
        return self._sessionSchEntry
    def setId( self, newId ):
        self.id=str(newId)
        self.notifyModification()
    def getId( self ):
        return self.id
    def getUniqueId( self ):
        """Returns (string) the unique identiffier of the item.
            Used mainly in the web session access key table"""
        return "%sl%s" % (self.getSession().getUniqueId(),self.id)
    def setTitle( self, newTitle ):
        self.title=newTitle
        self.notifyModification()
    def getTitle( self ):
        try:
            if self.title:
                pass
        except AttributeError,e:
            self.title=""
        return self.title
    def getFullTitle( self ):
        return self.getSession().getTitle() + (": " + self.getTitle() if self.getTitle() else "")
    def getName(self):
        return "slot %s"%self.getId()
    def getDescription(self):
        return self.getSession().getDescription()
    def setDates(self, sDate, eDate, check=2, moveEntries=0):
        """check parameter:
            0: no check at all
            1: check and raise error in case of problem
            2: check and adapt the owner dates"""
        if sDate > eDate:
            raise FormValuesError(_("End date cannot be prior to Start date"), _("Slot"))
        self.setStartDate(sDate, check, moveEntries, checkDuration=False)
        self.setDuration(0, 0, 0, eDate-sDate, check)
        self.notifyModification()
    def getEntries(self):
        entriesList = self.getSchedule().getEntries()
        return entriesList
    def move(self, sDate):
        # Shift the whole slot (and every entry it contains) so it starts
        # at sDate, preserving relative offsets.
        diff=sDate-self.startDate
        self.startDate = sDate
        for slotEntry in self.getSchedule().getEntries():
            if isinstance(slotEntry, BreakTimeSchEntry):
                slotEntry.startDate = slotEntry.getStartDate() + diff
            else:
                se = slotEntry.getOwner()
                se.startDate = se.getStartDate() + diff
        self.getSchedule().reSchedule()
    def verifyStartDate(self, sDate,check=2):
        """check parameter:
            0: no check at all
            1: check and raise error in case of problem
            2: check and adapt the owner dates"""
        tz = timezone(self.getConference().getTimezone())
        if sDate < self.getSession().getStartDate():
            if check == 1:
                raise ParentTimingError(_("The slot \"%s\" cannot start (%s) before its parent session starts (%s)")%\
                        (self.getTitle(), sDate.astimezone(tz).strftime('%Y-%m-%d %H:%M'),\
                        self.getSession().getStartDate().astimezone(tz).strftime('%Y-%m-%d %H:%M')),\
                        _("Slot"))
            elif check == 2:
                # widen the parent session instead of failing
                self.getSession().setStartDate(sDate, check, 0)
    def setStartDate(self,sDate,check=2,moveEntries=0,checkDuration=True):
        """check parameter:
            0: no check at all
            1: check and raise error in case of problem
            2: check and adapt the owner dates"""
        if sDate is None:
            return
        if not sDate.tzname():
            raise MaKaCError("date should be timezone aware")
        if check != 0:
            #If not using .fit() at the end of this method, comment it out
            #if self.getSession().getStartDate() > sDate:
            #    self.getSession().duration += self.getSession().getStartDate() - sDate
            self.verifyStartDate(sDate,check)
        # calculate the difference betwwen old and new date
        difference = None
        if self.startDate is not None:
            difference = sDate - self.getStartDate()
        self.startDate=copy.copy(sDate)
        if difference != None and difference != timedelta(0) and moveEntries:
            # record the automatic operation and shift every entry with it
            ContextManager.get('autoOps').append((self, "ENTRIES_MOVED",
                                                  self, sDate.astimezone(timezone(self.getTimezone()))))
            self.getSchedule().moveEntriesBelow(difference,self.getSchedule().getEntries()[:])
        if self.getConference() and not self.getConference().getEnableSessionSlots() and self.getSession().getStartDate() != sDate:
            self.getSession().setStartDate(sDate, check, 0)
        if check != 0 and self.getSession() and checkDuration:
            self.verifyDuration(self.getDuration(), check=check)
        # synchronize with other timetables
        self.getSessionSchEntry().synchro()
        self.getConfSchEntry().synchro()
        self.getSession().fit()
        self.notifyModification()
    def setEndDate(self,eDate,check=2):
        if not eDate.tzname():
            raise MaKaCError("date should be timezone aware")
        if check != 0:
            self.verifyDuration(eDate-self.startDate, check)
        self.setDuration(dur=eDate-self.startDate,check=check)
        if self.getConference() and not self.getConference().getEnableSessionSlots() and self.getSession().getEndDate() != eDate:
            self.getSession().setEndDate(eDate, check)
        self.getSession().fit()
        self.notifyModification()
    def getStartDate( self ):
        return self.startDate
    def getAdjustedStartDate(self,tz=None):
        if not tz:
            tz = self.getConference().getTimezone()
        if tz not in all_timezones:
            tz = 'UTC'
        return self.startDate.astimezone(timezone(tz))
    def getEndDate( self ):
        if self.startDate is None:
            return None
        return self.startDate+self.duration
    def getAdjustedEndDate( self, tz=None ):
        if not tz:
            tz = self.getConference().getTimezone()
        if tz not in all_timezones:
            tz = 'UTC'
        if self.getEndDate():
            return self.getEndDate().astimezone(timezone(tz))
        return None
    def getDuration( self ):
        return self.duration
    def isMoreThanDay(self):
        if self.getDuration() >= timedelta(days=1):
            return True
        return False
    def verifyDuration(self, dur, check=1):
        """Validate a candidate duration *dur* for this slot.

        check parameter:
            0: no check at all
            1: check and raise error in case of problem
            2: check and adapt the owner dates"""
        tz = timezone(self.getConference().getTimezone())
        if dur <= timedelta(0):
            raise FormValuesError(_("The duration cannot be less than zero"), _("Slot"))
        if dur.days > 1:
            raise FormValuesError(_("The duration cannot be more than one day"), _("Slot"))
        if self.startDate is not None:
            sessionStartDate = self.getSession().getStartDate()
            sessionEndDate = self.getSession().getEndDate()
            # end date has to be between the session dates
            eDate = self.startDate + dur
            if eDate > sessionEndDate:
                if check==1:
                    raise EntryTimingError(_("The session slot cannot end (%s) after its parent session (%s)") \
                            % (eDate.astimezone(tz).strftime('%Y-%m-%d %H:%M'),\
                            sessionEndDate.astimezone(tz).strftime('%Y-%m-%d %H:%M')),\
                            _("Slot"))
                elif check==2:
                    # extend the parent session and log the automatic change
                    ContextManager.get('autoOps').append((self, "OWNER_END_DATE_EXTENDED",
                                                          self.getSession(), eDate.astimezone(tz)))
                    self.getSession().setEndDate(eDate,check)
            if eDate.astimezone(tz).date() > self.startDate.astimezone(tz).date():
                raise TimingError( _("The time slot must end on the same day it has started"), _("Slot"))
            # do not modify if slot entries will be affected
            sch = self.getSchedule()
            entries = sch.getEntries()
            if entries != []:
                if eDate < sch.calculateEndDate():
                    raise TimingError(_("The session slot cannot end at (%s) because there is a contribution (%s) ending after that time. ")%\
                            (eDate.astimezone(tz).strftime('%Y-%m-%d %H:%M'),\
                            sch.calculateEndDate().astimezone(tz).strftime('%Y-%m-%d %H:%M')),\
                            _("Slot"))
    def setDuration(self, days=0,hours=0,minutes=0,dur=0,check=1):
        """check parameter:
            0: no check at all
            1: check and raise error in case of problem
            2: check and adapt the owner dates"""
        if dur==0:
            dur = timedelta(days=int(days),hours=int(hours),minutes=int(minutes))
        if dur==0 and check==2:
            # a zero duration is silently turned into one minute
            ContextManager.get('autoOps').append((self, "DURATION_SET",
                                                  self, 1))
            dur = timedelta(minutes=1)
        if dur > timedelta(days=1) and check==2:
            pass#dur = timedelta(days=1)
        if check != 0:
            self.verifyDuration(dur, check)
        self.duration = dur
        self.getSessionSchEntry().synchro()
        self.getConfSchEntry().synchro()
        self.getSession().fit()
        self.notifyModification()
    def getLocationParent( self ):
        """
        Returns the object from which the room/location
        information should be inherited
        """
        return self.session.conference
    def delete(self):
        # Detach from the session, empty the timetable and park the object
        # in the trash can (soft delete).
        signals.event.session_slot_deleted.send(self)
        self.getSchedule().clear()
        if self.getSession() is not None:
            self.getSession().removeSlot(self)
            self.session=None
        TrashCanManager().add(self)
    def recover(self):
        TrashCanManager().remove(self)
    def getAccessController( self ):
        return self.getSession().getAccessController()
    def canAccess(self,aw):
        return self.getSession().canAccess(aw)
    def canView(self,aw):
        return self.getSession().canView(aw)
    def isProtected(self):
        return self.getSession().isProtected()
    def getAccessKey( self ):
        return self.getSession().getAccessKey()
    def setScheduleType(self,id):
        # Switch the slot's schedule implementation; unknown ids and
        # no-op changes are ignored.
        id=str(id).strip().lower()
        currentId=SlotSchTypeFactory.getId(self.getSchedule())
        if id not in SlotSchTypeFactory.getIdList() or id==currentId:
            return
        self._setSchedule(SlotSchTypeFactory.getScheduleKlass(id))
    def getConvenerList(self):
        # Falls back to the session-level conveners when the slot has none
        # of its own.
        try:
            if self._conveners:
                pass
        except AttributeError:
            self._conveners = []
        if self._conveners == []:
            return self.getSession().getConvenerList()
        return self._conveners
    def addConvener(self,newConv):
        if newConv in self._conveners:
            return
        try:
            if self._convenerGen:
                pass
        except AttributeError:
            self._convenerGen=Counter()
        id = newConv.getId()
        if id == "":
            # allocate the next slot-local convener id
            id=int(self._convenerGen.newCount())
        newConv.includeInSlot(self,id)
        self._conveners.append(newConv)
        self.notifyModification()
    def removeConvener(self,conv):
        if conv not in self._conveners:
            return
        self._conveners.remove(conv)
        conv.delete()
        self.notifyModification()
    def recoverConvener(self, con):
        self.addConvener(con)
        con.recover()
    def getConvenerById(self,id):
        id=int(id)
        for conv in self._conveners:
            if conv.getId()==id:
                return conv
        return None
    def getOwnConvenerList(self):
        # Unlike getConvenerList(), never falls back to the session's list.
        try:
            if self._conveners:
                pass
        except AttributeError:
            self._conveners = []
        return self._conveners
    def clearConvenerList(self):
        while len(self.getOwnConvenerList()) > 0:
            self._conveners.pop()
        self.notifyModification()
    def getColor(self):
        res=""
        if self.getSession() is not None:
            res=self.getSession().getColor()
        return res
    def getTextColor(self):
        res=""
        if self.getSession() is not None:
            res=self.getSession().getTextColor()
        return res
    def getRecursiveAllowedToAccessList(self):
        return self.getSession().getRecursiveAllowedToAccessList()
    def canModify(self, aw_or_user):
        return self.getSession().canModify(aw_or_user)
class ContributionParticipation(Persistent, Fossilizable):
    """A person taking part in a contribution (author or speaker): plain
    contact data plus a link back to the owning Contribution.  Setters of
    indexed fields (first name, family name, e-mail) re-index the person in
    the conference author/speaker indexes.  Stored in the ZODB (Persistent).
    """
    fossilizes(IContributionParticipationFossil, IContributionParticipationMinimalFossil,\
               IContributionParticipationTTDisplayFossil,\
               IContributionParticipationTTMgmtFossil)
    def __init__( self ):
        self._contrib = None
        self._id = ""
        self._firstName = ""
        self._surName = ""
        self._email = ""
        self._affiliation = ""
        self._address = ""
        self._phone = ""
        self._title = ""
        self._fax = ""
    def _notifyModification( self ):
        # Mark the owning contribution as modified (if already attached).
        if self._contrib != None:
            self._contrib.notifyModification()
    def setValues(self, data):
        # Bulk setter mirrored by getValues(); note the historical
        # "affilation" (sic) key kept for backward compatibility.
        self.setFirstName(data.get("firstName", ""))
        self.setFamilyName(data.get("familyName",""))
        self.setAffiliation(data.get("affilation",""))
        self.setAddress(data.get("address",""))
        self.setEmail(data.get("email",""))
        self.setFax(data.get("fax",""))
        self.setTitle(data.get("title",""))
        self.setPhone(data.get("phone",""))
        self._notifyModification()
    def getValues(self):
        data={}
        data["firstName"]=self.getFirstName()
        data["familyName"]=self.getFamilyName()
        data["affilation"]=self.getAffiliation()
        data["address"]=self.getAddress()
        data["email"]=self.getEmail()
        data["fax"]=self.getFax()
        data["title"]=self.getTitle()
        data["phone"]=self.getPhone()
        return data
    def clone(self):
        # Fresh, unattached copy carrying the same contact data.
        part = ContributionParticipation()
        part.setValues(self.getValues())
        return part
    def setDataFromAvatar(self,av):
        # av is an Avatar object.
        if av is None:
            return
        self.setFirstName(av.getName())
        self.setFamilyName(av.getSurName())
        self.setEmail(av.getEmail())
        self.setAffiliation(av.getOrganisation())
        self.setAddress(av.getAddress())
        self.setPhone(av.getTelephone())
        self.setTitle(av.getTitle())
        self.setFax(av.getFax())
        self._notifyModification()
    def setDataFromOtherCP(self,cp):
        # cp is a ContributionParticipation object.
        if cp is None:
            return
        self.setFirstName(cp.getFirstName())
        self.setFamilyName(cp.getFamilyName())
        self.setEmail(cp.getEmail())
        self.setAffiliation(cp.getAffiliation())
        self.setAddress(cp.getAddress())
        self.setPhone(cp.getPhone())
        self.setTitle(cp.getTitle())
        self.setFax(cp.getFax())
        self._notifyModification()
    def includeInContribution( self, contrib, id ):
        # Attach this participation to *contrib* under *id* (no-op when
        # already attached with the same id).
        if self.getContribution() == contrib and self.getId()==id.strip():
            return
        self._contrib = contrib
        self._id = id
    def delete( self ):
        # Soft delete: detach from the contribution and park in the trash can.
        self._contrib = None
        TrashCanManager().add(self)
    def recover(self):
        TrashCanManager().remove(self)
    def setId(self, newId):
        self._id = newId
    def getId( self ):
        return self._id
    def getContribution( self ):
        return self._contrib
    def getConference(self):
        return self._contrib.getConference()
    def getLocator(self):
        if self.getContribution() is None:
            return None
        loc=self.getContribution().getLocator()
        loc["authorId"]=self.getId()
        return loc
    def _unindex(self):
        # Remove this person from the conference author/speaker indexes;
        # used right before an indexed field changes.
        contrib=self.getContribution()
        if contrib is not None:
            conf=contrib.getConference()
            if conf is not None:
                conf.unindexAuthor(self)
                conf.unindexSpeaker(self)
    def _index(self):
        # Re-add this person to the conference author/speaker indexes.
        contrib=self.getContribution()
        if contrib is not None:
            conf=contrib.getConference()
            if conf is not None:
                conf.indexAuthor(self)
                conf.indexSpeaker(self)
    @Updates ('MaKaC.conference.ContributionParticipation', 'firstName')
    def setFirstName( self, newName ):
        tmp=newName.strip()
        if tmp==self._firstName:
            return
        # indexed field: unindex, change, re-index
        self._unindex()
        self._firstName=tmp
        self._index()
        self._notifyModification()
    def getFirstName( self ):
        return self._firstName
    def getName( self ):
        return self._firstName
    @Updates ('MaKaC.conference.ContributionParticipation', 'familyName')
    def setFamilyName( self, newName ):
        tmp=newName.strip()
        if tmp==self._surName:
            return
        # indexed field: unindex, change, re-index
        self._unindex()
        self._surName=tmp
        self._index()
        self._notifyModification()
    def getFamilyName( self ):
        return self._surName
    def getSurName( self ):
        return self._surName
    @Updates ('MaKaC.conference.ContributionParticipation', 'email')
    def setEmail( self, newMail ):
        tmp=newMail.strip()
        if tmp==self._email:
            return
        # indexed field: unindex, change, re-index
        self._unindex()
        self._email=newMail.strip()
        self._index()
        self._notifyModification()
    def getEmail( self ):
        return self._email
    @Updates ('MaKaC.conference.ContributionParticipation', 'affiliation')
    def setAffiliation( self, newAffil ):
        self._affiliation = newAffil.strip()
        self._notifyModification()
    def getAffiliation( self ):
        # "unknown" is a legacy placeholder and is reported as empty
        if self._affiliation.lower() == "unknown":
            return ""
        return self._affiliation
    @Updates ('MaKaC.conference.ContributionParticipation', 'address')
    def setAddress( self, newAddr ):
        self._address = newAddr.strip()
        self._notifyModification()
    def getAddress( self ):
        return self._address
    @Updates('MaKaC.conference.ContributionParticipation', 'phone')
    def setPhone( self, newPhone ):
        self._phone = newPhone.strip()
        self._notifyModification()
    def getPhone( self ):
        return self._phone
    @Updates ('MaKaC.conference.ContributionParticipation', 'title')
    def setTitle( self, newTitle ):
        self._title = newTitle.strip()
        self._notifyModification()
    def getTitle( self ):
        return self._title
    @Updates ('MaKaC.conference.ContributionParticipation', 'fax')
    def setFax( self, newFax ):
        self._fax = newFax.strip()
        self._notifyModification()
    def getFax( self ):
        try:
            if self._fax:
                pass
        except AttributeError:
            self._fax=""
        return self._fax
    def getDirectFullName( self ):
        # "Title First FAMILY"
        res = self.getDirectFullNameNoTitle()
        if self.getTitle() != "":
            res = "%s %s"%( self.getTitle(), res )
        return res
    def getDirectFullNameNoTitle(self, upper=True):
        familyName = safe_upper(self.getFamilyName()) if upper else self.getFamilyName()
        return "{0} {1}".format(self.getFirstName(), familyName).strip()
    def getFullName(self):
        res = self.getFullNameNoTitle()
        if self.getTitle():
            res = "%s %s" % (self.getTitle(), res)
        return res
    def getFullNameNoTitle(self):
        # "FAMILY, First" (family name upper-cased)
        res = safe_upper(self.getFamilyName())
        if self.getFirstName():
            if res.strip():
                res = "%s, %s" % (res, self.getFirstName())
            else:
                res = self.getFirstName()
        return res
    def getAbrName(self):
        # abbreviated "Family, F." form
        res = self.getFamilyName()
        if self.getFirstName():
            if res:
                res = "%s, " % res
            res = "%s%s." % (res, safe_upper(self.getFirstName()[0]))
        return res
    def isSubmitter(self):
        if self.getContribution() is None:
            return False
        return self.getContribution().canUserSubmit(self)
    def isPendingSubmitter(self):
        if self.getContribution() is None:
            return False
        if self.getContribution().getConference() is None:
            return False
        return self.getContribution().getConference().getPendingQueuesMgr().isPendingSubmitter(self)
    def isInAuthorList(self):
        # Sometimes authors are not in the author index for an unknown reason.
        # In this case we don't want to link to the author page since opening it would fail
        return self.getConference().getAuthorIndex().getByAuthorObj(self) is not None
    @staticmethod
    def _cmpFamilyName(cp1, cp2):
        # Python 2 cmp-style comparator ordering by "family first" names,
        # case-insensitively.
        o1 = "%s %s"%(cp1.getFamilyName(), cp1.getFirstName())
        o2 = "%s %s"%(cp2.getFamilyName(), cp2.getFirstName())
        o1=o1.lower().strip()
        o2=o2.lower().strip()
        return cmp( o1, o2 )
class AuthorIndex(Persistent):
    """Index of contribution participations keyed by the normalized string
    "<family> <first> <email>" (lower-cased); each key maps to the list of
    participation objects sharing that identity.
    """
    def __init__(self):
        self._idx=OOBTree()
    def _getKey(self,author):
        # normalized identity key for *author*
        k = "%s %s %s"%(author.getFamilyName().lower(),author.getFirstName().lower(),author.getEmail().lower())
        return k.strip()
    def index(self,author):
        key=self._getKey(author)
        if not self._idx.has_key(key):
            self._idx[key]=[]
        # reassign the list so the OOBTree registers the mutation
        l = self._idx[key]
        l.append(author)
        self._idx[key] = l
        self.notifyModification()
    def unindex(self,author):
        key=self._getKey(author)
        if self._idx.has_key(key):
            if author in self._idx[key]:
                # reassign the list so the OOBTree registers the mutation
                l = self._idx[key]
                l.remove(author)
                self._idx[key] = l
            if len(self._idx[key])<=0:
                # drop empty buckets to keep the index compact
                del self._idx[key]
            self.notifyModification()
    def getParticipations(self):
        return self._idx.values()
    def getById(self, id):
        return self._idx.get(id,None)
    def getByAuthorObj(self, auth):
        return self.getById(self._getKey(auth))
    def getParticipationKeys(self):
        return self._idx.keys()
    def notifyModification(self):
        self._idx._p_changed = 1
        self._p_changed = 1
    def iteritems(self):
        return self._idx.iteritems()
    def match(self, criteria, exact=0):
        # Return the first participation of every identity matching ALL
        # non-empty criteria fields; ``exact`` switches substring search
        # to strict (case-insensitive) equality.
        self._options = ['organisation', 'surName', 'name', 'email']
        l = []
        for item in self.getParticipations():
            if len(item)>0:
                ok = []
                for f,v in criteria.items():
                    if f == 'organisation' and v != '':
                        if (exact == 0 and item[0].getAffiliation().lower().find(v.lower()) == -1) or (exact == 1 and item[0].getAffiliation().lower() != v.lower()):
                            ok.append(False)
                        else:
                            ok.append(True)
                    if f == 'surName' and v!= '':
                        if (exact == 0 and item[0].getSurName().lower().find(v.lower()) == -1) or (exact == 1 and item[0].getSurName().lower() != v.lower()):
                            ok.append(False)
                        else:
                            ok.append(True)
                    if f == 'name' and v!= '':
                        if (exact == 0 and item[0].getName().lower().find(v.lower()) == -1) or (exact == 1 and item[0].getName().lower() != v.lower()):
                            ok.append(False)
                        else:
                            ok.append(True)
                    if f == 'email' and v!= '':
                        if (exact == 0 and item[0].getEmail().lower().find(v.lower()) == -1) or (exact == 1 and item[0].getEmail().lower() != v.lower()):
                            ok.append(False)
                        else:
                            ok.append(True)
                if len(ok) > 0 and not False in ok:
                    l.append(item[0])
        return l
class _AuthIdx(Persistent):
    """Per-conference author index mapping "<family> <first>" (lower-cased)
    to an OIBTree of {contribution id -> reference count}.
    """
    def __init__(self,conf):
        self._conf=conf
        self._idx=OOBTree()
    def _getKey(self,auth):
        return "%s %s"%(auth.getFamilyName().lower(),auth.getFirstName().lower())
    def index(self,auth):
        if auth.getContribution() is None:
            raise MaKaCError( _("Cannot index an author of a contribution which has not been included in a Conference"), _("Author Index"))
        if auth.getContribution().getConference()!=self._conf:
            raise MaKaCError( _("cannot index an author of a contribution which does not belong to this Conference"), _("Author Index"))
        key=self._getKey(auth)
        contribId=str(auth.getContribution().getId())
        if not self._idx.has_key(key):
            self._idx[key]=OIBTree()
        if not self._idx[key].has_key(contribId):
            self._idx[key][contribId]=0
        # reference-counted: the same author may appear several times in
        # one contribution
        self._idx[key][contribId]+=1
    def unindex(self,auth):
        if auth.getContribution() is None:
            raise MaKaCError( _("Cannot unindex an author of a contribution which is not included in a conference"), _("Author Index"))
        if auth.getContribution().getConference()!=self._conf:
            raise MaKaCError( _("Cannot unindex an author of a contribution which does not belong to this conference"), _("Author Index"))
        key=self._getKey(auth)
        if not self._idx.has_key(key):
            return
        contribId=str(auth.getContribution().getId())
        self._idx[key][contribId]-=1
        # drop empty entries/buckets to keep the index compact
        if self._idx[key][contribId]<=0:
            del self._idx[key][contribId]
        if len(self._idx[key])<=0:
            del self._idx[key]
    def match(self,query):
        # substring search over keys; returns the union of the matching
        # contribution-id sets
        query=query.lower().strip()
        res=OISet()
        for k in self._idx.keys():
            if k.find(query)!=-1:
                res=union(res,self._idx[k])
        return res
class _PrimAuthIdx(_AuthIdx):
    """_AuthIdx pre-populated with every primary author of the conference's
    contributions at construction time."""
    def __init__(self,conf):
        _AuthIdx.__init__(self,conf)
        for contrib in self._conf.getContributionList():
            for auth in contrib.getPrimaryAuthorList():
                self.index(auth)
class Contribution(CommonObjectBase, Locatable):
"""This class implements a conference contribution, being the concrete
contributes of the conference participants. The class contains
necessary attributes to store contribution basic meta data and provides
the useful operations to access and manage them. A contribution can be
attached either to a session or to a conference.
"""
fossilizes(IContributionFossil, IContributionWithSpeakersFossil, IContributionWithSubContribsFossil)
    def __init__(self, **contribData):
        # parent is set when the contribution is added to a conference
        self.parent = None
        self._session = None
        self.id = ""
        self.title = ""
        # custom abstract-field contents, keyed by field id
        self._fields = {}
        self.description = ""
        self.startDate = None
        self.duration = timedelta(0)
        # legacy speaker storage (superseded by _speakers below)
        self.speakers = []
        self.speakerText = ""
        self.place = None
        self.room = None
        self._boardNumber = ""
        self._resetSchEntry()
        self.__ac = AccessController(self)
        self.materials = {}
        self.__materialGenerator = Counter()
        self._subConts = []
        self.__subContGenerator = Counter()
        self.paper = None
        self.slides = None
        self.video = None
        self.poster = None
        self.reviewing = None
        self._authorGen = Counter()
        self._authors = OOBTree()
        self._primaryAuthors = []
        self._coAuthors = []
        self._speakers = []
        self._track = None
        self._type = None
        # new contributions start unscheduled
        self._status = ContribStatusNotSch(self)
        #List of allowed users to submit material
        self._submitters = []
        self._submittersEmail = []
        self._modificationDS = nowutc()
        self._keywords = ""
        self._reviewManager = ReviewManager(self)
def __cmp__(self, other):
    """Order contributions by conference first, then by id (Python 2 protocol)."""
    if type(self) is not type(other):
        # This is actually dangerous and the ZODB manual says not to do this
        # because it relies on memory order. However, this branch should never
        # be taken anyway since we do not store different types in the same set
        # or use them as keys.
        return cmp(hash(self), hash(other))
    if self.getConference() == other.getConference():
        return cmp(self.getId(), other.getId())
    return cmp(self.getConference(), other.getConference())
@return_ascii
def __repr__(self):
    """Debug representation showing id, title and parent conference id."""
    parent_id = self.parent.getId() if self.parent else None
    return u'<Contribution({}, {}, {})>'.format(self.getId(), to_unicode(self.getTitle()), parent_id)
@property
@memoize_request
def note(self):
    """EventNote linked to this contribution (memoized per request)."""
    from indico.modules.events.notes.models.notes import EventNote
    return EventNote.get_for_linked_object(self)
def getVerboseType(self):
    """Return the human-readable type name of this object."""
    return 'Contribution'
def getTimezone(self):
    """Return the timezone of the owning conference."""
    return self.getConference().getTimezone()
def getReviewManager(self):
    """Return the ReviewManager, creating it lazily for pre-existing objects."""
    if not hasattr(self, "_reviewManager"):
        self._reviewManager = ReviewManager(self)
    return self._reviewManager
def updateNonInheritingChildren(self, elem, delete=False):
    """Track *elem* in the access controller and propagate the change upwards."""
    self.getAccessController().updateNonInheritingChildren(elem, delete)
    self.notify_protection_to_owner(elem, delete)
def notify_protection_to_owner(self, elem, delete=False):
    """Forward a protection change of *elem* to the parent (session/conference)."""
    self.getOwner().updateNonInheritingChildren(elem, delete)
def getKeywords(self):
try:
return self._keywords
except:
self._keywords = ""
return ""
def setKeywords(self, keywords):
    """Set the keywords; a list argument keeps only its first element."""
    if type(keywords) is list:
        self._keywords = keywords[0]
    else:
        self._keywords = keywords
    self.notifyModification(cleanCache=False)
def getFields(self, valueonly=False):
    """Return the abstract-field mapping (lazily created for old objects).

    With ``valueonly`` the values are unwrapped from AbstractFieldContent
    into their plain values.
    """
    try:
        if self._fields:
            pass
    except AttributeError:
        self._fields = {}
    if not valueonly:
        return self._fields
    else:
        return dict((k, v.value if isinstance(v, AbstractFieldContent) else v) for k, v in self._fields.iteritems())
def removeField(self, field):
    """Drop the given abstract field from this contribution, if present."""
    fields = self.getFields()
    if field in fields:
        del fields[field]
        self.notifyModification()
def setField(self, fid, v):
    """Store value *v* in abstract field *fid*, creating the entry if needed."""
    if isinstance(v, AbstractFieldContent):
        v = v.value
    try:
        self.getFields()[fid].value = v
    # `AttritbuteError` may happen if the field is not yet an AbstractFieldContent
    # (lazy migration)
    # `KeyError` means that the attribute doesn't exist in the contrib, in which
    # case it should be created anyway
    except (AttributeError, KeyError):
        afm = self.getConference().getAbstractMgr().getAbstractFieldsMgr()
        for f in afm.getFields():
            if f.getId() == fid:
                self.getFields()[fid] = AbstractFieldContent(f, v)
                break
    self.notifyModification()
def getField(self, field):
if field in self.getFields():
value = self.getFields()[field]
if type(value) is list:
return "".join(value)
elif value is None:
return ""
else:
return value
else:
return ""
def getLogInfo(self):
    """Collect a flat dict of loggable metadata about this contribution."""
    data = {}
    data["subject"] = self.getTitle()
    data["id"] = self.id
    data["title"] = self.title
    data["parent title"] = self.parent.getTitle()
    if self._session is not None:
        data["session title"] = self._session.getTitle()
    data["description"] = self.description
    if self.getConference():
        afm = self.getConference().getAbstractMgr().getAbstractFieldsMgr()
        for f in afm.getFields():
            id = f.getId()
            field = self.getField(id)
            # NOTE(review): getField may return "" (plain str) for a missing
            # field, which has no ``.value``/``.field`` — this appears to rely
            # on every registered field being present; confirm.
            if field.value:
                data['Abstract field {}'.format(field.field._caption)] = field.value
    data["start date"] = format_datetime(self.startDate, locale='en_GB', timezone=self.getConference().timezone)
    data["duration"] = format_human_timedelta(self.duration)
    if self._track is not None:
        data["track"] = self._track.getTitle()
    if self._type is not None:
        data["type"] = self._type.getName()
    data["speaker text"] = self.speakerText
    if self.place is not None:
        data["place"] = self.place.getName()
    if self.room is not None:
        data["room"] = self.room.getName()
    data["board number"] = self._boardNumber
    for sc in self.getSubContributionList():
        data["subcontribution %s" % sc.getId()] = sc.getTitle()
    for pa in self._primaryAuthors:
        data["primary author %s" % pa.getId()] = pa.getFullName()
    for ca in self._coAuthors:
        data["co-author %s" % ca.getId()] = ca.getFullName()
    for sp in self._speakers:
        data["speaker %s" % sp.getId()] = sp.getFullName()
    for s in self.getSubmitterList():
        if isinstance(s, AvatarUserWrapper):
            data["submitter"] = s.getFullName()
        else:
            data["submitter"] = s.getName()
    return data
def setValues(self, data, check=2, moveEntriesBelow=0):
    """Sets all the values of the current contribution object from a
        dictionary containing the following key-value pairs:
            title-(str)
            description-(str)
            locationName-(str) => name of the location, if not specified
                    it will be set to the parent location name.
            locationAddress-(str)
            roomName-(str) => name of the room, if not specified it will
                be set to the parent room name.
            year, month, day, sHour, sMinute - (str) => components of the
                    starting date of the session, if not specified it will
                    be set to now.
            durationHours, durationMinutes - (str)
            speakers - (str)
        check parameter:
            0: no check at all
            1: check and raise error in case of problem
            2: check and adapt the owner dates
        moveEntries:
            0: no move
            1: moveEntries below the contribution
       Please, note that this method sets ALL values which means that if
        the given dictionary doesn't contain any of the keys the value
        will set to a default value.
    """
    # In order to move the entries below, it is needed to know the diff (we have to move them)
    # and the list of entries to move. It's is needed to take those datas in advance because they
    # are going to be modified before the moving.
    if moveEntriesBelow == 1:
        oldStartDate = copy.copy(self.getStartDate())
        oldDuration = copy.copy(self.getDuration())
        i = self.getSchEntry().getSchedule().getEntries().index(self.getSchEntry())+1
        entriesList = self.getSchEntry().getSchedule().getEntries()[i:]
    if data.has_key("title"):
        self.setTitle(data["title"])
    if data.has_key("keywords"):
        self.setKeywords(data["keywords"])
    if data.has_key("description"):
        self.setDescription(data["description"])
    if data.has_key("type") and self.getConference():
        self.setType(self.getConference().getContribTypeById(data["type"]))
    if self.getConference():
        # copy any submitted abstract-field values ("f_<id>" form keys)
        afm = self.getConference().getAbstractMgr().getAbstractFieldsMgr()
        for f in afm.getFields():
            id = f.getId()
            if data.has_key("f_%s" % id):
                self.setField(id, data["f_%s" % id])
    if "locationName" in data:
        loc = self.getOwnLocation()
        if not loc:
            loc = CustomLocation()
        self.setLocation(loc)
        loc.setName(data["locationName"])
        loc.setAddress(data.get("locationAddress", ""))
    else:
        self.setLocation(None)
    #same as for the location
    if "roomName" in data:
        room = self.getOwnRoom()
        if not room:
            room = CustomRoom()
        self.setRoom(room)
        room.setName(data["roomName"])
        room.retrieveFullName(data.get("locationName", ""))
    else:
        self.setRoom(None)
    tz = 'UTC'
    if self.getConference():
        tz = self.getConference().getTimezone()
    if data.get("targetDay", "") != "" and data.get("sHour", "") != "" and data.get("sMinute", "") != "" and check == 2:
        ############################################
        # Fermi timezone awareness                 #
        ############################################
        me = timezone(tz).localize(datetime(int(data["targetDay"][0:4]),
                                            int(data["targetDay"][5:7]), int(data["targetDay"][8:])))
        sdate = timezone(tz).localize(datetime(me.year, me.month,
                                               me.day, int(data["sHour"]), int(data["sMinute"])))
        self.setStartDate(sdate.astimezone(timezone('UTC')), check=2)
    if data.get("sYear", "") != "" and data.get("sMonth", "") != "" and \
            data.get("sDay", "") != "" and data.get("sHour", "") != "" and \
            data.get("sMinute", "") != "":
        self.setStartDate(timezone(tz).localize(datetime(int(data["sYear"]),
                                                         int(data["sMonth"]), int(data["sDay"]),
                                                         int(data["sHour"]), int(data["sMinute"]))).astimezone(timezone('UTC')),
                          check=2)
    ############################################
    # Fermi timezone awareness(end)            #
    ############################################
    if data.get("durTimedelta", "") != "":
        self.setDuration(check=check, dur=data["durTimedelta"])
    elif data.get("durHours", "") != "" and data.get("durMins", "") != "":
        self.setDuration(data["durHours"], data["durMins"], check)
    else:
        h = data.get("durHours", "").strip()
        m = data.get("durMins", "").strip()
        if h != "" or m != "":
            h = h or "0"
            m = m or "0"
            if h != "0" or m != "0":
                self.setDuration(int(h), int(m), check)
    if data.has_key("boardNumber"):
        self.setBoardNumber(data.get("boardNumber", ""))
    if moveEntriesBelow == 1:
        # shift all later entries by the net change in start + duration
        diff = (self.getStartDate() - oldStartDate) + (self.getDuration() - oldDuration)
        self.getConference().getSchedule().moveEntriesBelow(diff, entriesList)
    self.notifyModification()
def clone(self, parent, options, deltaTime = 0):
    """Create a copy of this contribution under *parent*.

    ``options`` toggles which aspects are copied ("tracks", "access",
    "authors", "subcontribs").  ``deltaTime`` shifts the start date; when
    0 it defaults to the difference between the parents' start dates.
    """
    cont = Contribution()
    parent.addContribution(cont, contrib_id=self.getId())
    cont.setTitle( self.getTitle() )
    cont.setDescription( self.getDescription() )
    for k, v in self.getFields().items():
        cont.setField(k, v)
    cont.setKeywords( self.getKeywords() )
    if deltaTime == 0 :
        deltaTime = parent.getStartDate() - self.getOwner().getStartDate()
    startDate = None
    if self.startDate is not None :
        startDate = self.getStartDate() + deltaTime
    cont.setStartDate( startDate )
    cont.setDuration( dur=self.getDuration() )
    if self.getOwnLocation() is not None:
        cont.setLocation(self.getOwnLocation().clone())
    if self.getOwnRoom() is not None:
        cont.setRoom(self.getOwnRoom().clone())
    cont.setBoardNumber(self.getBoardNumber())
    cont.setReportNumberHolder(self.getReportNumberHolder().clone(self))
    cont.setStatus(self.getCurrentStatus())
    # contribution type is matched by name in the target conference
    if self.getType() is not None :
        for ct in cont.getConference().getContribTypeList() :
            if ct.getName() == self.getType().getName() :
                cont.setType(ct)
                break
    if options.get("tracks", False) :
        # tracks are matched by title in the target conference
        if self.getTrack() is not None :
            for tr in cont.getConference().getTrackList() :
                if tr.getTitle() == self.getTrack().getTitle() :
                    cont.setTrack(tr)
                    break
        else :
            cont.setTrack(None)
    if options.get("access", False) :
        cont.setProtection(self.getAccessController()._getAccessProtection())
        for u in self.getAllowedToAccessList() :
            cont.grantAccess(u)
        for mgr in self.getManagerList() :
            cont.grantModification(mgr)
        for sub in self.getSubmitterList() :
            cont.grantSubmission(sub)
        for domain in self.getDomainList():
            cont.requireDomain(domain)
    if options.get("authors", False) :
        for a in self.getPrimaryAuthorList() :
            cont.addPrimaryAuthor(a.clone())
        for ca in self.getCoAuthorList() :
            cont.addCoAuthor(ca.clone())
        for sp in self.getSpeakerList():
            cont.newSpeaker(sp.clone())
        cont.setSpeakerText(self.getSpeakerText())
    if options.get("subcontribs", False) :
        for sc in self.getSubContributionList() :
            cont.addSubContribution(sc.clone(cont, self, options), subcontrib_id=sc.getId())
    return cont
def notifyModification( self, date = None, raiseEvent = True, cleanCache = True):
    """Mark the object (and its parent) as modified.

    Optionally emits the data-changed signal and invalidates the cached
    schedule JSON.  Sets ``_p_changed`` so ZODB persists the object.
    """
    self.setModificationDate(date)
    if raiseEvent:
        signals.event.contribution_data_changed.send(self)
    if cleanCache:
        self.cleanCache()
    parent = self.getParent()
    if parent:
        parent.setModificationDate()
    self._p_changed = 1
def cleanCache(self, cleanConference = True):
    """Invalidate the cached schedule JSON, at most once per request context."""
    # Do not clean cache if already cleaned
    if not ContextManager.get('clean%s'%self.getUniqueId(), False):
        ScheduleToJson.cleanCache(self)
        ContextManager.set('clean%s'%self.getUniqueId(), cleanConference)
def getCategoriesPath(self):
    """Return the category path of the owning conference."""
    return self.getConference().getCategoriesPath()
def getModifKey( self ):
    """Return the modification key of the owning conference."""
    return self.getConference().getModifKey()
def getAccessKey( self ):
    """Return the access key of the owning conference."""
    return self.getConference().getAccessKey()
def getLocator( self ):
    """Gives back a globaly unique identification encapsulated in a Locator
        object for the contribution instance

    Includes the conference locator, the session id when attached to a
    session, and this contribution's id.
    """
    if self.getConference() == None:
        return Locator()
    lconf = self.getConference().getLocator()
    if self.getSession() is not None:
        lconf["sessionId"] = self.getSession().getId()
    lconf["contribId"] = self.getId()
    return lconf
def _setConference( self, conf ):
    # internal: bind the owning conference (no notifications)
    self.parent = conf
def _setId( self, id ):
    # internal: assign the contribution id (no notifications)
    self.id = id
def includeInConference( self, conf, id ):
"""sets the conference of a contribution
"""
if self.getConference() is not None:
#raise MaKaCError("the contribution is already included in a conference")
pass
else:
self._setConference( conf )
self._setId( id )
def delete( self ):
    """deletes a contribution and all of its subitems

    Detaches the contribution from track, session and conference, removes
    attachments/reviewing/subcontributions, and parks the object in the
    trash can (recoverable via ``recover``).
    """
    oldParent = self.getConference()
    if oldParent != None:
        signals.event.contribution_deleted.send(self, parent=oldParent)
        self.setTrack(None)
        self.remove_attachments()
        self.removeReviewing()
        self.notify_protection_to_owner(self, delete=True)
        self.setSession(None)
        while len(self.getSubContributionList()) > 0:
            sc = self.getSubContributionList()[0]
            self.removeSubContribution(sc)
        # delete it from parent session (if it exists)
        if self.getOwner() != self.getConference():
            self.getOwner().removeContribution( self )
        # (always) delete it from the parent conference
        self.getConference().removeContribution( self, callDelete=False )
        self._setConference( None )
        self.setStatus(ContribStatusNone(self))
        TrashCanManager().add(self)
def recover(self):
    """Take the contribution back out of the trash can."""
    TrashCanManager().remove(self)
def setId( self, newId ):
    """Public setter for the contribution id."""
    self._setId(newId)
def getId( self ):
    """Return the contribution id (string)."""
    return self.id
def getUniqueId( self ):
"""returns (string) the unique identifier of the item"""
"""used mainly in the web session access key table"""
return "%st%s" % (self.getConference().getUniqueId(),self.id)
def setTitle( self, newTitle, notify = True ):
oldTitle = self.title
self.title = newTitle.strip()
if notify:
if oldTitle != newTitle:
signals.event.contribution_title_changed.send(self, old=oldTitle, new=newTitle)
self.notifyModification()
def getTitle( self ):
if self.title.strip() == "":
return "(no title)"
return self.title
def getDescription(self):
return str(self.getField("content"))
def setDescription(self, desc):
    """Store the description in the 'content' abstract field."""
    self.setField("content", desc)
def setParent(self, parent):
    """Set the owning conference and flag the object as modified.

    Fix: the original ended with ``if self.parent==None: return`` which was
    dead code (the function returned immediately either way); removed.
    """
    self.parent = parent
    self.notifyModification(cleanCache=False)
def getParent( self ):
if self.getSession() is not None:
return self.getSession()
return self.getConference()
def getOwner( self ):
    """Alias of ``getParent`` (session or conference)."""
    return self.getParent()
def setOwner(self, owner):
    """Alias of ``setParent``."""
    self.setParent(owner)
def getConference( self ):
    """Return the owning conference (``parent``), or None when detached."""
    return self.parent
def getSession( self ):
try:
if self._session:
pass
except AttributeError:
self._session=None
return self._session
def setSession(self,session):
    """Move the contribution into *session* (or detach it with None).

    Unschedules the contribution first and keeps the old/new sessions'
    contribution lists in sync.
    """
    if self.getSession()==session:
        return
    if self.isScheduled():
        schEntry=self.getSchEntry()
        schEntry.getSchedule().removeEntry(schEntry)
    oldSession=self.getSession()
    if oldSession is not None:
        oldSession.removeContribution(self)
    self._session=session
    if session is not None:
        session.addContribution(self)
def getContribution(self):
    """Return self (uniform API shared with schedule entries)."""
    return self
def _resetSchEntry(self):
    # (re)create the schedule-entry wrapper for this contribution
    self.__schEntry=ContribSchEntry(self)
def getSchEntry(self):
    """Return the schedule entry, recreating it if missing or of a wrong class."""
    if self.__schEntry is None or \
       not isinstance(self.__schEntry,ContribSchEntry):
        self._resetSchEntry()
    return self.__schEntry
def isScheduled(self):
    """Tell whether the contribution is placed on a schedule."""
    #For the moment we do it like this
    return self.getSchEntry().getSchedule() is not None
def isWithdrawn(self):
    """Tell whether the current status is 'withdrawn'."""
    return isinstance(self.getCurrentStatus(), ContribStatusWithdrawn)
def getLocationParent(self):
    """
    Returns the object from which the room/location
    information should be inherited
    """
    if not self.getConference().getEnableSessionSlots() and self.getSession():
        return self.getSession()
    if self.isScheduled():
        return self.getSchEntry().getSchedule().getOwner()
    return self.getOwner()
def getOwnLocation(self):
    """Return the contribution's own location object, or None."""
    return self.place
def setLocation(self, newLocation):
    """Set the own location (or None to inherit) and mark modified.

    Fix: removed the unused ``oldLocation`` local of the original.
    """
    self.place = newLocation
    self.notifyModification()
def getOwnRoom(self):
    """Return the contribution's own room object, or None."""
    return self.room
def setRoom(self, newRoom):
    """Set the own room (or None to inherit) and mark modified.

    Fix: removed the unused ``oldRoom`` local of the original.
    """
    self.room = newRoom
    self.notifyModification()
def setBoardNumber(self, newBoardNum):
self._boardNumber=str(newBoardNum).strip()
def getBoardNumber(self):
try:
if self._boardNumber:
pass
except AttributeError:
self._boardNumber=""
return self._boardNumber
def verifyStartDate(self, sDate, check=2):
    """check parameter:
        0: no check at all
        1: check and raise error in case of problem
        2: check and adapt the owner dates

    Validates *sDate* against the owner's (slot/session/conference)
    window; with check==2 the owner's window is extended as needed and
    the automatic operation is recorded in the request context.
    """
    tz = timezone(self.getConference().getTimezone())
    if self.getSchEntry().getSchedule():
        owner = self.getSchEntry().getSchedule().getOwner()
    else:
        owner = self.getOwner()
    if sDate < owner.getStartDate():
        if check == 1:
            raise ParentTimingError(_("The contribution <i>\"%s\"</i> cannot start before (%s) its parent (%s)") %\
                (self.getTitle(), sDate.astimezone(tz).strftime('%Y-%m-%d %H:%M'),\
                owner.getStartDate().astimezone(tz).strftime('%Y-%m-%d %H:%M')),\
                _("Contribution"))
        if check == 2:
            ContextManager.get('autoOps').append((self, "OWNER_START_DATE_EXTENDED",
                                                  owner, sDate.astimezone(tz)))
            owner.setDates(sDate,owner.getEndDate(), check)
    if sDate > owner.getEndDate():
        if check == 1:
            raise ParentTimingError(_("The contribution <i>\"%s\"</i> cannot start after (%s) its parent end date(%s)") %\
                (self.getTitle(), sDate.astimezone(tz).strftime('%Y-%m-%d %H:%M'),\
                owner.getEndDate().astimezone(tz).strftime('%Y-%m-%d %H:%M')),\
                _("Contribution"))
        if check == 2:
            owner.setEndDate(sDate+self.getDuration(),check)
    # Check that after modifying the start date, the end date is still within the limits of the slot
    if self.getDuration() and sDate + self.getDuration() > owner.getEndDate():
        if check==1:
            raise ParentTimingError("The contribution cannot end after (%s) its parent ends (%s)"%\
                ((sDate + self.getDuration()).astimezone(tz).strftime('%Y-%m-%d %H:%M'),\
                owner.getAdjustedEndDate().strftime('%Y-%m-%d %H:%M')),\
                _("Contribution"))
        elif check==2:
            # update the schedule
            owner.setEndDate(sDate + self.getDuration(),check)
            ContextManager.get('autoOps').append((self, "OWNER_END_DATE_EXTENDED",
                                                  owner, owner.getAdjustedEndDate()))
def setStartDate(self, newDate, check=2, moveEntries=0):
    """check parameter:
        0: no check at all
        1: check and raise error in case of problem
        2: check and adapt the owner dates

    ``newDate`` must be timezone-aware; None unschedules the start date.
    """
    if newDate == None:
        self.startDate=None
        return
    if not newDate.tzname():
        raise MaKaCError("date should be timezone aware")
    if newDate != None and check != 0:
        self.verifyStartDate(newDate, check)
    # copy so later mutation of the caller's datetime cannot affect us
    self.startDate=copy.copy(newDate)
    self.getSchEntry().synchro()
    self.notifyModification()
def getStartDate(self):
    """Return the start datetime, or None when unscheduled."""
    return self.startDate
def getAdjustedStartDate(self, tz=None):
    """Return the start date converted to *tz* (default: conference timezone)."""
    if self.getStartDate() is None:
        return None
    if not tz:
        tz = self.getConference().getTimezone()
    if tz not in all_timezones:
        tz = 'UTC'  # fall back for unknown timezone names
    return self.getStartDate().astimezone(timezone(tz))
def getEndDate(self):
if self.getStartDate() is None:
return None
return self.getStartDate()+self.getDuration()
def getAdjustedEndDate(self, tz=None):
    """Return the end date converted to *tz* (default: conference timezone)."""
    if not tz:
        tz = self.getConference().getTimezone()
    if tz not in all_timezones:
        tz = 'UTC'  # fall back for unknown timezone names
    if self.getEndDate():
        return self.getEndDate().astimezone(timezone(tz))
    return None
def getDuration(self):
    """Return the duration as a timedelta."""
    return self.duration
def verifyDuration(self, check=2):
    """check parameter:
        0: no check at all
        1: check and raise error in case of problem
        2: check and adapt the owner dates

    Only applies when the contribution is scheduled; with check==2 the
    schedule owner's end date is extended and the operation recorded.
    """
    tz = timezone(self.getConference().getTimezone())
    endDate = self.getEndDate()
    if self.getSchEntry().getSchedule() is not None:
        owner = self.getSchEntry().getSchedule().getOwner()
        if endDate > owner.getEndDate():
            if check==1:
                raise ParentTimingError(_("The contribution \"%s\" ending date (%s) has to fit between its parent's dates (%s - %s)") %\
                        (self.getTitle(), endDate.astimezone(tz).strftime('%Y-%m-%d %H:%M'),\
                        owner.getStartDate().astimezone(tz).strftime('%Y-%m-%d %H:%M'),\
                        owner.getEndDate().astimezone(tz).strftime('%Y-%m-%d %H:%M')),\
                        _("Contribution"))
            elif check==2:
                ContextManager.get('autoOps').append((self, "OWNER_END_DATE_EXTENDED",
                                                      owner, self.getAdjustedEndDate()))
                owner.setEndDate(endDate, check)
def setDuration(self, hours=0, minutes=15, check=2, dur=0):
    """check parameter:
        0: no check at all
        1: check and raise error in case of problem
        2: check and adapt the owner dates

    A non-zero ``dur`` timedelta takes precedence over hours/minutes.
    """
    if dur!=0:
        self.duration=dur
    else:
        self.duration=timedelta(hours=int(hours),minutes=int(minutes))
    if check != 0:
        self.verifyDuration(check)
    self.getSchEntry().synchro()
    self.notifyModification()
def _addAuthor(self, part):
    """Register *part* in the author table, assigning an id if it has none."""
    # lazy creation for objects persisted before these attributes existed
    try:
        if self._authors:
            pass
    except AttributeError:
        self._authors = OOBTree()
    try:
        if self._authorGen:
            pass
    except AttributeError:
        self._authorGen=Counter()
    newId = part.getId()
    if newId == "":
        newId = str( self._authorGen.newCount() )
    self._authors[newId] = part
    part.includeInContribution( self, newId )
def _removeAuthor(self, part):
    """Drop *part* from the author table; delete it unless it still speaks."""
    try:
        if self._authors:
            pass
    except AttributeError:
        self._authors = OOBTree()
    if not self._authors.has_key( part.getId() ):
        return
    del self._authors[ part.getId() ]
    self._p_changed = True  # BTree mutation alone does not flag the object
    if not self.isSpeaker(part):
        part.delete()
def addPrimaryAuthor(self, part, index=None):
    """Add *part* as primary author, optionally at a given position."""
    try:
        if self._primaryAuthors:
            pass
    except AttributeError:
        self._primaryAuthors = []
    self._addAuthor( part )
    if index is not None:
        self._primaryAuthors.insert(index, part)
    else:
        self._primaryAuthors.append( part )
    if self.getConference() is not None:
        self.getConference().indexAuthor(part)
    self.notifyModification(cleanCache = False)
def removePrimaryAuthor(self, part, removeSpeaker=1, removePendingSubm=True):
    """Remove *part* from the primary authors (and optionally as speaker)."""
    try:
        if self._primaryAuthors:
            pass
    except AttributeError:
        self._primaryAuthors = []
    if part not in self._primaryAuthors:
        return
    if self.getConference() is not None:
        self.getConference().unindexAuthor(part)
    self._primaryAuthors.remove( part )
    if removeSpeaker:
        self.removeSpeaker( part )
    self._removeAuthor( part )
    if removePendingSubm:
        #--Pending queue: remove pending participant waiting to became submitter if anything
        self.getConference().getPendingQueuesMgr().removePendingSubmitter(part)
        #--
    self.notifyModification(cleanCache = False)
def recoverPrimaryAuthor(self, pa, isPendingSubmitter):
    """Re-add a trashed primary author, restoring its pending-submitter state."""
    self.addPrimaryAuthor(pa)
    pa.recover()
    if isPendingSubmitter:
        self.getConference().getPendingQueuesMgr().addPendingSubmitter(pa, False)
def isPrimaryAuthor(self, part):
"""
"""
try:
if self._primaryAuthors:
pass
except AttributeError:
self._primaryAuthors = []
return part in self._primaryAuthors
def isCoAuthor(self, part):
try:
if self._coAuthors:
pass
except AttributeError:
self._coAuthors = []
return part in self._coAuthors
def isPrimaryAuthorByEmail(self, email):
for prAuthor in self.getPrimaryAuthorList():
if prAuthor.getEmail() == email:
return True
return False
def isCoAuthorByEmail(self, email):
for coAuthor in self.getCoAuthorList():
if coAuthor.getEmail() == email:
return True
return False
def isSpeakerByEmail(self, email):
for speaker in self.getSpeakerList():
if speaker.getEmail() == email:
return True
return False
def changePosPrimaryAuthor(self, part, index):
    """Move *part* to position *index* in the primary-author ordering."""
    try:
        if self._primaryAuthors:
            pass
    except AttributeError:
        self._primaryAuthors=[]
    if not part in self._primaryAuthors:
        return
    self._primaryAuthors.remove(part)
    self._primaryAuthors.insert(index,part)
    self.notifyModification(cleanCache = False)
def upPrimaryAuthor(self, part):
    """Move *part* one position earlier in the primary-author ordering."""
    try:
        if self._primaryAuthors:
            pass
    except AttributeError:
        self._primaryAuthors=[]
    try:
        idx=self._primaryAuthors.index(part)
    except ValueError:
        return
    if idx==0:  # already first: nothing to do
        return
    self._primaryAuthors.remove(part)
    self._primaryAuthors.insert(idx-1,part)
    self.notifyModification(cleanCache=False)
def downPrimaryAuthor(self, part):
"""
"""
try:
if self._primaryAuthors:
pass
except AttributeError:
self._primaryAuthors=[]
try:
idx=self._primaryAuthors.index(part)
except ValueError:
return
if idx>len(self._primaryAuthors):
return
self._primaryAuthors.remove(part)
self._primaryAuthors.insert(idx+1,part)
self.notifyModification(cleanCache = False)
def newAuthorsList(self, prAuthors, coAuthors):
    ''' calculate new lists of both kind of authors, because something has
        been changed the position by drag and drop '''
    newPrList = self.calculateNewAuthorList(prAuthors, "prAuthor")
    newCoList = self.calculateNewAuthorList(coAuthors, "coAuthor")
    self.setPrimaryAuthorList(newPrList)
    self.setCoAuthorList(newCoList)
def calculateNewAuthorList(self, list, kind):
result = []
if kind == "prAuthor":
for auth in list:
author = self.getPrimaryAuthorById(auth['id'])
if author:
result.append(author)
else:
author = self.getCoAuthorById(auth['id'])
if author:
result.append(author)
elif kind == "coAuthor":
for auth in list:
author = self.getCoAuthorById(auth['id'])
if author:
result.append(author)
else:
author = self.getPrimaryAuthorById(auth['id'])
if author:
result.append(author)
return result
def getPrimaryAuthorById(self, authorId):
for author in self.getPrimaryAuthorList():
if authorId == author.getId():
return author
return None
def getCoAuthorById(self, authorId):
for author in self.getCoAuthorList():
if authorId == author.getId():
return author
return None
def setPrimaryAuthorList(self, l):
    """Replace the primary-author list wholesale (used by drag&drop reorder)."""
    self._primaryAuthors = l
    self.notifyModification(cleanCache = False)
def setCoAuthorList(self, l):
    """Replace the co-author list wholesale (used by drag&drop reorder)."""
    self._coAuthors = l
    self.notifyModification(cleanCache = False)
def changePosCoAuthor(self, part, index):
    """Move *part* to position *index* in the co-author ordering."""
    try:
        if self._coAuthors:
            pass
    except AttributeError:
        self._coAuthors=[]
    if not part in self._coAuthors:
        return
    self._coAuthors.remove(part)
    self._coAuthors.insert(index,part)
    self.notifyModification(cleanCache = False)
def upCoAuthor(self, part):
    """Move *part* one position earlier in the co-author ordering."""
    try:
        if self._coAuthors:
            pass
    except AttributeError:
        self._coAuthors=[]
    try:
        idx=self._coAuthors.index(part)
    except ValueError:
        return
    if idx==0:  # already first: nothing to do
        return
    self._coAuthors.remove(part)
    self._coAuthors.insert(idx-1,part)
    self.notifyModification(cleanCache = False)
def downCoAuthor(self, part):
"""
"""
try:
if self._coAuthors:
pass
except AttributeError:
self._coAuthors=[]
try:
idx=self._coAuthors.index(part)
except ValueError:
return
if idx>len(self._coAuthors):
return
self._coAuthors.remove(part)
self._coAuthors.insert(idx+1,part)
self.notifyModification(cleanCache = False)
def getPrimaryAuthorList(self):
"""
"""
try:
if self._primaryAuthors:
pass
except AttributeError:
self._primaryAuthors = []
return self._primaryAuthors
getPrimaryAuthorsList = getPrimaryAuthorList
def getAuthorList(self):
    """Return all registered authors as the values of the id->author BTree."""
    try:
        if self._authors:
            pass
    except AttributeError:
        self._authors = OOBTree()
    return self._authors.values()
def getAllAuthors(self):
""" This method returns a list composed by the primary authors
and co-authors. The different with getAuthorList() is the type
of the output.
"""
return self.getPrimaryAuthorList() + self.getCoAuthorList()
def addCoAuthor(self, part, index=None):
    """Add *part* as co-author, optionally at a given position."""
    try:
        if self._coAuthors:
            pass
    except AttributeError:
        self._coAuthors = []
    self._addAuthor( part )
    if index is not None:
        self._coAuthors.insert(index, part)
    else:
        self._coAuthors.append( part )
    if self.getConference() is not None:
        self.getConference().indexAuthor(part)
    self.notifyModification(cleanCache = False)
def removeCoAuthor(self, part, removeSpeaker=1, removePendingSubm=True):
    """Remove *part* from the co-authors (and optionally as speaker)."""
    try:
        if self._coAuthors:
            pass
    except AttributeError:
        self._coAuthors = []
    if part not in self._coAuthors:
        return
    if self.getConference() is not None:
        self.getConference().unindexAuthor(part)
    self._coAuthors.remove( part )
    if removeSpeaker:
        self.removeSpeaker( part )
    self._removeAuthor( part )
    if removePendingSubm:
        #--Pending queue: remove pending participant waiting to became submitter if anything
        self.getConference().getPendingQueuesMgr().removePendingSubmitter(part)
        #--
    self.notifyModification(cleanCache = False)
def recoverCoAuthor(self, ca, isPendingSubmitter):
    """Re-add a trashed co-author, restoring its pending-submitter state."""
    self.addCoAuthor(ca)
    ca.recover()
    if isPendingSubmitter:
        self.getConference().getPendingQueuesMgr().addPendingSubmitter(ca, False)
def getCoAuthorList(self):
"""
"""
try:
if self._coAuthors:
pass
except AttributeError:
self._coAuthors = []
return self._coAuthors
def getAuthorById(self, authorId):
    """Return the author registered under *authorId* (stripped), or None."""
    try:
        if self._authors:
            pass
    except AttributeError:
        self._authors = OOBTree()
    return self._authors.get( authorId.strip(), None )
def isAuthor(self, part):
    """Tell whether *part* is registered in the author table."""
    try:
        if self._authors:
            pass
    except AttributeError:
        self._authors = OOBTree()
    return self._authors.has_key( part.getId() )
def getSpeakerById(self, authorId):
"""
"""
try:
if self._speakers:
pass
except AttributeError:
self._speakers = []
for spk in self._speakers:
if spk.getId() == authorId:
return spk
return None
def changePosSpeaker(self, part, index):
    """Move *part* to position *index* in the speaker ordering."""
    try:
        if self._speakers:
            pass
    except AttributeError:
        self._speakers = []
    if not part in self._speakers:
        return
    self._speakers.remove(part)
    self._speakers.insert(index,part)
    self.notifyModification()
def addSpeaker(self, part, index=None):
    """
    Adds a speaker (ContributionParticipation object) to the contribution
    forcing it to be one of the authors of the contribution

    Raises MaKaCError when *part* is not already a registered author.
    """
    try:
        if self._speakers:
            pass
    except AttributeError:
        self._speakers = []
    if not self.isAuthor( part ):
        raise MaKaCError( _("The Specified speaker is not the Author"), _("Contribution"))
    if index is not None:
        self._speakers.insert(index, part)
    else:
        self._speakers.append( part )
    if self.getConference() is not None:
        self.getConference().indexSpeaker(part)
    self.notifyModification()
def newSpeaker(self, part):
    """
    Adds a new speaker (ContributionParticipation object) to the contribution
    setting the speakers ID and the fact it belongs to that contribution

    Unlike ``addSpeaker`` the participation need not already be an author.
    """
    try:
        if self._speakers:
            pass
    except AttributeError:
        self._speakers = []
    try:
        if self._authorGen:
            pass
    except AttributeError:
        self._authorGen=Counter()
    self._speakers.append( part )
    newId = part.getId()
    if newId == "":
        newId = str( self._authorGen.newCount() )
    part.includeInContribution(self, newId)
    if self.getConference() is not None:
        self.getConference().indexSpeaker(part)
    self.notifyModification()
def removeSpeaker(self, part):
    """Remove *part* from the speakers; delete it unless it is still an author."""
    try:
        if self._speakers:
            pass
    except AttributeError:
        self._speakers = []
    if part not in self._speakers:
        return
    self._speakers.remove( part )
    if self.getConference() is not None:
        self.getConference().unindexSpeaker(part)
    if part not in self.getAuthorList():
        part.delete()
        #--Pending queue: remove pending participant waiting to became submitter if anything
        self.getConference().getPendingQueuesMgr().removePendingSubmitter(part)
        #--
    self.notifyModification()
def recoverSpeaker(self, spk, isPendingSubmitter):
    """Re-add a trashed speaker, restoring its pending-submitter state."""
    self.newSpeaker(spk)
    spk.recover()
    if isPendingSubmitter:
        self.getConference().getPendingQueuesMgr().addPendingSubmitter(spk, False)
def isSpeaker(self, part):
"""
"""
try:
if self._speakers:
pass
except AttributeError:
self._speakers = []
return part in self._speakers
def getSpeakerList(self):
"""
"""
try:
if self._speakers:
pass
except AttributeError:
self._speakers = []
return self._speakers
def getSpeakerText(self):
#to be removed
try:
if self.speakerText:
pass
except AttributeError, e:
self.speakerText = ""
return self.speakerText
def setSpeakerText(self, newText):
    """Store the free-text speaker string, stripped."""
    self.speakerText = newText.strip()
def appendSpeakerText(self, newText):
self.setSpeakerText("%s, %s" % (self.getSpeakerText(), newText.strip()))
def isProtected(self):
    # tells if a contribution is protected or not
    # (owner protection counts as +1, own level may be -1/0/+1)
    return (self.hasProtectedOwner() + self.getAccessProtectionLevel()) > 0
def getAccessProtectionLevel(self):
    """Return the access controller's own protection level."""
    return self.__ac.getAccessProtectionLevel()
def isItselfProtected(self):
    """Tell whether this object is protected directly (not by inheritance)."""
    return self.__ac.isItselfProtected()
def hasAnyProtection(self):
    """Tells whether a contribution has any kind of protection over it:
        access or domain protection.
    """
    if self.__ac.isProtected():
        return True
    if self.getDomainList():
        return True
    if self.getAccessProtectionLevel() == -1:  # explicitly public: stop inheriting
        return False
    if self.getOwner():
        return self.getOwner().hasAnyProtection()
    else:
        return False
def hasProtectedOwner(self):
if self.getOwner() != None:
return self.getOwner().isProtected()
return False
def setProtection(self, private):
    """Set the protection level and emit a change signal when it flipped."""
    oldValue = 1 if self.isProtected() else -1
    self.__ac.setProtection( private )
    self.notify_protection_to_owner(self)
    if oldValue != private:
        # notify listeners
        signals.event.contribution_protection_changed.send(self, old=oldValue, new=private)
def grantAccess(self, prin):
    """Grant access to *prin*, linking avatar users back to this object."""
    self.__ac.grantAccess( prin )
    if isinstance(prin, AvatarUserWrapper):
        prin.linkTo(self, "access")
    self.notifyModification(raiseEvent = False)
def revokeAccess( self, prin ):
    """Revoke access from *prin*, unlinking avatar users."""
    self.__ac.revokeAccess( prin )
    if isinstance(prin, AvatarUserWrapper):
        prin.unlinkTo(self, "access")
    self.notifyModification(raiseEvent = False)
def canView( self, aw ):
"""tells whether the specified user has access to the current object
or any of its sub-objects
"""
if self.canAccess( aw ):
return True
################################################################################################
for sc in self.getSubContributionList():
if sc.canView( aw ):
return True
return False
    def isAllowedToAccess( self, user ):
        """True if `user` may access: via inherited rights (when not itself
        protected), an explicit access grant, modification rights, or
        submission rights."""
        if not user:
            return False
        return (not self.isItselfProtected() and self.getOwner().isAllowedToAccess( user )) or\
            self.__ac.canUserAccess( user ) or\
            self.canUserModify( user ) or \
            self.canUserSubmit(user)
    def canAccess( self, aw ):
        """Full access decision for access wrapper `aw` (IP checks, explicit
        rights, protection state and conference access key)."""
        # Allow harvesters (Invenio, offline cache) to access
        # protected pages
        if has_request_context() and self.__ac.isHarvesterIP(request.remote_addr):
            return True
        #####################################################
        if self.canModify(aw):
            return True
        # Both the IP restriction and the explicit-rights check must fail to deny.
        if not self.canIPAccess(request.remote_addr) and not self.isAllowedToAccess(aw.getUser()):
            return False
        if not self.isProtected():
            return True
        flag = self.isAllowedToAccess( aw.getUser() )
        return flag or self.getConference().canKeyAccess(aw)
    def grantModification( self, prin ):
        """Grant modification rights to the given principal."""
        self.__ac.grantModification( prin )
        if isinstance(prin, AvatarUserWrapper):
            # Keep the user-side back-reference in sync.
            prin.linkTo(self, "manager")
        self.notifyModification(raiseEvent = False)
    def revokeModification( self, prin ):
        """Revoke modification rights from the given principal."""
        self.__ac.revokeModification( prin )
        if isinstance(prin, AvatarUserWrapper):
            # Remove the user-side back-reference as well.
            prin.unlinkTo(self, "manager")
        self.notifyModification(raiseEvent = False)
    def canModify(self, aw_or_user):
        """True if the given access wrapper or user may modify this object."""
        # Accept either an AccessWrapper (has getUser) or a plain user object.
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user) or self.getConference().canKeyModify()
    def canUserModify( self, av ):
        """Tells whether a user is allowed to modify the current contribution:
        only if the user is granted to modify the contribution or the user
        can modify any of its upper objects (i.e. conference or session).
        """
        return self.getParent().canUserModify( av ) or self.__ac.canModify( av )
    def getManagerList( self ):
        """Return the principals with modification rights."""
        return self.__ac.getModifierList()
    def getAllowedToAccessList( self ):
        """Return the principals explicitly granted read access."""
        return self.__ac.getAccessList()
    def addMaterial( self, newMat ):
        """Attach `newMat`, assigning it the next unique material id."""
        newMat.setId( str(self.__materialGenerator.newCount()) )
        newMat.setOwner( self )
        self.materials[ newMat.getId() ] = newMat
        self.notifyModification()
    def removeMaterial(self, mat):
        # NOTE(review): only the special 'reviewing' material is handled here;
        # other materials are silently ignored — presumably they are managed
        # elsewhere nowadays. Confirm before relying on this for removal.
        if mat.getId().lower() == 'reviewing':
            self.removeReviewing()
    def newSubContribution(self):
        """Create, register and return a fresh SubContribution."""
        newSub = SubContribution()
        self.addSubContribution(newSub)
        signals.event.subcontribution_created.send(newSub, parent=self)
        return newSub
    def addSubContribution(self, newSubCont, subcontrib_id=None):
        """Append a subcontribution, generating an id unless one is given."""
        if subcontrib_id is None:
            newSubCont.setId(str(self.__subContGenerator.newCount()))
        else:
            newSubCont.setId(str(subcontrib_id))
            # Keep the counter ahead of externally supplied ids.
            self.__subContGenerator.sync(subcontrib_id)
        newSubCont.setOwner( self )
        self._subConts.append( newSubCont )
        self.notifyModification(cleanCache = False)
    def removeSubContribution( self, subCont ):
        """Detach and delete `subCont` (no-op if it is not ours)."""
        if subCont in self._subConts:
            subCont.delete()
            subCont.setOwner(None)
            self._subConts.remove(subCont)
            self.notifyModification(cleanCache = False)
    def recoverSubContribution( self, recSubCont ):
        """Re-attach a subcontribution restored from the trash can."""
        # Id must already be set in recSubCont.
        recSubCont.setOwner( self )
        self._subConts.append( recSubCont )
        recSubCont.recover()
        self.notifyModification(cleanCache = False)
def getSubContributionById(self, SCId):
for sb in self._subConts:
if sb.getId() == SCId:
return sb
    def getSubContributionList(self):
        # Returns the live list; callers must not mutate it directly.
        return self._subConts
    def iterSubContributions(self):
        """Iterate over the subcontributions in display order."""
        return iter(self._subConts)
    def getNumberOfSubcontributions(self):
        """Return the number of subcontributions."""
        return len(self._subConts)
def upSubContribution(self, subcont):
if subcont in self._subConts:
if self._subConts.index(subcont) != 0:
index = self._subConts.index(subcont)
sb = self._subConts.pop(index)
self._subConts.insert(index-1, sb)
self.notifyModification(cleanCache = False)
def downSubContribution(self, subCont):
if subCont in self._subConts:
if self._subConts.index(subCont) < len(self._subConts)-1:
index = self._subConts.index(subCont)
sb = self._subConts.pop(index)
self._subConts.insert(index+1, sb)
self.notifyModification(cleanCache = False)
def setReviewing( self, newReviewing ):
if self.getReviewing() != None:
raise MaKaCError( _("The reviewing maretial for this contribution has already been set"), _("Contribution"))
self.reviewing=newReviewing
self.reviewing.setOwner( self )
self.notifyModification()
    def removeReviewing( self ):
        """Delete the reviewing material, if any."""
        if self.getReviewing() is None:
            return
        self.reviewing.delete()
        self.reviewing.setOwner(None)
        self.reviewing = None
        self.notifyModification()
    def recoverReviewing(self, p):
        """Re-attach reviewing material restored from the trash can."""
        self.setReviewing(p)
        p.recover()
def getReviewing( self ):
try:
if self.reviewing:
pass
except AttributeError, e:
self.reviewing = None
return self.reviewing
    def getMasterSchedule( self ):
        """Return the schedule of the owning object."""
        return self.getOwner().getSchedule()
    def requireDomain( self, dom ):
        """Restrict access to the given network domain."""
        self.__ac.requireDomain( dom )
    def freeDomain( self, dom ):
        """Lift the access restriction for the given network domain."""
        self.__ac.freeDomain( dom )
    def getDomainList( self ):
        """Return the network domains access is restricted to."""
        return self.__ac.getRequiredDomainList()
def getTrack( self ):
try:
if self._track:
pass
except AttributeError:
self._track = None
return self._track
    def setTrack( self, newTrack ):
        """Move this contribution to `newTrack`, keeping both tracks' lists
        in sync. Passing None just detaches it from the current track."""
        currentTrack = self.getTrack()
        if newTrack == currentTrack:
            return
        if currentTrack:
            currentTrack.removeContribution( self )
        self._track = newTrack
        if self._track:
            self._track.addContribution( self )
def removeTrack(self, track):
if track == self._track:
self._track = None
    def setType( self, newType ):
        """Set the contribution type (a ContribType or None)."""
        self._type = newType
def getType( self ):
try:
if self._type:
pass
except AttributeError:
self._type = None
return self._type
def getModificationDate( self ):
"""Returns the date in which the contribution was last modified"""
try:
return self._modificationDS
except:
if self.getConference():
self._modificationDS = self.getConference().getModificationDate()
else:
self._modificationDS = nowutc()
return self._modificationDS
def getCurrentStatus(self):
try:
if self._status:
pass
except AttributeError:
self._status=ContribStatusNotSch(self)
return self._status
getStatus = getCurrentStatus
    def setStatus(self,newStatus):
        """Replace the contribution status with a ContribStatus instance."""
        self._status=newStatus
    def withdraw(self,resp,comment):
        """Toggle the withdrawn state: re-instate a withdrawn contribution,
        or withdraw an active one (unindexing its people and unscheduling it).
        """
        if self.isWithdrawn():
            #put back the authors in the author index
            for auth in self.getAuthorList():
                self.getConference().getAuthorIndex().index(auth)
            for spk in self.getSpeakerList():
                self.getConference().getSpeakerIndex().index(spk)
            #change the status of the contribution back to 'not scheduled'
            self._status=ContribStatusNotSch(self)
        else:
            #remove the authors from the author index
            if self.getConference() is not None:
                for auth in self.getAuthorList():
                    self.getConference().getAuthorIndex().unindex(auth)
                for spk in self.getSpeakerList():
                    self.getConference().unindexSpeaker(spk)
            #remove the contribution from any schedule it is included in
            if self.isScheduled():
                self.getSchEntry().getSchedule().removeEntry(self.getSchEntry())
            # Delegate the actual status change (records resp and comment).
            self.getCurrentStatus().withdraw(resp,comment)
    def getSubmitterList(self, no_groups=False):
        """Return the principals with submission rights; with no_groups=True,
        group principals are filtered out."""
        try:
            if self._submitters:
                pass
        except AttributeError:
            self._submitters=[] #create the attribute
            self.notifyModification(raiseEvent = False)
        if no_groups:
            return [s for s in self._submitters if not isinstance(s, GroupWrapper)]
        else:
            return self._submitters
    def _grantSubmission(self,av):
        """Grant submission rights to an already-resolved principal."""
        if av not in self.getSubmitterList():
            self.getSubmitterList().append(av)
        if self.getConference() is not None:
            self.getConference().addContribSubmitter(self,av)
        if isinstance(av, AvatarUserWrapper):
            # Keep the user-side back-reference in sync.
            av.linkTo(self, "submission")
        self.notifyModification(raiseEvent = False)
def _grantSubmissionEmail(self, email):
"""Returns True if submission email was granted. False if email was already in the list.
"""
if not email.lower() in map(lambda x: x.lower(), self.getSubmitterEmailList()):
self.getSubmitterEmailList().append(email.lower().strip())
return True
return False
    def revokeSubmissionEmail(self, email):
        """Remove `email` from the pending-submitter email list (exact match)."""
        if email in self.getSubmitterEmailList():
            self.getSubmitterEmailList().remove(email)
            # Plain list mutation: mark the persistent object dirty by hand.
            self._p_changed=1
    def grantSubmission(self, sb, sendEmail=True):
        """Grants a user with submission privileges for the contribution
           - sb: can be an Avatar or an Author (primary author, co-author, speaker)
        """
        if isinstance(sb, ContributionParticipation) or isinstance(sb, SubContribParticipation):
            # Resolve the participation to an existing account via its email.
            ah = AvatarHolder()
            results=ah.match({"email":sb.getEmail()}, exact=1, searchInAuthenticators=False)
            if not results:
                # Fall back to external authenticators.
                results=ah.match({"email":sb.getEmail()}, exact=1)
            r=None
            for i in results:
                if i.hasEmail(sb.getEmail()):
                    r=i
                    break
            if r and r.isActivated():
                self._grantSubmission(r)
            elif sb.getEmail():
                # No active account: queue the grant until the user registers.
                self.getConference().getPendingQueuesMgr().addPendingSubmitter(sb, False)
                submissionEmailGranted = self._grantSubmissionEmail(sb.getEmail())
                if submissionEmailGranted and sendEmail:
                    notif = pendingQueues._PendingSubmitterNotification( [sb] )
                    mail.GenericMailer.sendAndLog(notif, self.getConference(), 'Contribution')
                    if self.getConference():
                        self.getConference().addContribSubmitter(self,sb)
        else:
            self._grantSubmission(sb)
    def _revokeSubmission(self, av):
        """Revoke submission rights from an already-resolved principal."""
        if av in self.getSubmitterList():
            self.getSubmitterList().remove(av)
        if self.getConference():
            self.getConference().removeContribSubmitter(self, av)
        if isinstance(av, AvatarUserWrapper):
            # Remove the user-side back-reference as well.
            av.unlinkTo(self, "submission")
        self.notifyModification(raiseEvent = False)
    def revokeSubmission(self, sb):
        """Removes submission privileges for the specified user
           - sb: can be an Avatar or an Author (primary author, co-author, speaker)
        """
        if isinstance(sb, ContributionParticipation) or isinstance(sb, SubContribParticipation):
            # Resolve the participation to an account via its email.
            ah = AvatarHolder()
            results = ah.match({"email": sb.getEmail()}, exact=1, searchInAuthenticators=False)
            r = None
            for i in results:
                if i.hasEmail(sb.getEmail()):
                    r=i
                    break
            if r:
                self._revokeSubmission(r)
            else:
                # Not an account: drop the pending email grant instead.
                self.revokeSubmissionEmail(sb.getEmail())
        else:
            self._revokeSubmission(sb)
    def revokeAllSubmitters(self):
        """Drop all submission grants at once (no per-user unlinking)."""
        self._submitters = []
        self.notifyModification(raiseEvent = False)
def getSubmitterEmailList(self):
try:
return self._submittersEmail
except:
self._submittersEmail = []
return self._submittersEmail
    def canUserSubmit(self, sb):
        """Tells whether a user can submit material for the current contribution
           - sb: can be an Avatar or an Author (primary author, co-author, speaker)
        """
        if sb is None:
            return False
        if isinstance(sb, ContributionParticipation) or isinstance(sb, SubContribParticipation):
            sbEmail = sb.getEmail()
            # Normally, we shouldn't get here unless we're adding someone as a Speaker or similar.
            # `no_groups` is used so that we do not consider group membership, as to not confuse the
            # user (since there will be speakers with "implicit" privileges) and avoid that hasEmail breaks
            return any(submitter.hasEmail(sbEmail) for submitter in self.getSubmitterList(no_groups=True)) or \
                any(submitterEmail == sbEmail for submitterEmail in self.getSubmitterEmailList())
        # Avatar/group case: check membership against each granted principal.
        for principal in self.getSubmitterList():
            if principal != None and principal.containsUser(sb):
                return True
        return False
    def getAccessController(self):
        """Return this contribution's AccessController."""
        return self.__ac
def getReportNumberHolder(self):
try:
if self._reportNumberHolder:
pass
except AttributeError, e:
self._reportNumberHolder=ReportNumberHolder(self)
return self._reportNumberHolder
    def setReportNumberHolder(self, rnh):
        """Replace the ReportNumberHolder (used when cloning)."""
        self._reportNumberHolder=rnh
@classmethod
def contributionStartDateForSort(cls, contribution):
""" Function that can be used as "key" argument to sort a list of contributions by start date
The contributions with no start date will be at the end with this sort
"""
if contribution.getStartDate():
return contribution.getStartDate()
else:
return maxDatetime()
def getColor(self):
res=""
if self.getSession() is not None:
res=self.getSession().getColor()
return res
def getTextColor(self):
res=""
if self.getSession() is not None:
res=self.getSession().getTextColor()
return res
class AcceptedContribution(Contribution):
    """This class represents a contribution which has been created from an
        abstract
    """

    def __init__(self, abstract):
        """Create the contribution from `abstract`, copying title, fields,
        track/type (when accepted), authors/speakers and the submitter."""
        Contribution.__init__(self)
        abstract.getConference().addContribution(self, abstract.getId())
        self._abstract = abstract
        self.setTitle(abstract.getTitle())
        self._setFieldsFromAbstract()
        if isinstance(abstract.getCurrentStatus(), review.AbstractStatusAccepted):
            self.setTrack(abstract.getCurrentStatus().getTrack())
            self.setType(abstract.getCurrentStatus().getType())
        for auth in abstract.getAuthorList():
            c_auth = ContributionParticipation()
            self._setAuthorValuesFromAbstract(c_auth, auth)
            if abstract.isPrimaryAuthor(auth):
                self.addPrimaryAuthor(c_auth)
            else:
                self.addCoAuthor(c_auth)
            if abstract.isSpeaker(auth):
                self.addSpeaker(c_auth)
        # The abstract submitter automatically gets submission rights.
        self._grantSubmission(self.getAbstract().getSubmitter().getUser())

    def _setAuthorValuesFromAbstract(self, cAuth, aAuth):
        # Copy personal data from an abstract author to a contribution author.
        cAuth.setTitle(aAuth.getTitle())
        cAuth.setFirstName(aAuth.getFirstName())
        cAuth.setFamilyName(aAuth.getSurName())
        cAuth.setEmail(aAuth.getEmail())
        cAuth.setAffiliation(aAuth.getAffiliation())
        cAuth.setAddress(aAuth.getAddress())
        cAuth.setPhone(aAuth.getTelephone())

    def _setFieldsFromAbstract(self):
        # Copy every abstract field (content, summary, ...) onto the contribution.
        for k, v in self._abstract.getFields().iteritems():
            self.setField(k, v)

    def getAbstract(self):
        return self._abstract

    def setAbstract(self, abs):
        self._abstract = abs

    def getSubmitterList(self, no_groups=False):
        """Same as Contribution.getSubmitterList, but on first access it also
        grants submission to the abstract's submitter (legacy migration)."""
        try:
            if self._submitters:
                pass
        except AttributeError:
            self._submitters = []  # create the attribute
            self._grantSubmission(self.getAbstract().getSubmitter().getUser())
        if no_groups:
            return [s for s in self._submitters if not isinstance(s, GroupWrapper)]
        else:
            return self._submitters

    def delete(self):
        """deletes a contribution and all of their subitems
        """
        abs = self.getAbstract()
        if abs:
            # Put the abstract back into the 'submitted' state, restoring the
            # accepted track so the information is not lost.
            cs = abs.getCurrentStatus()
            if isinstance(cs, review.AbstractStatusAccepted):
                if cs.getTrack() is not None:
                    abs.addTrack(cs.getTrack())
            abs.setCurrentStatus(review.AbstractStatusSubmitted(abs))
            abs._setContribution(None)
            self.setAbstract(None)
        Contribution.delete(self)
class ContribStatus(Persistent):
    """Base class for contribution status objects: records which contribution
    it applies to, who set it and when."""

    def __init__(self,contribution,responsible):
        self._setContrib(contribution)
        self._setResponsible(responsible)
        self._setDate()

    def clone(self, contribution, responsible):
        # The clone keeps the original status date.
        cs = ContribStatus(contribution, responsible)
        cs.setDate(self.getDate())
        return cs

    def _setContrib(self,newContrib):
        self._contrib=newContrib

    def getContrib(self):
        return self._contrib

    def _setResponsible(self,newResp):
        self._responsible=newResp

    def getResponsible(self):
        return self._responsible

    def _setDate(self):
        # Status timestamps are always stored in UTC.
        self._date=nowutc()

    def setDate(self, date):
        self._date = date

    def getDate(self):
        return self._date

    def withdraw(self,resp,comments=""):
        """Transition the owning contribution to the withdrawn status."""
        self._contrib.setStatus(ContribStatusWithdrawn(self.getContrib(),resp,comments))
class ContribStatusNotSch(ContribStatus):
    """Status of a contribution that is not (yet) scheduled."""

    def __init__(self,contrib):
        ContribStatus.__init__(self,contrib,None)

    def clone(self, contribution):
        # The clone keeps the original status date.
        csns = ContribStatusNotSch(contribution)
        csns.setDate(self.getDate())
        return csns

# Historical alias: "submitted" and "not scheduled" are the same status.
ContribStatusSubmitted=ContribStatusNotSch
class ContribStatusSch(ContribStatus):
    """Status of a contribution that has been scheduled."""

    def __init__(self,contrib):
        ContribStatus.__init__(self,contrib,None)

    def clone(self, contribution):
        # The clone keeps the original status date.
        css = ContribStatusSch(contribution)
        css.setDate(self.getDate())
        return css
class ContribStatusWithdrawn(ContribStatus):
    """Status of a contribution that was withdrawn; records who withdrew it
    and an optional comment."""

    def __init__(self,contrib,resp,comments):
        ContribStatus.__init__(self,contrib,resp)
        self._setComment(comments)

    def clone(self, contribution):
        # Bug fix: the old code called ContribStatusWithdrawn(contribution),
        # omitting the required resp/comments arguments, and then called a
        # non-existent setComment() — cloning always raised at runtime.
        csw = ContribStatusWithdrawn(contribution, self.getResponsible(), self.getComment())
        csw.setDate(self.getDate())
        return csw

    def _setComment(self,text):
        self._comment=text.strip()

    def getComment(self):
        return self._comment
class ContribStatusNone(ContribStatus):
    # This is a special status we assign to contributions that are put in the trash can.

    def __init__(self,contrib):
        ContribStatus.__init__(self,contrib,None)

    def clone(self, contribution):
        # The clone keeps the original status date.
        csn = ContribStatusNone(contribution)
        csn.setDate(self.getDate())
        return csn
class SubContribParticipation(Persistent, Fossilizable):
    """A person (speaker) attached to a subcontribution: plain personal data
    plus hooks to keep the conference author/speaker indexes up to date."""

    fossilizes(ISubContribParticipationFossil, ISubContribParticipationFullFossil)

    def __init__( self ):
        self._subContrib = None
        self._id = ""
        self._firstName = ""
        self._surName = ""
        self._email = ""
        self._affiliation = ""
        self._address = ""
        self._phone = ""
        self._title = ""
        self._fax = ""

    def getConference(self):
        """Return the conference this participation belongs to, or None."""
        if self._subContrib is not None:
            return self._subContrib.getConference()
        return None

    def _notifyModification( self ):
        # Propagate the dirty flag to the owning subcontribution.
        if self._subContrib != None:
            self._subContrib.notifyModification()

    def setValues(self, data):
        """Bulk-set all personal fields from a dict (missing keys become "")."""
        self.setFirstName(data.get("firstName", ""))
        self.setFamilyName(data.get("familyName",""))
        # NOTE: "affilation" is a historical misspelling; it is the key used
        # in stored/serialized data, so it must not be corrected here.
        self.setAffiliation(data.get("affilation",""))
        self.setAddress(data.get("address",""))
        self.setEmail(data.get("email",""))
        self.setFax(data.get("fax",""))
        self.setTitle(data.get("title",""))
        self.setPhone(data.get("phone",""))
        self._notifyModification()

    def getValues(self):
        """Return all personal fields as a dict (inverse of setValues)."""
        data={}
        data["firstName"]=self.getFirstName()
        data["familyName"]=self.getFamilyName()
        # Same historical misspelling as in setValues — keep as-is.
        data["affilation"]=self.getAffiliation()
        data["address"]=self.getAddress()
        data["email"]=self.getEmail()
        data["fax"]=self.getFax()
        data["title"]=self.getTitle()
        data["phone"]=self.getPhone()
        return data

    def clone(self):
        """Return a detached copy (no subcontribution, no id)."""
        part = SubContribParticipation()
        part.setValues(self.getValues())
        return part

    def setDataFromAvatar(self,av):
        # av is an Avatar object.
        if av is None:
            return
        self.setFirstName(av.getName())
        self.setFamilyName(av.getSurName())
        self.setEmail(av.getEmail())
        self.setAffiliation(av.getOrganisation())
        self.setAddress(av.getAddress())
        self.setPhone(av.getTelephone())
        self.setTitle(av.getTitle())
        self.setFax(av.getFax())
        self._notifyModification()

    def setDataFromAuthor(self,au):
        # au is a ContributionParticipation object.
        if au is None:
            return
        self.setFirstName(au.getFirstName())
        self.setFamilyName(au.getFamilyName())
        self.setEmail(au.getEmail())
        self.setAffiliation(au.getAffiliation())
        self.setAddress(au.getAddress())
        self.setPhone(au.getPhone())
        self.setTitle(au.getTitle())
        self.setFax(au.getFax())
        self._notifyModification()

    def setDataFromSpeaker(self,spk):
        # spk is a SubContribParticipation object.
        if spk is None:
            return
        self.setFirstName(spk.getFirstName())
        self.setFamilyName(spk.getFamilyName())
        self.setEmail(spk.getEmail())
        self.setAffiliation(spk.getAffiliation())
        self.setAddress(spk.getAddress())
        self.setPhone(spk.getPhone())
        self.setTitle(spk.getTitle())
        self.setFax(spk.getFax())
        self._notifyModification()

    def includeInSubContrib( self, subcontrib, id ):
        """Attach this participation to `subcontrib` under the given id."""
        if self.getSubContrib() == subcontrib and self.getId()==id.strip():
            return
        self._subContrib = subcontrib
        self._id = id

    def delete( self ):
        # Detach and park in the trash can (recoverable).
        self._subContrib = None
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    @Updates ('MaKaC.conference.SubContribParticipation', 'id')
    def setId(self, newId):
        self._id = newId

    def getId( self ):
        return self._id

    def getSubContrib( self ):
        return self._subContrib

    def getContribution( self ):
        """Return the parent contribution (via the subcontribution), or None."""
        if self._subContrib is not None:
            return self._subContrib.getContribution()
        return None

    def _unindex(self):
        # Remove this person from the conference author/speaker indexes
        # (needed before any name/email change, see setters below).
        contrib=self.getContribution()
        if contrib is not None:
            conf=contrib.getConference()
            if conf is not None:
                conf.unindexAuthor(self)
                conf.unindexSpeaker(self)

    def _index(self):
        # Re-add this person to the conference author/speaker indexes.
        contrib=self.getContribution()
        if contrib is not None:
            conf=contrib.getConference()
            if conf is not None:
                conf.indexAuthor(self)
                conf.indexSpeaker(self)

    @Updates ('MaKaC.conference.SubContribParticipation', 'firstName')
    def setFirstName( self, newName ):
        tmp=newName.strip()
        if tmp==self._firstName:
            return
        # Indexes are keyed on the name: unindex, change, reindex.
        self._unindex()
        self._firstName=tmp
        self._index()
        self._notifyModification()

    def getFirstName( self ):
        return self._firstName

    def getName( self ):
        # Alias kept for Avatar-compatible duck typing.
        return self._firstName

    @Updates ('MaKaC.conference.SubContribParticipation', 'familyName')
    def setFamilyName( self, newName ):
        tmp=newName.strip()
        if tmp==self._surName:
            return
        # Indexes are keyed on the name: unindex, change, reindex.
        self._unindex()
        self._surName=tmp
        self._index()
        self._notifyModification()

    def getFamilyName( self ):
        return self._surName

    def getSurName( self ):
        # Alias kept for Avatar-compatible duck typing.
        return self._surName

    @Updates ('MaKaC.conference.SubContribParticipation', 'email')
    def setEmail( self, newMail ):
        tmp=newMail.strip()
        if tmp==self._email:
            return
        # Indexes are keyed on the email: unindex, change, reindex.
        self._unindex()
        self._email=newMail.strip()
        self._index()
        self._notifyModification()

    def getEmail( self ):
        return self._email

    @Updates ('MaKaC.conference.SubContribParticipation', 'affiliation')
    def setAffiliation( self, newAffil ):
        self._affiliation = newAffil.strip()
        self._notifyModification()

    def getAffiliation( self ):
        return self._affiliation

    @Updates ('MaKaC.conference.SubContribParticipation', 'address')
    def setAddress( self, newAddr ):
        self._address = newAddr.strip()
        self._notifyModification()

    def getAddress( self ):
        return self._address

    @Updates ('MaKaC.conference.SubContribParticipation', 'phone')
    def setPhone( self, newPhone ):
        self._phone = newPhone.strip()
        self._notifyModification()

    def getPhone( self ):
        return self._phone

    @Updates ('MaKaC.conference.SubContribParticipation', 'title')
    def setTitle( self, newTitle ):
        self._title = newTitle.strip()
        self._notifyModification()

    def getTitle( self ):
        return self._title

    def setFax( self, newFax ):
        self._fax = newFax.strip()
        self._notifyModification()

    def getFax( self ):
        # Lazily create the attribute for legacy persistent objects.
        try:
            if self._fax:
                pass
        except AttributeError:
            self._fax=""
        return self._fax

    def getFullName( self ):
        """Return "Title SURNAME, FirstName" (title omitted when empty)."""
        res = self.getFullNameNoTitle()
        if self.getTitle() != "":
            res = "%s %s"%( self.getTitle(), res )
        return res

    def getFullNameNoTitle(self):
        """Return "SURNAME, FirstName" (pieces omitted when empty)."""
        res = safe_upper(self.getFamilyName())
        if self.getFirstName():
            if res.strip():
                res = "%s, %s" % (res, self.getFirstName())
            else:
                res = self.getFirstName()
        return res

    def getAbrName(self):
        """Return "Surname, F." (abbreviated first name)."""
        res = self.getFamilyName()
        if self.getFirstName():
            if res:
                res = "%s, " % res
            res = "%s%s." % (res, safe_upper(self.getFirstName()[0]))
        return res

    def getDirectFullName(self):
        """Return "Title FirstName SURNAME"."""
        res = self.getDirectFullNameNoTitle()
        if self.getTitle():
            res = "%s %s" % (self.getTitle(), res)
        return res

    def getDirectFullNameNoTitle(self, upper=True):
        """Return "FirstName Surname", upper-casing the surname by default."""
        surName = safe_upper(self.getFamilyName()) if upper else self.getFamilyName()
        return "{0} {1}".format(self.getFirstName(), surName).strip()
class SubContribution(CommonObjectBase, Locatable):
    """A subdivision of a Contribution: its own title, description, duration
    and speakers, while protection, location and access control are always
    inherited from the parent contribution.
    """

    fossilizes(ISubContributionFossil, ISubContributionWithSpeakersFossil)

    def __init__( self, **subContData ):
        self.parent = None
        self.id = ""
        self.title = ""
        self.description = ""
        self.__schEntry = None
        self.duration = timedelta( minutes=15 )
        self.speakers = []
        self.speakerText = ""
        self.materials = {}
        # Provides material unique identifiers within the current contribution.
        self.__materialGenerator = Counter()
        self.paper = None
        self.slides = None
        self.video = None
        self.poster = None  # (was assigned twice in the old code)
        self.minutes = None
        self._authorGen = Counter()
        self._keywords = ""

    @return_ascii
    def __repr__(self):
        if self.parent:
            parent_id = self.parent.getId()
            event_id = self.getConference().getId() if self.getConference() else None
        else:
            parent_id = None
            event_id = None
        return '<SubContribution({}, {}, {}.{})>'.format(self.getId(), self.getTitle(), event_id, parent_id)

    @property
    @memoize_request
    def note(self):
        """The EventNote attached to this subcontribution, if any."""
        from indico.modules.events.notes.models.notes import EventNote
        return EventNote.get_for_linked_object(self)

    def updateNonInheritingChildren(self, elem, delete=False):
        # Protection bookkeeping is entirely delegated to the owner.
        self.getOwner().updateNonInheritingChildren(elem, delete)

    def getAccessController(self):
        # No own access controller: always the owner's.
        return self.getOwner().getAccessController()

    def getKeywords(self):
        """Return the keywords string ("" for legacy objects)."""
        try:
            return self._keywords
        except AttributeError:
            # Was a bare `except:`; only the missing attribute is expected.
            self._keywords = ""
            return ""

    def setKeywords(self, keywords):
        self._keywords = keywords

    def getLogInfo(self):
        """Return a dict describing this subcontribution for the event log."""
        data = {}
        data["subject"] = self.getTitle()
        data["id"] = self.id
        data["title"] = self.title
        data["parent title"] = self.getParent().getTitle()
        data["description"] = self.description
        data["duration"] = "%s" % self.duration
        data["minutes"] = self.minutes
        for sp in self.speakers:
            data["speaker %s" % sp.getId()] = sp.getFullName()
        return data

    def clone(self, deltaTime, parent, options):
        """Return a copy of this subcontribution attached to `parent`.

        `options["authors"]` controls whether speakers are copied too;
        `deltaTime` is unused here (kept for API symmetry with Contribution).
        """
        sCont = SubContribution()
        sCont.setParent(parent)
        sCont.setTitle(self.getTitle())
        sCont.setDescription(self.getDescription())
        sCont.setKeywords(self.getKeywords())
        dur = self.getDuration()
        # NOTE(review): uses timedelta.seconds only, so a duration >= 1 day
        # would lose its day component — presumably durations are short.
        hours = dur.seconds / 3600
        minutes = (dur.seconds % 3600) / 60
        sCont.setDuration(hours, minutes)
        sCont.setReportNumberHolder(self.getReportNumberHolder().clone(self))
        # There is no _order attribute in this class
        if options.get("authors", False):
            for s in self.getSpeakerList():
                sCont.newSpeaker(s.clone())
        sCont.setSpeakerText(self.getSpeakerText())
        sCont.notifyModification()
        return sCont

    def notifyModification(self, raiseEvent = True):
        """Mark the object dirty, bump the parent's modification date and
        optionally emit the data-changed signal."""
        parent = self.getParent()
        if parent:
            parent.setModificationDate()
        if raiseEvent:
            signals.event.subcontribution_data_changed.send(self)
        self._p_changed = 1

    def getCategoriesPath(self):
        return self.getConference().getCategoriesPath()

    def getLocator( self ):
        """Gives back a globaly unique identification encapsulated in a Locator
        object for the contribution instance
        """
        lconf = self.getOwner().getLocator()
        lconf["subContId"] = self.getId()
        return lconf

    def setId( self, newId ):
        self.id = newId

    def getId( self ):
        return self.id

    def getUniqueId( self ):
        """returns (string) the unique identifier of the item
        used mainly in the web session access key table"""
        return "%ssc%s" % (self.getParent().getUniqueId(), self.id)

    def setTitle( self, newTitle ):
        old_title = self.title
        self.title = newTitle.strip()
        if old_title != self.title:
            signals.event.subcontribution_title_changed.send(self, old=old_title, new=self.title)
        self.notifyModification()

    def getTitle( self ):
        if self.title.strip() == "":
            return "(no title)"
        return self.title

    def setDescription( self, newDesc ):
        self.description = newDesc.strip()
        self.notifyModification()

    def getDescription( self ):
        return self.description

    def setParent(self, parent):
        # (The old `if self.parent == None: return` tail was a no-op.)
        self.parent = parent

    def getParent( self ):
        return self.parent

    def setOwner(self, owner):
        self.setParent(owner)

    def getOwner( self ):
        return self.getParent()

    def getConference( self ):
        return self.parent.getConference()

    def getSession( self ):
        return self.parent.getSession()

    def getContribution(self):
        return self.parent

    def getDuration( self ):
        return self.duration

    def setDuration( self, hours, minutes=0, dur=0 ):
        """Set the duration, either from a timedelta (`dur`) or from
        hours/minutes."""
        if dur != 0:
            self.duration = dur
        else:
            self.duration = timedelta(hours=int(hours), minutes=int(minutes))
        self.notifyModification()

    def getLocation( self ):
        return self.getOwner().getLocation()

    def getRoom( self ):
        return self.getOwner().getRoom()

    def getSpeakerById( self, id ):
        """Return the speaker with the given id, or None."""
        for spk in self.speakers:
            if spk.getId() == id:
                return spk
        return None

    def newSpeaker( self, spk ):
        """Register `spk` as a speaker, assigning an id when it has none."""
        self.speakers.append( spk )
        try:
            if self._authorGen:
                pass
        except AttributeError:
            # Lazily create the counter for legacy persistent objects.
            self._authorGen = Counter()
        newId = spk.getId()
        if newId == "":
            newId = str( self._authorGen.newCount() )
        spk.includeInSubContrib(self, newId)
        if self.getConference() is not None:
            self.getConference().indexSpeaker(spk)
        self.notifyModification()

    def removeSpeaker( self, spk ):
        """Unregister, unindex and delete the given speaker (no-op if absent)."""
        if spk not in self.speakers:
            return
        self.speakers.remove( spk )
        if self.getConference() is not None:
            self.getConference().unindexSpeaker(spk)
        spk.delete()
        self.notifyModification()

    def recoverSpeaker(self, spk):
        self.newSpeaker(spk)
        spk.recover()

    def isSpeaker( self, spk ):
        """Tell whether `spk` is one of this subcontribution's speakers."""
        # Bug fix: the old code tested membership in self._speakers, an
        # attribute that does not exist on this class (it is `speakers`),
        # so this method always raised AttributeError.
        return spk in self.speakers

    def getSpeakerList ( self ):
        """Return the (live, mutable) list of speakers."""
        return self.speakers

    def getSpeakerText( self ):
        #to be removed
        try:
            if self.speakerText:
                pass
        except AttributeError:
            self.speakerText = ""
        return self.speakerText

    def setSpeakerText( self, newText ):
        self.speakerText = newText.strip()

    def appendSpeakerText( self, newText ):
        self.setSpeakerText( "%s, %s"%(self.getSpeakerText(), newText.strip()) )

    def canIPAccess( self, ip ):
        return self.getOwner().canIPAccess(ip)

    def isProtected( self ):
        # Protection is entirely inherited from the owner chain.
        return self.hasProtectedOwner()

    def getAccessProtectionLevel( self ):
        return self.getOwner().getAccessProtectionLevel()

    def hasAnyProtection( self ):
        """Tells whether a subContribution has any kind of protection over it:
        access or domain protection.
        """
        return self.getOwner().hasAnyProtection()

    def getManagerList( self ):
        return self.parent.getManagerList()

    def hasProtectedOwner( self ):
        if self.getOwner() != None:
            return self.getOwner().isProtected()
        return False

    def getAccessKey( self ):
        return self.getOwner().getAccessKey()

    def getModifKey( self ):
        return self.getConference().getModifKey()

    def canView( self, aw ):
        """tells whether the specified user has access to the current object
        or any of its sub-objects
        """
        return self.canAccess( aw )

    def isAllowedToAccess( self, user ):
        return self.parent.isAllowedToAccess( user )

    def canAccess( self, aw ):
        return self.getOwner().canAccess(aw)

    def canModify(self, aw_or_user):
        """True if the given access wrapper or user may modify this object."""
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user) or self.getConference().canKeyModify()

    def canUserModify( self, av ):
        """Tells whether a user is allowed to modify the current contribution:
        only if the user is granted to modify the contribution or the user
        can modify any of its upper objects (i.e. conference or session).
        """
        return self.getParent().canUserModify( av )

    def canUserSubmit( self, av ):
        return self.getOwner().canUserSubmit( av )

    def getAllowedToAccessList( self ):
        """Currently the SubContribution class has no access list.
        But instead of returning the owner Contribution's access list,
        I am returning an empty list. Methods such as getRecursiveAllowedToAccess()
        will call the owner Contribution anyway.
        """
        return []

    def addMaterial( self, newMat ):
        """Attach `newMat`, assigning it the next unique material id."""
        newMat.setId( str(self.__materialGenerator.newCount()) )
        newMat.setOwner( self )
        self.materials[ newMat.getId() ] = newMat
        self.notifyModification()

    def getMasterSchedule( self ):
        return self.getOwner().getSchedule()

    def delete(self):
        """Delete this subcontribution: notify, drop speakers and attachments,
        then park the object in the trash can (recoverable)."""
        signals.event.subcontribution_deleted.send(self, parent=self.getOwner())
        while len(self.getSpeakerList()) > 0:
            self.removeSpeaker(self.getSpeakerList()[0])
        self.remove_attachments()
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def getReportNumberHolder(self):
        """Return (lazily creating) the ReportNumberHolder."""
        try:
            if self._reportNumberHolder:
                pass
        except AttributeError:
            self._reportNumberHolder = ReportNumberHolder(self)
        return self._reportNumberHolder

    def setReportNumberHolder(self, rnh):
        self._reportNumberHolder = rnh
class Material(CommonObjectBase):
    """This class represents a set of electronic documents (resources) which can
       be attached to a conference, a session or a contribution.
       A material can be of several types (achieved by specialising this class)
       and is like a container of files which have some relation among them.
       It contains the minimal set of attributes to store basic meta data and
       provides useful operations to access and manage it.

       Attributes:
        owner -- (Conference, Session or Contribution) Object to which the
            material is attached to
        id -- (string) Material unique identifier. Normally used to uniquely
            identify a material within a conference, session or contribution
        title -- (string) Material denomination
        description -- (string) Longer text describing in more detail material
            intentions
        type -- (string) String identifying the material classification
        resources -- (PMapping) Collection of resources grouped within the
            material. Dictionary of references to Resource objects indexed
            by their unique relative id.
    """

    fossilizes(IMaterialMinimalFossil, IMaterialFossil)

    def __init__(self, materialData=None):
        self.id = "not assigned"
        self.__resources = {}
        self.__resourcesIdGen = Counter()
        self.title = ""
        self.description = ""
        self.type = ""
        self.owner = None
        self.__ac = AccessController(self)
        self._mainResource = None

    def __cmp__(self, other):
        if type(self) is not type(other):
            # This is actually dangerous and the ZODB manual says not to do this
            # because it relies on memory order. However, this branch should never
            # be taken anyway since we do not store different types in the same set
            # or use them as keys.
            return cmp(hash(self), hash(other))
        if self.getConference() == other.getConference():
            # Numeric ids compare numerically ("10" > "9"), otherwise fall
            # back to plain string comparison.
            if self.getId().isdigit() and other.getId().isdigit():
                return cmp(int(self.getId()), int(other.getId()))
            else:
                return cmp(self.getId(), other.getId())
        return cmp(self.getConference(), other.getConference())

    def updateNonInheritingChildren(self, elem, delete=False):
        # We do not want to store the inherited children in a Category because
        # the functionality is not used there.
        if not isinstance(self.getOwner(), Category):
            self.getAccessController().updateNonInheritingChildren(elem, delete)
            self.notify_protection_to_owner(elem, delete)

    def notify_protection_to_owner(self, elem, delete=False):
        # Propagate the protection change up to the owning object.
        self.getOwner().updateNonInheritingChildren(elem, delete)

    def setValues(self, params):
        """Sets all the values of the current material object from a dictionary
            containing the following key-value pairs:
                title-(str)
                description-(str)
           Please, note that this method sets ALL values which means that if
            the given dictionary doesn't contain any of the keys the value
            will set to a default value.
        """
        self.setTitle(params.get("title", "NO TITLE ASSIGNED"))
        self.setDescription(params.get("description", ""))
        self.notifyModification()

    def clone(self, owner):
        """Return a deep copy of this material, attached to *owner*.

        Copies title, description, id, type, protection settings and access
        key, and clones every contained resource.
        """
        mat = type(self)()
        mat.setTitle(self.getTitle())
        mat.setDescription(self.getDescription())
        mat.notifyModification()
        mat.setId(self.getId())
        mat.setOwner(owner)
        mat.setType(self.getType())
        mat.setProtection(self.getAccessController()._getAccessProtection())
        mat.setAccessKey(self.getAccessKey())
        rlist = self.getResourceList()
        for r in rlist:
            newres = r.clone(mat)
            mat.addResource(newres)
        # NOTE(review): this sets the clone's main resource to the ORIGINAL's
        # main resource object, not the cloned one — looks suspicious; confirm.
        mat.setMainResource(self.getMainResource())
        return mat

    def notifyModification(self):
        parent = self.getOwner()
        if parent:
            parent.notifyModification(raiseEvent = False)
        # Mark the persistent object as dirty so ZODB stores the change.
        self._p_changed = 1

    def getLocator(self):
        """Return a Locator uniquely identifying this material (owner locator
        plus 'materialId')."""
        if self.owner == None:
            return Locator()
        lconf = self.owner.getLocator()
        lconf["materialId"] = self.getId()
        return lconf

    def setId(self, newId):
        self.id = str(newId).strip()

    def getId(self):
        return self.id

    def getUniqueId(self):
        """returns (string) the unique identifier of the item"""
        """used mainly in the web session access key table"""
        return "%sm%s" % (self.getOwner().getUniqueId(), self.id)

    def setOwner(self, newOwner):
        self.owner = newOwner

    def getOwner(self):
        return self.owner

    def getCategory(self):
        # Only meaningful when the material is attached directly to a Category.
        if isinstance(self.getOwner(), Category):
            return self.getOwner()
        return None

    def getConference(self):
        """Return the Conference this material ultimately belongs to, or None
        when owned by nothing or by a Category."""
        owner = self.getOwner()
        if owner is None or isinstance(owner, Category):
            return None
        elif isinstance(owner, Conference):
            return owner
        else:
            return owner.getConference()

    def getSession(self):
        # Resolve the session through the contribution / owner chain.
        if self.getContribution():
            return self.getContribution().getSession()
        if isinstance(self.getOwner(), Session):
            return self.getOwner()
        if isinstance(self.getOwner(), SubContribution):
            return self.getOwner().getSession()
        return None

    def getContribution(self):
        if self.getSubContribution():
            return self.getSubContribution().getContribution()
        if isinstance(self.getOwner(), Contribution):
            return self.getOwner()
        return None

    def getSubContribution(self):
        if isinstance(self.getOwner(), SubContribution):
            return self.getOwner()
        return None

    @Updates (['MaKaC.conference.Material',
               'MaKaC.conference.Paper',
               'MaKaC.conference.Slides',
               'MaKaC.conference.Video',
               'MaKaC.conference.Poster',
               'MaKaC.conference.Reviewing'], 'title')
    def setTitle(self, newTitle):
        self.title = newTitle.strip()
        self.notifyModification()

    def getTitle(self):
        return self.title

    @Updates (['MaKaC.conference.Material',
               'MaKaC.conference.Paper',
               'MaKaC.conference.Slides',
               'MaKaC.conference.Video',
               'MaKaC.conference.Poster',
               'MaKaC.conference.Reviewing'], 'description')
    def setDescription(self, newDescription):
        self.description = newDescription.strip()
        self.notifyModification()

    def getDescription(self):
        return self.description

    def setType(self, newType):
        self.type = newType.strip()
        self.notifyModification()

    def getType(self):
        return self.type

    def getReviewingState(self):
        """ Returns the reviewing state of a material.
            The state is represented by an integer:
            0 : there's no reviewing state because the material does not belong to a contribution, or the conference
                has not reviewing module enabled, or the module is enabled but the mode is "no reviewing"
            1 : the material is not subject to reviewing, because this kind of material is not reviewable in the conference
            2 : the material is subject to reviewing, but has not been submitted yet by the author
            3 : the material is subject to reviewing, has been submitted by the author, but has not been judged yet
            4 : the material is subject to reviewing, has been submitted by the author, and has been judged as Accepted
            5 : the material is subject to reviewing, has been submitted by the author, and has been judged as Rejected
        """
        if isinstance(self.owner, Contribution):
            conference = self.owner.getConference()
            if conference.getConfPaperReview().getChoice() == ConferencePaperReview.NO_REVIEWING: #conference has no reviewing process
                return 0
            else: #conference has reviewing
                #if self.id in reviewableMaterials: #material is reviewable
                if isinstance(self, Reviewing): #material is reviewable
                    lastReview = self.owner.getReviewManager().getLastReview()
                    if lastReview.isAuthorSubmitted(): #author has submitted
                        refereeJudgement = lastReview.getRefereeJudgement()
                        if refereeJudgement.isSubmitted(): #referee has submitted judgement
                            if refereeJudgement.getJudgement() == "Accept":
                                return 4
                            elif refereeJudgement.getJudgement() == "Reject":
                                return 5
                            else:
                                #we should never arrive here because referee judgements that are 'To be corrected'
                                #or a custom state should imply a new review being created, so the state is back to 2
                                raise MaKaCError("RefereeJudgement should be 'Accept' or 'Reject' in this method")
                        else: #referee has not submitted judgement
                            return 3
                    else: #author has not submitted
                        return 2
                else: #material is not reviewable
                    return 1
        else: #material does not belong to a contribution
            return 0

    def _getRepository(self):
        """Return the main local file repository, creating and registering it
        in the DB root on first use."""
        dbRoot = DBMgr.getInstance().getDBConnection().root()
        try:
            fr = dbRoot["local_repositories"]["main"]
        except KeyError, e:
            fr = fileRepository.MaterialLocalRepository()
            dbRoot["local_repositories"] = OOBTree()
            dbRoot["local_repositories"]["main"] = fr
        return fr

    def hasFile(self, name):
        """Return True when a resource with the given file name exists."""
        for f in self.getResourceList():
            if f.getName() == name:
                return True
        return False

    def addResource(self, newRes, forcedFileId = None):
        """Attach *newRes* to this material: assign it a fresh id, archive it
        in the repository and register it in the resource map."""
        newRes.setOwner( self )
        newRes.setId( str( self.__resourcesIdGen.newCount() ) )
        newRes.archive( self._getRepository(), forcedFileId = forcedFileId )
        self.__resources[newRes.getId()] = newRes
        self.notifyModification()
        Logger.get('storage').debug("Finished storing resource %s for material %s" % (newRes.getId(), self.getLocator()))

    def getResourceList(self, sort=True):
        # NOTE: the local name 'list' shadows the builtin within this method.
        list = self.__resources.values()
        if sort:
            list.sort(key=lambda f: f.getName().lower())
        return list

    def getNbResources(self):
        return len(self.__resources)

    def getResourceById(self, id):
        return self.__resources[id]

    def removeResource(self, res):
        """Detach and delete *res*; clear the main-resource pointer if it was
        pointing at the removed resource."""
        if res.getId() in self.__resources.keys():
            del self.__resources[ res.getId() ]
            res.delete()
            self.notifyModification()
        if self.getMainResource() is not None and \
                self._mainResource.getId() == res.getId():
            self._mainResource = None

    def recoverResource(self, recRes):
        """Re-attach a resource previously sent to the trash can."""
        recRes.setOwner(self)
        self.__resources[recRes.getId()] = recRes
        recRes.recover()
        self.notifyModification()

    def getMainResource(self):
        # Lazy init for objects stored before the attribute existed.
        try:
            if self._mainResource:
                pass
        except AttributeError:
            self._mainResource = None
        return self._mainResource

    def setMainResource(self, mr):
        self._mainResource = mr

    def delete(self):
        """Delete the material, its resources and any reviewing state, then
        move it to the trash can."""
        self.__ac.unlinkAvatars('access')
        for res in self.getResourceList():
            self.removeResource( res )
        # Deleting a reviewing material resets the owner's review manager.
        if self.getReviewingState():
            self.owner._reviewManager = ReviewManager(self.owner)
        self.notify_protection_to_owner(self, delete=True)
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def isProtected(self):
        # tells if a material is protected or not
        return (self.hasProtectedOwner() + self.getAccessProtectionLevel()) > 0

    def getAccessProtectionLevel(self):
        return self.__ac.getAccessProtectionLevel()

    def isItselfProtected(self):
        return self.__ac.isItselfProtected()

    def hasProtectedOwner(self):
        if self.getOwner() != None:
            return self.getOwner().isProtected()
        return False

    @Updates (['MaKaC.conference.Material',
               'MaKaC.conference.Paper',
               'MaKaC.conference.Slides',
               'MaKaC.conference.Video',
               'MaKaC.conference.Poster',
               'MaKaC.conference.Reviewing'], 'protection', lambda(x): int(x))
    def setProtection(self, private):
        self.__ac.setProtection( private )
        self.notify_protection_to_owner(self)
        self._p_changed = 1

    def isHidden(self):
        return self.__ac.isHidden()

    @Updates (['MaKaC.conference.Material',
               'MaKaC.conference.Paper',
               'MaKaC.conference.Slides',
               'MaKaC.conference.Video',
               'MaKaC.conference.Poster',
               'MaKaC.conference.Reviewing'], 'hidden')
    def setHidden(self, hidden):
        self.__ac.setHidden( hidden )
        self._p_changed = 1

    @Updates (['MaKaC.conference.Material',
               'MaKaC.conference.Paper',
               'MaKaC.conference.Slides',
               'MaKaC.conference.Video',
               'MaKaC.conference.Poster',
               'MaKaC.conference.Reviewing'], 'accessKey')
    def setAccessKey(self, pwd=""):
        self.__ac.setAccessKey(pwd)
        self._p_changed = 1

    def getAccessKey(self):
        return self.__ac.getAccessKey()

    def grantAccess(self, prin):
        self.__ac.grantAccess( prin )
        self._p_changed = 1

    def revokeAccess(self, prin):
        self.__ac.revokeAccess( prin )
        self._p_changed = 1

    def canView(self, aw):
        """tells whether the specified user has access to the current object
            or any of its sub-objects
        """
        if self.isHidden() and not self.canAccess( aw ):
            return False
        else:
            return True

    def isAllowedToAccess(self, user):
        return (not self.isItselfProtected() and self.getOwner().isAllowedToAccess( user )) or self.__ac.canUserAccess( user ) or self.canUserModify(user)

    def canAccess(self, aw):
        # Allow harvesters (Invenio, offline cache) to access
        # protected pages
        if has_request_context() and self.__ac.isHarvesterIP(request.remote_addr):
            return True
        #####################################################
        # Managers have always access
        if self.canModify(aw):
            return True
        canUserAccess = self.isAllowedToAccess(aw.getUser())
        canIPAccess = self.canIPAccess(request.remote_addr)
        if not self.isProtected():
            return canUserAccess or canIPAccess
        else:
            canKeyAccess = self.canKeyAccess(aw)
            return canUserAccess or canKeyAccess

    def canKeyAccess(self, aw):
        """Return True when the session carries a valid access key for this
        material (or, lacking its own key, for its conference)."""
        key = session.get('accessKeys', {}).get(self.getUniqueId())
        if self.getAccessKey():
            # Material has an access key => require this key
            if not key:
                return False
            return self.__ac.canKeyAccess(key)
        elif self.getConference():
            # If it has no key we check the conference's key
            conf_key = session.get('accessKeys', {}).get(self.getConference().getUniqueId())
            return self.getConference().canKeyAccess(aw, conf_key)
        return False

    def grantModification(self, prin):
        self.__ac.grantModification( prin )
        if isinstance(prin, AvatarUserWrapper):
            prin.linkTo(self, "manager")
        self._p_changed = 1

    def revokeModification(self, prin):
        self.__ac.revokeModification( prin )
        if isinstance(prin, AvatarUserWrapper):
            prin.unlinkTo(self, "manager")
        self._p_changed = 1

    def canModify(self, aw_or_user):
        # Accepts either an AccessWrapper or a user object.
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user) or (self.getConference() and self.getConference().canKeyModify())

    def canUserModify(self, user):
        """Tells whether a user is allowed to modify the current contribution:
            only if the user is granted to modify the contribution or the user
            can modify any of its upper objects (i.e. conference or session).
        """
        return self.getOwner().canUserModify( user )

    def getModifKey(self):
        return self.getConference().getModifKey()

    def getManagerList(self):
        return self.__ac.getModifierList()

    def getAllowedToAccessList(self):
        return self.__ac.getAccessList()

    def requireDomain(self, dom):
        self.__ac.requireDomain( dom )
        self._p_changed = 1

    def freeDomain(self, dom):
        self.__ac.freeDomain( dom )
        self._p_changed = 1

    def getDomainList(self):
        return self.__ac.getRequiredDomainList()

    def getAccessController(self):
        return self.__ac

    def isBuiltin(self):
        # Overridden by BuiltinMaterial.
        return False
class BuiltinMaterial(Material):
    """Material subclass marking the non-customizable, built-in material
    types (e.g. paper, slides, reviewing)."""

    def isBuiltin(self):
        return True
class Reviewing(BuiltinMaterial):
    """Built-in material holding the files of the paper-reviewing process.

    Always uses the fixed id "reviewing"; setId() is a no-op so the id
    cannot be changed.
    """

    def __init__(self, materialData = None):
        Material.__init__( self, materialData )
        # Fixed identifier for the reviewing material.
        self.id = "reviewing"

    def setId(self, newId):
        # The id is fixed to "reviewing"; ignore any attempt to change it.
        return

    def getContribution(self):
        # When owned by a Review, resolve the contribution through it.
        if isinstance(self.getOwner(), Review):
            return self.getOwner().getContribution()
        return Material.getContribution(self)
class Resource(CommonObjectBase):
    """This is the base class for representing individual resources which can
        be included in material containers for lately being attached to
        conference objects (i.e. conferences, sessions or contributions). This
        class provides basic data and operations to handle this resources.
        Resources can be of several types (files, links, ...) which means
        different specialisations of this class.

       Attributes:
        id -- (string) Allows to assign the resource a unique identifier. It
            is normally used to uniquely identify the resource among other
            resources included in a certain material.
        name -- (string) Short description about the purpose or the contents
            of the resource.
        description - (string) detailed and varied information about the
            resource.
        __owner - (Material) reference to the material object in which the
            current resource is included.
    """

    fossilizes(IResourceMinimalFossil, IResourceFossil)

    def __init__(self, resData = None):
        self.id = "not assigned"
        self.name = ""
        self.description = ""
        self._owner = None
        self.__ac = AccessController(self)
        self.pdfConversionRequestDate = None

    def __cmp__(self, other):
        if type(self) is not type(other):
            # This is actually dangerous and the ZODB manual says not to do this
            # because it relies on memory order. However, this branch should never
            # be taken anyway since we do not store different types in the same set
            # or use them as keys.
            return cmp(hash(self), hash(other))
        if self.getConference() == other.getConference():
            return cmp(self.getId(), other.getId())
        return cmp(self.getConference(), other.getConference())

    def clone(self, conf, protection=True):
        """Return a copy of this resource owned by *conf*, optionally copying
        the protection settings."""
        res = self.__class__()
        res.setName(self.getName())
        res.setDescription(self.getDescription())
        res.setOwner(conf)
        res.notifyModification()
        res.setId(self.getId())
        if protection:
            res.setProtection(self.getAccessController()._getAccessProtection())
        #res.__ac = self.getAccessController()
        return res

    def notifyModification(self):
        parent = self.getOwner()
        if parent:
            parent.setModificationDate()
        # Mark the persistent object as dirty so ZODB stores the change.
        self._p_changed = 1

    def getLocator(self):
        """Return a Locator identifying this resource (owner locator plus
        'resId')."""
        if self._owner == None:
            return Locator()
        lconf = self._owner.getLocator()
        lconf["resId"] = self.getId()
        return lconf

    def setId(self, newId):
        self.id = newId.strip()

    def getId(self):
        return self.id

    def getUniqueId(self):
        """returns (string) the unique identifier of the item
            used mainly in the web session access key table
            for resources, it is the same as the father material since
            only the material can be protected with an access key"""
        return self.getOwner().getUniqueId()

    def setOwner(self, newOwner):
        self._owner = newOwner

    def getOwner(self):
        return self._owner

    def getCategory(self):
        #raise "%s:%s:%s"%(self.getOwner(), Material, isinstance(self.getOwner, Material))
        if isinstance(self.getOwner(), Category):
            return self.getOwner()
        if isinstance(self.getOwner(), Material):
            return self.getOwner().getCategory()
        return None

    def getConference(self):
        # this check owes itself to the fact that some
        # protection checking functions call getConference()
        # directly on resources, without caring whether they
        # are owned by Conferences or Categories
        if self._owner is None or isinstance(self._owner, Category):
            return None
        else:
            return self._owner.getConference()

    def getSession(self):
        return self._owner.getSession()

    def getContribution(self):
        return self._owner.getContribution()

    def getSubContribution(self):
        return self._owner.getSubContribution()

    @Updates (['MaKaC.conference.Link',
               'MaKaC.conference.LocalFile'], 'name')
    def setName(self, newName):
        self.name = newName.strip()
        self.notifyModification()

    def getName(self):
        return self.name

    @Updates (['MaKaC.conference.Link',
               'MaKaC.conference.LocalFile'], 'description')
    def setDescription(self, newDesc):
        self.description = newDesc.strip()
        self.notifyModification()

    def getDescription(self):
        return self.description

    def archive(self, repository = None, forcedFileId = None):
        """performs necessary operations to ensure the archiving of the
            resource. By default is doing nothing as the persistence of the
            system already ensures the archiving of the basic resource data"""
        return

    def delete(self):
        """Detach this resource from its owner and move it to the trash can."""
        if self._owner is not None:
            self.notify_protection_to_owner(delete=True)
            self._owner.removeResource(self)
            self.__ac.unlinkAvatars('access')
            self._owner = None
            TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def isProtected(self):
        # tells if a resource is protected or not
        return (self.hasProtectedOwner() + self.getAccessProtectionLevel()) > 0

    def getAccessProtectionLevel(self):
        return self.__ac.getAccessProtectionLevel()

    def isItselfProtected(self):
        return self.__ac.isItselfProtected()

    def hasProtectedOwner(self):
        if self.getOwner() != None:
            return self.getOwner().isProtected()
        return False

    def notify_protection_to_owner(self, delete=False):
        # Resources can be attached to other objects (e.g. Registrant),
        # but we wish to trigger the notification only when attached to materials (except paper reviewing)
        if isinstance(self.getOwner(), Material) and not isinstance(self.getOwner(), Reviewing):
            self.getOwner().updateNonInheritingChildren(self, delete)

    @Updates (['MaKaC.conference.Link',
               'MaKaC.conference.LocalFile'],'protection', lambda(x): int(x))
    def setProtection(self, private):
        self.__ac.setProtection( private )
        self.notify_protection_to_owner()

    def grantAccess(self, prin):
        self.__ac.grantAccess( prin )

    def revokeAccess(self, prin):
        self.__ac.revokeAccess( prin )

    def canView(self, aw):
        """tells whether the specified user has access to the current object
            or any of its sub-objects
        """
        return self.canAccess( aw )

    def isAllowedToAccess(self, user):
        return self.__ac.canUserAccess( user ) or self.canUserModify( user ) or (not self.isItselfProtected() and self.getOwner().isAllowedToAccess( user ))

    def canAccess(self, aw):
        # Allow harvesters (Invenio, offline cache) to access
        # protected pages
        if has_request_context() and self.__ac.isHarvesterIP(request.remote_addr):
            return True
        #####################################################
        # Managers have always access
        if self.canModify(aw):
            return True
        if not self.canIPAccess(request.remote_addr) and not self.canUserModify(aw.getUser()) and \
                not self.isAllowedToAccess(aw.getUser()):
            return False
        if not self.isProtected():
            return True
        flag = self.isAllowedToAccess( aw.getUser() )
        # Otherwise fall back to access-key checks on the resource, its owner
        # and (when the resource itself carries no key) the conference.
        return flag or self.canKeyAccess(aw) or self.getOwner().canKeyAccess(aw) or \
            (self.getConference() != None and self.getConference().canKeyAccess(aw) and self.getAccessKey() == "") or \
            (self.getConference() != None and self.getConference().canKeyAccess(aw) and self.getAccessKey() == self.getConference().getAccessKey())

    def grantModification(self, prin):
        self.__ac.grantModification( prin )

    def revokeModification(self, prin):
        self.__ac.revokeModification( prin )

    def canModify(self, aw_or_user):
        # Accepts either an AccessWrapper or a user object.
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user) or (self.getConference() and self.getConference().canKeyModify())

    def canUserModify(self, user):
        """Tells whether a user is allowed to modify the current contribution:
            only if the user is granted to modify the contribution or the user
            can modify any of its upper objects (i.e. conference or session).
        """
        return self.getOwner().canUserModify( user )

    def getModifKey(self):
        return self.getConference().getModifKey()

    def getManagerList(self):
        return self.__ac.getModifierList()

    def getAllowedToAccessList(self):
        return self.__ac.getAccessList()

    def getURL(self):
        # Overridden by subclasses that point at an actual location.
        return ""

    def requireDomain(self, dom):
        self.__ac.requireDomain( dom )

    def freeDomain(self, dom):
        self.__ac.freeDomain( dom )

    def getDomainList(self):
        return self.__ac.getRequiredDomainList()

    def getAccessController(self):
        return self.__ac

    def getAccessKey(self):
        # Resources inherit the access key of their owning material.
        if self.getOwner() is not None:
            return self.getOwner().getAccessKey()
        return ""

    def canKeyAccess(self, aw):
        """Return True when the session holds a key matching this resource's
        (inherited) access key, or the conference key when none is set."""
        accessKey = self.getAccessKey()
        key = session.get('accessKeys', {}).get(self.getUniqueId())
        if not key:
            return False
        elif accessKey and key == accessKey:
            return True
        elif not accessKey and key == self.getConference().getAccessKey():
            return True
        return False

    def getReviewingState(self):
        """ Returns the reviewing state of a resource, which is the reviewing state of the material to which it belongs.
            The state is represented by an integer:
            0 : there's no reviewing state because the resource doesn't belong to a material,
                the material does not belong to a contribution, or the conference does not have reviewing.
            1 : the material is not subject to reviewing, because this kind of material is not reviewable in the conference
            2 : the material is subject to reviewing, but has not been submitted yet by the author
            3 : the material is subject to reviewing, has been submitted by the author, but has not been judged yet
            4 : the material is subject to reviewing, has been submitted by the author, and has been judged as Accepted
            5 : the material is subject to reviewing, has been submitted by the author, and has been judged as Rejected
        """
        if isinstance(self.getOwner(), Material):
            return self.getOwner().getReviewingState()
        else: #resource does not belong to a material
            return 0

    def setPDFConversionRequestDate(self, newPdfConversionRequestDate):
        self.pdfConversionRequestDate = newPdfConversionRequestDate

    def getPDFConversionStatus(self):
        # Lazy init for objects stored before the attribute existed.
        if not hasattr(self, "pdfConversionRequestDate"):
            self.pdfConversionRequestDate = None
        # A conversion request is considered in progress for 50 seconds.
        if self.pdfConversionRequestDate is not None and self.pdfConversionRequestDate + timedelta(seconds=50) > nowutc() :
            return 'converting'
        return None
class LocalFile(Resource):
    """Specialises Resource class in order to represent files which can be
        stored in the system. The user can choose to use the system as an
        archive of electronic files so he may want to attach a file which is
        in his computer to a conference so it remains there and must be kept
        in the system. This object contains the file basic metadata and provides
        the necessary operations to ensure the corresponding file is archived
        (it uses one of the file repositories of the system to do so) and keeps
        the reference for being able to access it afterwards.

       Params:
        fileName -- (string) Name of the file. Normally the original name of
            the user submitted file is kept.
        filePath -- (string) If it is set, it contains a local path to the
            file submitted by the user and uploaded in the system. This
            attribute is only temporary used so it keeps a pointer to a
            temporary uploaded file.
        __repository -- (FileRep) Once a file is archived, it is kept in a
            FileRepository for long term. This attribute contains a pointer
            to the file repository where the file is kept.
        __archivedId -- (string) It contains a unique identifier for the file
            inside the repository where it is archived.
    """

    fossilizes(ILocalFileMinimalFossil, ILocalFileFossil, ILocalFileExtendedFossil, ILocalFileAbstractMaterialFossil)

    def __init__(self, resData = None):
        Resource.__init__( self, resData )
        self.fileName= ""
        self.fileType = ""
        self.filePath = ""
        self.__repository = None
        self.__archivedId = ""

    def clone(self, conf, protection=True):
        """Return a copy of this file resource (path and name included)."""
        localfile = Resource.clone(self, conf, protection)
        localfile.setFilePath(self.getFilePath())
        localfile.setFileName(self.getFileName())
        return localfile

    def getLocator(self):
        """Extend the Resource locator with the file extension ('fileExt')."""
        locator = Resource.getLocator(self)
        try:
            locator['fileExt'] = (self.fileType.lower() or
                                  os.path.splitext(self.fileName)[1].lower().lstrip('.') or None)
        except Exception:
            locator['fileExt'] = 'bin'  # no extension => use a dummy
        return locator

    def setFileName(self, newFileName, checkArchive=True):
        """While the file is not archived sets the file name of the current
            object to the one specified (if a full path is specified the
            base name is extracted) replacing on it blanks by underscores.
        """
        if checkArchive and self.isArchived():
            raise MaKaCError( _("The file name of an archived file cannot be changed"), _("File Archiving"))
        #Using os.path.basename is not enough as it only extract filenames
        # correclty depending on the server platform. So we need to convert
        # to the server platform and apply the basename extraction. As I
        # couldn't find a python function for this this is done manually
        # although it can contain errors
        #On windows basename function seems to work properly with unix file
        # paths
        if newFileName.count("/"):
            #unix filepath
            newFileName = newFileName.split("/")[-1]
        else:
            #windows file path: there "/" is not allowed on windows paths
            newFileName = newFileName.split("\\")[-1]
        self.fileName = newFileName.strip().replace(" ", "_")

    def getFileName(self):
        return self.fileName

    def getFileType(self):
        """Return the configured type for the file's extension, or the raw
        extension when no mapping is configured."""
        fileExtension = os.path.splitext( self.getFileName() )[1]
        if fileExtension != "":
            fileExtension = fileExtension[1:]
        cfg = Config.getInstance()
        if cfg.getFileType( fileExtension ) != "":
            return cfg.getFileType( fileExtension )
        else:
            return fileExtension

    def setFilePath(self, filePath):
        """Point this (not yet archived) resource at a local file path."""
        if self.isArchived():
            raise MaKaCError( _("The path of an archived file cannot be changed"), _("File Archiving"))
        if not os.access( filePath.strip(), os.F_OK ):
            raise Exception( _("File does not exist : %s")%filePath.strip())
        self.filePath = filePath.strip()

    def getCreationDate(self):
        return self.__repository.getCreationDate(self.__archivedId)

    def getFilePath(self):
        # Before archiving, the temporary upload path; afterwards, the
        # repository path.
        if not self.isArchived():
            return self.filePath
        return self.__repository.getFilePath(self.__archivedId)

    def getSize(self):
        """Return the file size in bytes."""
        if not self.isArchived():
            return int(os.stat(self.getFilePath())[stat.ST_SIZE])
        return self.__repository.getFileSize( self.__archivedId )

    def setArchivedId(self, rep, id):
        self.__repository = rep
        self.__archivedId = id

    def getRepositoryId(self):
        return self.__archivedId

    def setRepositoryId(self, id):
        self.__archivedId = id

    def isArchived(self):
        return self.__repository != None and self.__archivedId != ""

    def readBin(self):
        """Return the raw file contents; only available once archived."""
        if not self.isArchived():
            raise MaKaCError( _("File not available until it has been archived") , _("File Archiving"))
        return self.__repository.readFile( self.__archivedId )

    def archive(self, repository=None, forcedFileId = None):
        """Store the file in *repository* and clear the temporary path."""
        if self.isArchived():
            raise Exception( _("File is already archived"))
        if not repository:
            raise Exception( _("Destination repository not set"))
        if self.filePath == "":
            return _("Nothing to archive")
        repository.storeFile( self, forcedFileId = forcedFileId)
        self.filePath = ""
        self.notifyModification()

    def recover(self):
        """Restore the archived file in the repository and from the trash."""
        if not self.isArchived():
            raise Exception( _("File is not archived, so it cannot be recovered."))
        if not self.__repository:
            raise Exception( _("Destination repository not set."))
        self.__repository.recoverFile(self)
        Resource.recover(self)
        self.notifyModification()

    def delete(self):
        """Remove the physical file (or retire it from the repository), then
        delete the resource itself."""
        if not self.isArchived():
            os.remove( self.getFilePath() )
        try:
            self.__repository.retireFile( self )
        except AttributeError, e:
            pass
        Resource.delete( self )

    def getRepository(self):
        return self.__repository

    def __str__(self):
        return self.getFileName()
class TCIndex(Persistent):
    """Index for conference track coordinators.

    Allows the owner (conference) to answer efficiently whether a user is
    coordinating any conference track. Implemented with an OOBTree keyed by
    Avatar id (unique and non-variable) mapping to the list of coordinated
    tracks. It is the responsibility of the index owner to keep it
    up-to-date, i.e. notify coordinator additions and removals.
    """

    def __init__(self):
        self._idx = OOBTree()

    def getTracks(self, av):
        """Return the list of tracks *av* is coordinating ([] when av is None
        or unknown)."""
        # Fixed: identity comparison spelled `av == None` in the original.
        if av is None:
            return []
        return self._idx.get(av.getId(), [])

    def indexCoordinator(self, av, track):
        """Register *av* as a coordinator of *track*; no-op on None args."""
        if av is None or track is None:
            return
        # Fixed: replaced the deprecated has_key()/branching lookup with .get().
        tracks = self._idx.get(av.getId(), [])
        if track not in tracks:
            tracks.append(track)
            # Reassign the list: ZODB cannot detect in-place mutation, so the
            # BTree entry must be set again for the change to be persisted.
            self._idx[av.getId()] = tracks
        self.notifyModification()

    def unindexCoordinator(self, av, track):
        """Remove *av* as a coordinator of *track*; no-op on None args."""
        if av is None or track is None:
            return
        tracks = self._idx.get(av.getId(), [])
        if track in tracks:
            tracks.remove(track)
            # Reassign so the BTree persists the mutated list (see above).
            self._idx[av.getId()] = tracks
        self.notifyModification()

    def notifyModification(self):
        # Flag the persistent object as dirty so ZODB writes it on commit.
        self._p_changed = 1
class Track(CoreObject):
def __init__( self ):
self.conference = None
self.id = "not assigned"
self.title = ""
self.description = ""
self.subTracks = {}
self.__SubTrackGenerator = Counter()
self._abstracts = OOBTree()
self._coordinators = []
self._contributions = OOBTree()
self._code=""
def __cmp__(self, other):
if type(self) is not type(other):
# This is actually dangerous and the ZODB manual says not to do this
# because it relies on memory order. However, this branch should never
# be taken anyway since we do not store different types in the same set
# or use them as keys.
return cmp(hash(self), hash(other))
if self.getConference() == other.getConference():
return cmp(self.getId(), other.getId())
return cmp(self.getConference(), other.getConference())
def clone(self, conference):
tr = Track()
tr.setConference(conference)
tr.setTitle(self.getTitle())
tr.setCode(self.getCode())
tr.setDescription(self.getDescription())
for co in self.getCoordinatorList() :
tr.addCoordinator(co)
for subtr in self.getSubTrackList() :
tr.addSubTrack(subtr.clone())
return tr
def delete( self ):
"""Deletes a track from the system. All the associated abstracts will
also be notified so the track is no longer associated to them.
"""
#XXX: Should we allow to delete a track when there are some abstracts
# or contributions submitted for it?!?!?!?!
# we must notify each abstract in the track about the deletion of the
# track
while len(self._abstracts)>0:
k = self._abstracts.keys()[0]
abstract = self._abstracts[k]
del self._abstracts[k]
self._p_changed = True
abstract.removeTrack( self )
# we must notify each contribution in the track about the deletion of the
# track
while len(self._contributions)>0:
k = self._contributions.keys()[0]
contrib = self._contributions[k]
del self._contributions[k]
self._p_changed = True
contrib.removeTrack( self )
# we must delete and unindex all the possible track coordinators
while len(self._coordinators)>0:
self.removeCoordinator(self._coordinators[0])
# we must notify the conference about the track deletion
if self.conference:
conf = self.conference
self.conference = None
conf.removeTrack( self )
TrashCanManager().add(self)
def recover(self):
TrashCanManager().remove(self)
def canModify(self, aw_or_user):
return self.conference.canModify(aw_or_user)
def canUserModify( self, av ):
return self.conference.canUserModify( av )
def canView( self, aw ):
return self.conference.canView( aw )
def notifyModification( self ):
parent = self.getConference()
if parent:
parent.setModificationDate()
self._p_changed = 1
def getLocator( self ):
"""Gives back a globaly unique identification encapsulated in a Locator
object for the track instance
"""
if self.conference == None:
return Locator()
lconf = self.conference.getLocator()
lconf["trackId"] = self.getId()
return lconf
def setConference(self, conference):
    # Back-reference to the owning conference (set when the track is added).
    self.conference = conference

def getConference( self ):
    return self.conference

def getOwner( self ):
    # Alias used by the generic ownership protocol.
    return self.getConference()

def setId( self, newId ):
    # Ids are always stored as strings.
    self.id = str(newId)

def getId( self ):
    return self.id

def setTitle( self, newTitle ):
    self.title = newTitle
    self.notifyModification()

def getTitle( self ):
    return self.title

def setDescription(self, newDescription ):
    self.description = newDescription
    self.notifyModification()

def getDescription(self):
    return self.description
def getCode(self):
    """Return the track code, lazily defaulting to the track id.

    ``_code`` may be missing on instances persisted before the attribute
    was introduced, so it is backfilled on first access.
    """
    # Simplified the legacy try/except AttributeError probe into a
    # hasattr() check (same pattern BOAConfig uses elsewhere in this file).
    if not hasattr(self, "_code"):
        self._code = self.id
    return self._code

def setCode(self, newCode):
    # Codes are stored as stripped strings.
    self._code = str(newCode).strip()
def __generateNewSubTrackId( self ):
    # Sequential per-track counter; ids are stored as strings.
    return str(self.__SubTrackGenerator.newCount())

def addSubTrack( self, newSubTrack ):
    """Registers the contribution passed as parameter within the session
        assigning it a unique id.
    """
    # No-op if the sub-track is already registered.
    if newSubTrack in self.subTracks.values():
        return
    subTrackId = newSubTrack.getId()
    if subTrackId == "not assigned":
        # Fresh sub-tracks carry the sentinel id; allocate a real one.
        subTrackId = self.__generateNewSubTrackId()
    self.subTracks[subTrackId] = newSubTrack
    newSubTrack.setTrack( self )
    newSubTrack.setId( subTrackId )
    self.notifyModification()
def removeSubTrack( self, subTrack ):
    """Removes the indicated contribution from the session
    """
    if subTrack in self.subTracks.values():
        del self.subTracks[ subTrack.getId() ]
        # Mark dirty so ZODB persists the container mutation.
        self._p_changed = True
        subTrack.setTrack( None )
        # Soft-delete the sub-track (moves it to the trash can).
        subTrack.delete()
        self.notifyModification()

def recoverSubTrack(self, subTrack):
    # Re-register a previously deleted sub-track and restore it from trash.
    self.addSubTrack(subTrack)
    subTrack.recover()

def newSubTrack( self ):
    # Factory helper: create, register and return an empty sub-track.
    st = SubTrack()
    self.addSubTrack( st )
    return st
def getSubTrackById( self, id ):
    """Return the sub-track registered under ``id``, or None."""
    # Idiom fix: membership with ``in`` instead of the deprecated
    # dict.has_key() (removed in Python 3; ``in`` works on Python 2 too).
    if id in self.subTracks:
        return self.subTracks[ id ]
    return None

def getSubTrackList( self ):
    # Returns the mapping's values() directly (a list on Python 2).
    return self.subTracks.values()
def getAbstractList( self ):
    """Return the abstracts currently associated with this track."""
    # Lazily create the storage for instances persisted before the
    # attribute existed (hasattr replaces the old try/except probe).
    if not hasattr(self, "_abstracts"):
        self._abstracts = OOBTree()
    return self._abstracts.values()

def getAbstractById( self, id ):
    """Return the abstract with the given id (whitespace-stripped), or None."""
    if not hasattr(self, "_abstracts"):
        self._abstracts = OOBTree()
    return self._abstracts.get(str(id).strip())

def hasAbstract( self, abstract ):
    """Tell whether ``abstract`` is associated with this track."""
    if not hasattr(self, "_abstracts"):
        self._abstracts = OOBTree()
    # Idiom fix: ``in`` instead of the deprecated has_key().
    return abstract.getId() in self._abstracts
def addAbstract( self, abstract ):
    """Adds an abstract to the track abstract list.

        Notice that this method doesn't notify the abstract about the track
        addition.
    """
    if not self.hasAbstract( abstract ):
        self._abstracts[ abstract.getId() ] = abstract
        #abstract.addTrack( self )

def removeAbstract( self, abstract ):
    """Removes an abstract from the track abstract list.

        Notice that this method doesn't notify the abstract about the track
        removal.
    """
    if self.hasAbstract( abstract ):
        del self._abstracts[ abstract.getId() ]
        # Mark dirty so ZODB persists the container mutation.
        self._p_changed = True
        #abstract.removeTrack( self )
def addCoordinator( self, av ):
    """Grants coordination privileges to user.

        Arguments:
            av -- (AvatarUserWrapper) the user to which
                coordination privileges must be granted.
    """
    # Lazy-init for instances persisted before the attribute existed.
    # Replaces the old ``except AttributeError, e`` probe, which used
    # Python 2-only syntax and bound an unused variable.
    if not hasattr(self, "_coordinators"):
        self._coordinators = []
        self.notifyModification()
    if av not in self._coordinators:
        self._coordinators.append( av )
        # Keep the conference-level coordinator index in sync.
        self.getConference().addTrackCoordinator( self, av )
        av.linkTo(self, "coordinator")
        self.notifyModification()
def removeCoordinator( self, av ):
    """Revokes coordination privileges to user.

        Arguments:
            av -- (AvatarUserWrapper) user for which coordination privileges
                must be revoked
    """
    # Lazy-init mirrors addCoordinator(); replaces the Python 2-only
    # ``except AttributeError, e`` probe (unused variable removed).
    if not hasattr(self, "_coordinators"):
        self._coordinators = []
        self.notifyModification()
    if av in self._coordinators:
        self._coordinators.remove( av )
        # Keep the conference-level coordinator index in sync.
        self.getConference().removeTrackCoordinator( self, av )
        av.unlinkTo(self, "coordinator")
        self.notifyModification()
def isCoordinator( self, av ):
    """Tells whether the specified user is a coordinator of the track.

        Arguments:
            av -- (AvatarUserWrapper) user to be checked

        Return value: (boolean)
    """
    # Lazy-init replaces the Python 2-only ``except AttributeError, e``.
    if not hasattr(self, "_coordinators"):
        self._coordinators = []
    return av in self._coordinators
def getCoordinatorList( self ):
    """Return all users which have privileges to coordinate the track.

        Return value: (list)
    """
    # Lazy-init replaces the Python 2-only ``except AttributeError, e``.
    if not hasattr(self, "_coordinators"):
        self._coordinators = []
    return self._coordinators
def canCoordinate( self, aw ):
    """Tells if a user has coordination privileges.

        Only track coordinators have coordination privileges over a track.

        Params:
            aw -- (MaKaC.accessControl.AccessWrapper) User access
                information for which the coordination privileges must be
                checked.

        Return value: (boolean)
    """
    # Users with modification rights implicitly get coordination rights.
    return self.isCoordinator( aw.getUser() ) or self.canModify( aw )
def addContribution( self, newContrib ):
    """Associate a contribution with this track and set the back-reference.

    No-op if a contribution with the same id is already registered.
    """
    # Lazy-init for old persisted instances (hasattr replaces try/except).
    if not hasattr(self, "_contributions"):
        self._contributions = OOBTree()
    # Idiom fix: ``in`` instead of the deprecated has_key().
    if newContrib.getId() in self._contributions:
        return
    self._contributions[ newContrib.getId() ] = newContrib
    newContrib.setTrack( self )
def getModifKey( self ):
    # The modification key is shared conference-wide.
    return self.getConference().getModifKey()
def removeContribution( self, contrib ):
    """Dissociate a contribution from this track (no-op if absent)."""
    # Lazy-init for old persisted instances (hasattr replaces try/except).
    if not hasattr(self, "_contributions"):
        self._contributions = OOBTree()
    # Idiom fix: ``not in`` instead of the deprecated has_key().
    if contrib.getId() not in self._contributions:
        return
    del self._contributions[ contrib.getId() ]
    # Mark dirty so ZODB persists the container mutation.
    self._p_changed = True
    contrib.setTrack( None )
def hasContribution( self, contrib ):
    """Tell whether ``contrib`` is associated with this track."""
    if not hasattr(self, "_contributions"):
        self._contributions = OOBTree()
    # Idiom fix: ``in`` instead of the deprecated has_key().
    return contrib.getId() in self._contributions

def getContributionList(self):
    """Return the contributions associated with this track."""
    if not hasattr(self, "_contributions"):
        self._contributions = OOBTree()
    return self._contributions.values()
def canUserCoordinate( self, av ):
    # Mirrors canCoordinate() but takes a bare user instead of an
    # AccessWrapper.
    return self.isCoordinator( av ) or self.canUserModify( av )
class SubTrack(CoreObject):
    """A named subdivision of a Track, carrying a title and description."""

    def __init__( self ):
        self.track = None
        self.id = "not assigned"
        self.title = ""
        self.description = ""

    def clone(self):
        # Copies title/description only; track and id are assigned when the
        # clone is registered via Track.addSubTrack().
        sub = SubTrack()
        sub.setDescription(self.getDescription())
        sub.setTitle(self.getTitle())
        return sub

    def delete(self):
        # Soft delete: park the object in the trash can.
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def canModify(self, aw_or_user):
        # Modification rights are delegated to the owning track.
        return self.track.canModify(aw_or_user)

    def canView( self, aw ):
        return self.track.canView( aw )

    def notifyModification( self ):
        # Touch the parent track and flag this persistent object dirty.
        parent = self.getTrack()
        if parent:
            parent.setModificationDate()
        self._p_changed = 1

    def getLocator( self ):
        """Gives back a globaly unique identification encapsulated in a Locator
            object for the session instance
        """
        # Fixed: identity comparison with None instead of ==.
        if self.track is None:
            return Locator()
        lconf = self.track.getLocator()
        lconf["subTrackId"] = self.getId()
        return lconf

    def setTrack(self, track):
        # Removed a dead trailing ``if track == None: return`` -- it was a
        # no-op at the end of the method.
        self.track = track

    def getTrack( self ):
        return self.track

    def getOwner( self ):
        return self.getTrack()

    def setId( self, newId ):
        # Ids are always stored as strings.
        self.id = str(newId)

    def getId( self ):
        return self.id

    def setTitle( self, newTitle ):
        self.title = newTitle
        self.notifyModification()

    def getTitle( self ):
        return self.title

    def setDescription(self, newDescription ):
        self.description = newDescription
        self.notifyModification()

    def getDescription(self):
        return self.description
class ContributionType(Persistent):
    """A conference-defined category that contributions can be tagged with
    (name + description, owned by a conference).
    """

    def __init__(self, name, description, conference):
        self._id = ""
        self._name = name
        self._description = description
        self._conference = conference

    def getId(self):
        return self._id

    def setId(self, id):
        self._id = id

    def getName(self):
        return self._name

    def setName(self, name):
        self._name = name

    def getDescription(self):
        return self._description

    def setDescription(self, desc):
        self._description = desc

    def getConference(self):
        return self._conference

    def setConference(self, conf):
        self._conference = conf

    def getLocator( self ):
        # Fixed: identity comparison with None instead of ==.
        if self._conference is None:
            return Locator()
        lconf = self._conference.getLocator()
        lconf["contribTypeId"] = self.getId()
        return lconf

    def canModify(self, aw_or_user):
        # Modification rights are delegated to the owning conference.
        return self._conference.canModify(aw_or_user)

    def delete(self):
        # Soft delete: detach from the conference and park in the trash can.
        self.setConference(None)
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def clone(self, conference ):
        """Return a copy of this type attached to ``conference``."""
        # Renamed the local so the builtin ``type`` is not shadowed.
        cloned = ContributionType(self.getName(), self.getDescription(), conference)
        return cloned
class BOAConfig(Persistent):
    """Contains the configuration of the Book of Abstracts of a conference
    """

    # Allowed sort orders for the book (value = translated UI label).
    sortByTypes = {"number": L_("ID"),
                   "name": L_("Title"),
                   "sessionTitle": L_("Session title"),
                   "speaker": L_("Presenter"),
                   "schedule": L_("Schedule")}

    # Who is presented as the corresponding author of each abstract.
    correspondingAuthorTypes = {"none": L_("Nobody"),
                                "submitter": L_("Submitter"),
                                "speakers": L_("Speakers")}

    def __init__(self,conf):
        self._conf=conf
        self._text=""
        self._showIds= False
        self._sortBy = "number"
        self._correspondingAuthor = "submitter"
        self._modificationDS = nowutc()
        self._cache = False

    def getText(self):
        return self._text

    def setText(self,newText):
        self._text=newText.strip()
        self._notifyModification()

    def getShowIds(self):
        # hasattr guard: attribute may be missing on old persisted objects.
        if not hasattr(self, "_showIds"):
            self._showIds=False
        return self._showIds

    def setShowIds(self,showIds):
        self._showIds=showIds
        self._notifyModification()

    def getSortBy(self):
        if not hasattr(self, "_sortBy"):
            self._sortBy="number"
        return self._sortBy

    def setSortBy(self,sortBy):
        self._sortBy=sortBy
        self._notifyModification()

    @staticmethod
    def getSortByTypes():
        return BOAConfig.sortByTypes

    def getCorrespondingAuthor(self):
        if not hasattr(self, "_correspondingAuthor"):
            self._correspondingAuthor = "submitter"
        return self._correspondingAuthor

    def setCorrespondingAuthor(self, correspondingAuthor):
        self._correspondingAuthor = correspondingAuthor
        self._notifyModification()

    @staticmethod
    def getCorrespondingAuthorTypes():
        return BOAConfig.correspondingAuthorTypes

    def isCacheEnabled(self):
        if not hasattr(self, '_cache'):
            self._cache = False
        return self._cache

    def setCache(self, value):
        # Fixed: dropped a stray trailing semicolon.
        self._cache = value

    def _notifyModification(self):
        # Record when the configuration last changed (used by lastChanged).
        self._modificationDS = nowutc()

    @property
    def lastChanged(self):
        if not hasattr(self, '_modificationDS'):
            self._modificationDS = nowutc()
        return self._modificationDS
class EventCloner(object):
    """Base class to let plugins/modules plug into the event cloning mechanism"""

    @staticmethod
    def get_plugin_items(event):
        """Returns the items/checkboxes for the clone options provided by EventCloner"""
        plugin_options = []
        # Each plugin listening on the clone signal may contribute options.
        for plugin_cloner in values_from_signal(signals.event_management.clone.send(event), single_value=True):
            with plugin_context(plugin_cloner.plugin):
                for name, (title, enabled, checked) in plugin_cloner.get_options().iteritems():
                    full_name = plugin_cloner.full_option_name(name)
                    # Keep (title, html) pairs so the checkboxes can be
                    # sorted alphabetically by title before rendering.
                    plugin_options.append((
                        title,
                        """<li><input type="checkbox" name="cloners" id="cloner-{0}" value="{0}" {2} {3}>{1}</li>"""
                        .format(full_name, title,
                                'disabled' if not enabled else '',
                                'checked' if checked and enabled else '')
                    ))
        return '\n'.join(x[1] for x in sorted(plugin_options))

    @staticmethod
    def clone_event(old_event, new_event):
        """Calls the various cloning methods"""
        # Checkbox values the user ticked in the clone form.
        selected = set(request.values.getlist('cloners'))
        for plugin_cloner in values_from_signal(signals.event_management.clone.send(old_event), single_value=True):
            with plugin_context(plugin_cloner.plugin):
                selected_options = {name for name, (_, enabled, _) in plugin_cloner.get_options().iteritems()
                                    if enabled and plugin_cloner.full_option_name(name) in selected}
                # clone() is always invoked, even with an empty selection.
                plugin_cloner.clone(new_event, selected_options)

    def __init__(self, event, plugin=None):
        self.event = event
        self.plugin = plugin

    def full_option_name(self, option):
        # Prefix with the defining module to keep option names unique.
        return '{}-{}'.format(self.__module__, option)

    def get_options(self):
        """Returns a dict containing the clone options.

        :return: dict mapping option names to ``title, enabled, checked`` tuples
        """
        raise NotImplementedError

    def clone(self, new_event, options):
        """Performs the actual cloning.

        This method is always called, even if no options are selected!

        :param new_event: The new event created during the clone
        :param options: A set containing the options provided by
                        this class which the user has selected
        """
        raise NotImplementedError
| XeCycle/indico | indico/MaKaC/conference.py | Python | gpl-3.0 | 377,852 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Train a Fast-RCNN model on the PASCAL VOC dataset.
This Fast-RCNN is based on VGG16 that was pre-trained using ImageI1K.
By default, the script will download the pre-trained VGG16 from neon model zoo
and seed the convolution and pooling layers. And Fast R-CNN starts training from
that. If the script is given --model_file, it will continue training the
Fast R-CNN from the given model file.
Reference:
"Fast R-CNN"
http://arxiv.org/pdf/1504.08083v2.pdf
https://github.com/rbgirshick/fast-rcnn
Usage:
python examples/fast-rcnn/train.py -e 20 --save_path frcn_vgg.pkl
Notes:
1. For VGG16 based Fast R-CNN model, we can support training/testing with small
batch size such as, 2 or 3 images per batch. The model training will converge
around 20 epochs. With 3 images per batch, and 64 ROIs per image, the training
consumes about 11G memory.
2. The original caffe model goes through 40000 iteration (mb) of training, with
2 images per minibatch.
3. The dataset will cache the preprocessed file and re-use that if the same
configuration of the dataset is used again. The cached file by default is in
~/nervana/data/VOCDevkit/VOC<year>/train_< >.pkl or
~/nervana/data/VOCDevkit/VOC<year>/inference_< >.pkl
"""
from neon import logger as neon_logger
from neon.backends import gen_backend
from neon.data import PASCALVOCTrain
from neon.transforms import CrossEntropyMulti, SmoothL1Loss, ObjectDetection
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.callbacks.callbacks import Callbacks
from neon.layers import Multicost, GeneralizedCostMask
from neon.util.persist import save_obj
from util import load_vgg_weights, create_frcn_model, scale_bbreg_weights
# main script

# parse the command line arguments
parser = NeonArgparser(__doc__, default_overrides=dict(batch_size=4))
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args(gen_be=False)

# Override save path if None
if args.save_path is None:
    args.save_path = 'frcn_vgg.pkl'
if args.callback_args['save_path'] is None:
    args.callback_args['save_path'] = args.save_path
if args.callback_args['serialize'] is None:
    args.callback_args['serialize'] = min(args.epochs, 10)

# hyperparameters
# NOTE(review): this unconditionally overwrites any --batch_size passed on
# the command line; confirm that forcing 4 is intended.
args.batch_size = 4
num_epochs = args.epochs
n_mb = None  # no cap on the number of minibatches per epoch
img_per_batch = args.batch_size
rois_per_img = 64  # regions of interest sampled per image
frcn_fine_tune = False
learning_rate_scale = 1.0 / 10
if frcn_fine_tune is True:
    learning_rate_scale = 1.0 / 16

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
if args.backend == 'gpu':
    be.enable_winograd = 4
    if be.gpu_memory_size < 11 * 1024 * 1024 * 1024:
        exit("ERROR: This model requires at least 11GB GPU memory to be run.")

# setup training dataset
train_set = PASCALVOCTrain('trainval', '2007', path=args.data_dir, n_mb=n_mb,
                           img_per_batch=img_per_batch, rois_per_img=rois_per_img,
                           rois_random_sample=True,
                           add_flipped=False, subset_pct=args.subset_pct)
test_set = PASCALVOCTrain('test', '2007', path=args.data_dir, n_mb=n_mb,
                          img_per_batch=img_per_batch, rois_per_img=rois_per_img,
                          rois_random_sample=True,
                          add_flipped=False)

# setup model
model = create_frcn_model(frcn_fine_tune)

# setup optimizer (separate schedules for weights and biases)
opt_w = GradientDescentMomentum(
    0.001 * learning_rate_scale, 0.9, wdecay=0.0005)
opt_b = GradientDescentMomentum(0.002 * learning_rate_scale, 0.9)
optimizer = MultiOptimizer({'default': opt_w, 'Bias': opt_b})

# if training a new model, seed the image model conv layers with pre-trained weights
# otherwise, just load the model file
if args.model_file is None:
    load_vgg_weights(model, args.data_dir)

# two-headed cost: classification (cross-entropy) + bbox regression (smooth L1)
cost = Multicost(costs=[GeneralizedCostMask(costfunc=CrossEntropyMulti()),
                        GeneralizedCostMask(costfunc=SmoothL1Loss())],
                 weights=[1, 1])
callbacks = Callbacks(model, eval_set=test_set, **args.callback_args)
model.fit(train_set, optimizer=optimizer,
          num_epochs=num_epochs, cost=cost, callbacks=callbacks)

# Fast R-CNN model requires scale the bbox regression branch linear layer weights
# before saving the model
model = scale_bbreg_weights(
    model, train_set.bbtarget_means, train_set.bbtarget_stds)
save_obj(model.serialize(keep_states=True), args.save_path)

neon_logger.display('running eval...')
metric_train = model.eval(train_set, metric=ObjectDetection())
neon_logger.display(
    'Train: label accuracy - {}%, object detection logloss - {}'.format(metric_train[0] * 100,
                                                                        metric_train[1]))
metric_test = model.eval(test_set, metric=ObjectDetection())
neon_logger.display(
    'Test: label accuracy - {}%, object detection logloss - {}'.format(metric_test[0] * 100,
                                                                       metric_test[1]))
| Jokeren/neon | examples/fast-rcnn/train.py | Python | apache-2.0 | 5,923 |
import sqlite3
from flask_restplus import Resource, reqparse
from models.user import UserModel
class UserRegister(Resource):
    """REST resource that registers a new user via POST."""

    # Parameter parsing: both fields are mandatory strings.
    parser = reqparse.RequestParser()
    parser.add_argument('username',
                        type=str,
                        required=True,
                        help="Username is required!")
    parser.add_argument('password',
                        type=str,
                        required=True,
                        help="Password is required!")

    def post(self):
        """Create the user; 400 on duplicate username, 201 on success."""
        data = UserRegister.parser.parse_args()

        # Preventing user duplication
        if UserModel.find_by_username(data['username']) is not None:
            return {"message": "User with that username already exists."}, 400

        # NOTE(review): the password is stored in plain text; it should be
        # hashed (e.g. werkzeug.security.generate_password_hash) -- confirm.
        connection = sqlite3.connect('data.db')
        try:
            cursor = connection.cursor()
            # id is auto-incrementing so it needs to be setup to null
            register_query = "INSERT INTO users VALUES (NULL, ?, ?)"
            cursor.execute(register_query, (data['username'], data['password'],))
            connection.commit()
        finally:
            # Fixed: close the connection even if the INSERT raises,
            # instead of leaking it.
            connection.close()

        return {"message": "User created successfully!"}, 201
| arcyfelix/Courses | 18-04-18-REST APIs with Flask and Python/Section 6 - Simplifying storage with Flask-SQLAlchemy/1_Improving code structure/resources/user.py | Python | apache-2.0 | 1,035 |
import logging
from os.path import (dirname, abspath, join)
import math
import numpy
import sys
import time
from binly.utils.resource import Resource
class Servo(Resource):
    """Drives one servo channel on a PCA9685 PWM controller, mapping an
    abstract value range onto pulse lengths and rate-limiting movement so
    the servo sweeps smoothly instead of jumping.
    """

    # Min pulse length out of 4096.
    DEFAULT_SERVO_MIN = 90
    # Max pulse length out of 4096.
    DEFAULT_SERVO_MAX = 545
    # Max number of servo units to change per time unit.
    DEFAULT_MAX_SERVO_RATE = 10
    # Number of seconds between servo movement updates.
    SERVO_UPDATE_SLEEP_SECONDS = 1.0 / 30.0
    # Servo pulse width modulation frequency in Hertz.
    SERVO_PWM_FREQ_HZ = 50

    def __init__(self, name, servo_channel, min_value, max_value,
                 initial_value, servo_min=DEFAULT_SERVO_MIN,
                 servo_max=DEFAULT_SERVO_MAX,
                 max_servo_rate=DEFAULT_MAX_SERVO_RATE,
                 fake=False, *vargs, **kwargs):
        super(Servo, self).__init__(*vargs, **kwargs)
        self.name = name
        self.servo_channel = servo_channel
        self.min_value = min_value
        self.max_value = max_value
        self.servo_min = servo_min
        self.servo_max = servo_max
        self.max_servo_rate = max_servo_rate
        if fake:
            # No hardware attached: log calls instead of driving the bus.
            self._pwm = FakePCA9685(self.name)
        else:
            # The Adafruit driver lives in a git submodule four levels up
            # from this file; make its checkout importable before importing.
            path = join(dirname(dirname(dirname(dirname(
                abspath(__file__))))),
                'submodules', 'Adafruit_Python_PCA9685')
            if path not in sys.path:
                sys.path.append(path)
            import Adafruit_PCA9685
            self._pwm = Adafruit_PCA9685.PCA9685()
        self._pwm.set_pwm_freq(self.SERVO_PWM_FREQ_HZ)
        # Initialize the servo to the initial value.
        self.set_servo_value(self.scale_value(
            initial_value, self.min_value, self.max_value,
            self.servo_min, self.servo_max))

    def start(self):
        # Begin consuming control messages (presumably provided by the
        # Resource base class -- confirm).
        self.start_processing_incoming_messages()

    def handle_incoming_message(self, topic, value):
        """Move the servo toward the requested value, rate-limited."""
        logging.debug('Received %s message on topic "%s": %s' %
                      (self.name, topic, value))
        # Validate the requested value.
        new_value = self.validate_value(
            self.name,
            value, self.min_value, self.max_value)
        # Make the servo move.
        new_servo_value = self.scale_value(
            new_value, self.min_value, self.max_value,
            self.servo_min, self.servo_max)
        # Step through intermediate positions so the move is smooth.
        for servo_value in self.smooth_value_series(self.servo_value,
                                                    new_servo_value,
                                                    self.max_servo_rate):
            self.set_servo_value(int(servo_value))
            time.sleep(self.SERVO_UPDATE_SLEEP_SECONDS)

    def set_servo_value(self, value):
        logging.debug('Setting servo value to: %s' % (value))
        self._pwm.set_pwm(self.servo_channel, 0, value)
        # Remember the last commanded position for the next smooth move.
        self.servo_value = value

    @staticmethod
    def smooth_value_series(start_value, end_value, max_rate):
        # Evenly spaced samples from start to end, stepping at most
        # ``max_rate`` units between consecutive samples.
        num_samples = int(
            math.ceil(abs(float(start_value - end_value)) / max_rate)) + 1
        return numpy.linspace(start_value, end_value, num_samples)
class FakePCA9685(object):
    """Implements the same interface as the Adafruit_PCA9685.PCA9685, but
    none of the methods do anything beyond emitting a debug log line.

    Lets the servo code run on machines without the PWM hardware.
    """

    def __init__(self, name, *vargs, **kwargs):
        super(FakePCA9685, self).__init__(*vargs, **kwargs)
        self.name = name

    def set_pwm_freq(self, freq_hz):
        message = '%s: FakePCA9685.set_pwm_freq(%d)' % (self.name, freq_hz)
        logging.debug(message)

    def set_pwm(self, channel, on, off):
        message = '%s: FakePCA9685.set_pwm(%d, %d, %d)' % (self.name, channel, on, off)
        logging.debug(message)

    def set_all_pwm(self, on, off):
        message = '%s: FakePCA9685.set_all_pwm(%d, %d)' % (self.name, on, off)
        logging.debug(message)
from __future__ import unicode_literals
import json
import sys
from django.conf import settings
from django.core.exceptions import ValidationError # backwards compatibility
from django.utils import six, timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, format_html, format_html_join, html_safe
from django.utils.translation import ugettext_lazy as _
try:
from collections import UserList
except ImportError: # Python 2
from UserList import UserList
def pretty_name(name):
    """Turn a machine name such as 'first_name' into 'First name'."""
    if name:
        return name.replace('_', ' ').capitalize()
    return ''
def flatatt(attrs):
    """
    Convert a dictionary of attributes to a single string.

    The returned string will contain a leading space followed by key="value",
    XML-style pairs. In the case of a boolean value, the key will appear
    without a value. It is assumed that the keys do not need to be
    XML-escaped. If the passed dictionary is empty, then return an empty
    string.

    The result is passed through 'mark_safe' (by way of 'format_html_join').
    """
    pairs = []
    flags = []
    for name, value in attrs.items():
        if isinstance(value, bool):
            # True booleans render as bare attributes; False ones are dropped.
            if value:
                flags.append((name,))
        elif value is not None:
            pairs.append((name, value))
    rendered_pairs = format_html_join('', ' {}="{}"', sorted(pairs))
    rendered_flags = format_html_join('', ' {}', sorted(flags))
    return rendered_pairs + rendered_flags
@html_safe
@python_2_unicode_compatible
class ErrorDict(dict):
    """
    A collection of errors that knows how to display itself in various formats.

    The dictionary keys are the field names, and the values are the errors.
    """
    def as_data(self):
        # Expose the underlying ValidationError instances per field.
        return {f: e.as_data() for f, e in self.items()}

    def as_json(self, escape_html=False):
        # JSON mapping of field name -> list of {message, code} dicts.
        return json.dumps({f: e.get_json_data(escape_html) for f, e in self.items()})

    def as_ul(self):
        # HTML rendering: a single <ul class="errorlist"> fragment.
        if not self:
            return ''
        return format_html(
            '<ul class="errorlist">{}</ul>',
            format_html_join('', '<li>{}{}</li>', ((k, force_text(v)) for k, v in self.items()))
        )

    def as_text(self):
        # Plain-text rendering: one bullet per field, nested per error.
        output = []
        for field, errors in self.items():
            output.append('* %s' % field)
            output.append('\n'.join(' * %s' % e for e in errors))
        return '\n'.join(output)

    def __str__(self):
        # HTML is the default string form.
        return self.as_ul()
@html_safe
@python_2_unicode_compatible
class ErrorList(UserList, list):
    """
    A collection of errors that knows how to display itself in various formats.
    """
    def __init__(self, initlist=None, error_class=None):
        super(ErrorList, self).__init__(initlist)

        # CSS class for the rendered <ul>; extra classes are appended to
        # the default 'errorlist'.
        if error_class is None:
            self.error_class = 'errorlist'
        else:
            self.error_class = 'errorlist {}'.format(error_class)

    def as_data(self):
        # Underlying ValidationError instances.
        return ValidationError(self.data).error_list

    def get_json_data(self, escape_html=False):
        # List of {message, code} dicts, optionally HTML-escaped.
        errors = []
        for error in self.as_data():
            message = list(error)[0]
            errors.append({
                'message': escape(message) if escape_html else message,
                'code': error.code or '',
            })
        return errors

    def as_json(self, escape_html=False):
        return json.dumps(self.get_json_data(escape_html))

    def as_ul(self):
        if not self.data:
            return ''

        return format_html(
            '<ul class="{}">{}</ul>',
            self.error_class,
            format_html_join('', '<li>{}</li>', ((force_text(e),) for e in self))
        )

    def as_text(self):
        return '\n'.join('* %s' % e for e in self)

    def __str__(self):
        # HTML is the default string form.
        return self.as_ul()

    def __repr__(self):
        return repr(list(self))

    def __contains__(self, item):
        # Compare against the flattened (stringified) error messages.
        return item in list(self)

    def __eq__(self, other):
        return list(self) == other

    def __ne__(self, other):
        return list(self) != other

    def __getitem__(self, i):
        # Items are exposed as text, not as ValidationError objects.
        error = self.data[i]
        if isinstance(error, ValidationError):
            return list(error)[0]
        return force_text(error)

    def __reduce_ex__(self, *args, **kwargs):
        # The `list` reduce function returns an iterator as the fourth element
        # that is normally used for repopulating. Since we only inherit from
        # `list` for `isinstance` backward compatibility (Refs #17413) we
        # nullify this iterator as it would otherwise result in duplicate
        # entries. (Refs #23594)
        info = super(UserList, self).__reduce_ex__(*args, **kwargs)
        return info[:3] + (None, None)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
    """
    When time zone support is enabled, convert naive datetimes
    entered in the current time zone to aware datetimes.
    """
    if settings.USE_TZ and value is not None and timezone.is_naive(value):
        current_timezone = timezone.get_current_timezone()
        try:
            return timezone.make_aware(value, current_timezone)
        except Exception:
            # make_aware may fail on ambiguous/nonexistent times (DST
            # transitions); surface that as a field validation error.
            message = _(
                '%(datetime)s couldn\'t be interpreted '
                'in time zone %(current_timezone)s; it '
                'may be ambiguous or it may not exist.'
            )
            params = {'datetime': value, 'current_timezone': current_timezone}
            # Re-raise as ValidationError while preserving the original
            # traceback (six.reraise is the Python 2/3 compatible spelling).
            six.reraise(ValidationError, ValidationError(
                message,
                code='ambiguous_timezone',
                params=params,
            ), sys.exc_info()[2])
    return value


def to_current_timezone(value):
    """
    When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.
    """
    if settings.USE_TZ and value is not None and timezone.is_aware(value):
        current_timezone = timezone.get_current_timezone()
        return timezone.make_naive(value, current_timezone)
    return value
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/django/forms/utils.py | Python | mit | 6,149 |
import xml.etree.ElementTree as ET
import os
from Element import Element
class PythonToGMX(object):
    """Convert a Python-side element tree into an ElementTree XML tree.

    NOTE(review): tag and text values are passed through eval(), which
    executes arbitrary code if the input tree is untrusted -- consider
    ast.literal_eval or plain strings instead.
    """
    def __init__(self, pythonTree):
        self.pythonroot = pythonTree
        # Tag/text are stored as Python expressions; eval() resolves them.
        self.root = ET.Element(eval(self.pythonroot.tag))
        for child in self.pythonroot.children:
            self.process(child, self.root)

    def process(self, element, parent):
        # Recursively mirror ``element`` (and its children) under ``parent``.
        elem = ET.SubElement(parent, eval(element.tag), element.attrib)
        elem.text = eval(element.text)
        for child in element.children:
            self.process(child, elem)
| Karuji/GMProjectImporter | PythonToGMX.py | Python | mit | 493 |
import roomai
import roomai.games.common
import roomai.games.kuhnpoker
import random
import unittest
class KuhnPokerExamplePlayer(roomai.games.common.AbstractPlayer):
    """Example Kuhn poker player that picks a uniformly random legal action."""

    def receive_info(self, info):
        # Cache the legal actions from the latest person state, if present.
        if info.person_state_history[-1].available_actions is not None:
            self.available_actions = info.person_state_history[-1].available_actions

    def take_action(self):
        # Choose uniformly among the cached available actions.
        values = self.available_actions.values()
        return list(values)[int(random.random() * len(values))]

    def reset(self):
        # No per-game state to clear.
        pass
class KuhnTester(unittest.TestCase):
    """Smoke test: play one silent Kuhn poker game end to end."""

    def testKuhn(self):
        players = [KuhnPokerExamplePlayer() for i in range(2)] + [roomai.games.common.RandomPlayerChance()]
        # RandomChancePlayer is the chance player with the uniform distribution over every output
        env = roomai.games.kuhnpoker.KuhnPokerEnv()
        # NOTE(review): compete_silent is invoked on the instance with env
        # passed again as the first positional argument -- confirm the
        # intended signature (it may expect only the players list).
        scores = env.compete_silent(env, players)
        print(scores)
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from hbase import hbase
from hbase_service import hbase_service
from hbase_decommission import hbase_decommission
import upgrade
class HbaseRestGatewayServer(Script):
    """Ambari lifecycle handler for the HBase REST gateway component."""

    def get_component_name(self):
        return "hbase-restserver"

    def install(self, env):
        self.install_packages(env)

    def configure(self, env):
        # Render the configuration files for the 'rest' component.
        import params
        env.set_params(params)
        hbase(name='rest')

    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        upgrade.prestart(env, "hbase-restserver")

    def post_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

    def start(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        self.configure(env) # for security
        hbase_service( 'rest',
          action = 'start'
        )

    def stop(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        hbase_service( 'rest',
          action = 'stop'
        )

    def status(self, env):
        # Raises if the pid file is missing or the process is not running.
        import status_params
        env.set_params(status_params)
        pid_file = format("{pid_dir}/hbase-{hbase_user}-rest.pid")
        check_process_status(pid_file)

    def decommission(self, env):
        import params
        env.set_params(params)
        hbase_decommission(env)
# Entry point: Ambari invokes this script with the desired command
# (install/start/stop/status/...), dispatched by Script.execute().
if __name__ == "__main__":
    HbaseRestGatewayServer().execute()
| alexryndin/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/package/scripts/hbase_restgatewayserver.py | Python | apache-2.0 | 2,174 |
# -*- coding: utf-8 -*-
from impl import *
| ibelikov/jimmy | jimmy/modules/throttle/__init__.py | Python | apache-2.0 | 43 |
# Copyright (C) 2011 REES Marche <http://www.reesmarche.org>
#
# This file is part of ``django-simple-accounting``.
# ``django-simple-accounting`` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# ``django-simple-accounting`` is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ``django-simple-accounting``. If not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import ValidationError
from simple_accounting.models import Transaction, CashFlow, Split, LedgerEntry
from simple_accounting.models import AccountType
from simple_accounting.exceptions import MalformedTransaction
def transaction_details(transaction):
    """
    Take a ``Transaction`` model instance and return a detailed,
    human-readable string representation of it.

    Only attribute reads are performed, so any object exposing the same
    attributes (pk, issuer, date, description, kind, source, is_* flags,
    splits) can be rendered.
    """
    display_str = ""
    # FIX: was misspelled "Trasanction"
    display_str += "Transaction # %s\n\n" % transaction.pk
    display_str += "issuer: %s\n" % transaction.issuer
    display_str += "issued on: %s\n" % transaction.date
    display_str += "description %s\n" % transaction.description
    display_str += "type: %s\n" % transaction.kind
    display_str += "source account: %s\n" % transaction.source.account
    display_str += "amount: %s\n" % transaction.source.amount
    display_str += "is_split: %s\n" % transaction.is_split
    display_str += "is_internal: %s\n" % transaction.is_internal
    display_str += "is_simple: %s\n" % transaction.is_simple
    display_str += "\nSPLITS: \n"
    # display transaction splits
    split_count = 0
    for split in transaction.splits:
        split_count += 1
        # FIX: was the literal "\n|n" (typo for a second newline)
        display_str += "split # %s\n\n" % split_count
        display_str += "exit point: %s\n" % split.exit_point
        display_str += "entry point: %s\n" % split.entry_point
        display_str += "target account: %s\n" % split.target.account
        # FIX: was ``transaction.target.amount`` -- ``Transaction`` has no
        # ``target`` attribute (it would raise AttributeError on any
        # transaction with splits); the amount belongs to the split's target flow
        display_str += "amount: %s\n" % split.target.amount
    return display_str
def register_split_transaction(source, splits, description, issuer, date=None, kind=None):
    """
    A factory function for registering general (split) transactions between accounts.

    When invoked, this function takes care of the following tasks:
    * create a new ``Transaction`` model instance from the given input arguments
    * for each account involved in the transaction, add an entry
      to the corresponding ledger (as a ``LedgerEntry`` instance).

    Arguments
    =========
    ``source``
        A ``CashFlow`` model instance specifying the source account for the transaction
        and the amount of money flowing from/to it
    ``splits``
        An iterable of ``Split`` model instances, representing the flow components
        (a.k.a. *splits*) from which the transaction is made. They must satisfy all the compatibility
        constraints descending from the reference accounting model (for details,
        see ``Transaction`` model's docstring)
    ``description``
        A string describing what the transaction stands for
    ``issuer``
        The economic subject (a ``Subject`` model instance) who issued the transaction
    ``date``
        A reference date for the transaction (as a ``DateTime`` object);
        default to the current date & time
    ``kind``
        A type specification for the transaction. It's an (optional) domain-specific string;
        if specified, it must be one of the values listed in ``settings.TRANSACTION_TYPES``

    Return value
    ============
    If input is valid, return the newly created ``Transaction`` model instance;
    otherwise, report to the client code whatever error(s) occurred during the processing,
    by raising a ``MalformedTransaction`` exception.
    """
    try:
        # build and persist the ``Transaction``; model validation runs on save
        transaction = Transaction()
        transaction.source = source
        transaction.description = description
        transaction.issuer = issuer
        transaction.date = date
        transaction.kind = kind
        transaction.save()
        # set transaction splits
        transaction.split_set = splits
    # NOTE(review): Python 2 ``except E, e`` syntax -- this module predates Python 3
    except ValidationError, e:
        err_msg = _(u"Transaction specs are invalid: %(specs)s. The following error(s) occured: %(errors)s")\
            % {'specs':transaction_details(transaction), 'errors':str(e.message_dict)}
        raise MalformedTransaction(err_msg)
    ## write ledger entries
    # source account: a ledger entry records the negation of the flow's amount
    LedgerEntry.objects.create(account=source.account, transaction=transaction, amount=-source.amount)
    # splits
    for split in splits:
        # NOTE(review): internal splits carry no exit/entry points, so both
        # flux-account entries are assumed to be guarded by this check -- confirm
        if split.exit_point:
            # the sign of a ledger entry depends on the type of account involved
            sign = 1 if split.exit_point.base_type == AccountType.EXPENSE else -1
            LedgerEntry.objects.create(account=split.exit_point, transaction=transaction, amount=sign*split.amount)
            # the sign of a ledger entry depends on the type of account involved
            sign = 1 if split.entry_point.base_type == AccountType.INCOME else -1
            LedgerEntry.objects.create(account=split.entry_point, transaction=transaction, amount=sign*split.amount)
        # target account
        # note that, by definition, ``split.amount == - split.target.amount)
        LedgerEntry.objects.create(account=split.target.account, transaction=transaction, amount=split.amount)
    return transaction
def register_transaction(source_account, exit_point, entry_point, target_account, amount, description, issuer, date=None, kind=None):
    """
    A factory function for registering (non-split) transactions between accounts
    belonging to different accounting systems.

    When invoked, this function takes care of the following tasks:
    * create a new ``Transaction`` model instance from the given input arguments
    * for each account involved in the transaction, add an entry
      to the corresponding ledger (as a ``LedgerEntry`` instance).

    Since this is supposed to be a non-split transaction, only two accounts are involved:
    a source and a target. Moreover, since this transaction involves two different
    accounting systems, both the exit-point account from the first system and
    the entry-point account to the second system must be specified.

    Arguments
    =========
    ``source_account``
        the source account for the transaction (a stock-like ``Account`` model instance)
    ``exit_point``
        the exit-point from the first system (a flux-like ``Account`` model instance)
    ``entry_point``
        the entry-point to the second system (a flux-like ``Account`` model instance)
    ``target_account``
        the target account for the transaction (a stock-like ``Account`` model instance)
    ``amount``
        the amount of money flowing between source and target accounts (as a signed decimal);
        its sign determines the flows's direction with respect to the source account
        (i.e., positive -> outgoing, negative -> incoming)
    ``description``
        A string describing what the transaction stands for
    ``issuer``
        The economic subject (a ``Subject`` model instance) who issued the transaction
    ``date``
        A reference date for the transaction (as a ``DateTime`` object);
        default to the current date & time
    ``kind``
        A type specification for the transaction. It's an (optional) domain-specific string;
        if specified, it must be one of the values listed in ``settings.TRANSACTION_TYPES``

    Return value
    ============
    If input is valid, return the newly created ``Transaction`` model instance;
    otherwise, report to the client code whatever error(s) occurred during the processing,
    by raising a ``MalformedTransaction`` exception.
    """
    try:
        transaction = Transaction()
        # source flow
        source = CashFlow.objects.create(account=source_account, amount=amount)
        transaction.source = source
        transaction.description = description
        transaction.issuer = issuer
        transaction.date = date
        transaction.kind = kind
        transaction.save()
        # construct the (single) transaction split from input arguments
        # target flow
        target = CashFlow.objects.create(account=target_account, amount=-amount)
        split = Split.objects.create(exit_point=exit_point, entry_point=entry_point, target=target)
        # add this single split to the transaction
        transaction.split_set = [split]
    # NOTE(review): Python 2 ``except E, e`` syntax -- this module predates Python 3
    except ValidationError, e:
        err_msg = _(u"Transaction specs are invalid: %(specs)s. The following error(s) occured: %(errors)s")\
            % {'specs':transaction_details(transaction), 'errors':str(e.message_dict)}
        raise MalformedTransaction(err_msg)
    ## write ledger entries
    # source account
    LedgerEntry.objects.create(account=source_account, transaction=transaction, amount=-amount)
    # exit point account
    # the sign of a ledger entry depends on the type of account involved
    sign = 1 if exit_point.base_type == AccountType.EXPENSE else -1
    LedgerEntry.objects.create(account=exit_point, transaction=transaction, amount=sign*amount)
    # entry point account
    # the sign of a ledger entry depends on the type of account involved
    sign = 1 if entry_point.base_type == AccountType.INCOME else -1
    LedgerEntry.objects.create(account=entry_point, transaction=transaction, amount=sign*amount)
    # target account
    LedgerEntry.objects.create(account=target_account, transaction=transaction, amount=amount)
    return transaction
def register_internal_transaction(source, targets, description, issuer, date=None, kind=None):
    """
    A factory function for registering internal transactions.

    This is just a convenience version of ``register_split_transaction``,
    to be used when dealing with internal transactions.

    When invoked, this function takes care of the following tasks:
    * create a new ``Transaction`` model instance from the given input arguments
    * for each account involved in the transaction (i.e., ``source`` and ``targets``),
      add an entry to the corresponding ledger (as a ``LedgerEntry`` instance).

    For details about internal transactions, see ``Transaction`` model's docstring.

    Arguments
    =========
    ``source``
        A ``CashFlow`` model instance specifying the source account for the transaction
        and the amount of money flowing from/to it
    ``targets``
        An iterable of ``CashFlow`` model instances, representing the flow components
        (a.k.a. splits) from which the transaction is made.
        Since we are dealing with an internal transaction, a split is fully defined
        by the target account and the amount of money flowing to/from it
        (so, a ``CashFlow`` rather than a ``Split`` instance).
    ``description``
        A string describing what the transaction stands for
    ``issuer``
        The economic subject (a ``Subject`` model instance) who issued the transaction
    ``date``
        A reference date for the transaction (as a ``DateTime`` object);
        default to the current date & time
    ``kind``
        A type specification for the transaction. It's an (optional) domain-specific string;
        if specified, it must be one of the values listed in ``settings.TRANSACTION_TYPES``

    Return value
    ============
    If input is valid, return the newly created ``Transaction`` model instance;
    otherwise, report to the client code whatever error(s) occurred during the processing,
    by raising a ``MalformedTransaction`` exception.
    """
    try:
        transaction = Transaction()
        transaction.source = source
        transaction.description = description
        transaction.issuer = issuer
        transaction.date = date
        transaction.kind = kind
        transaction.save()
        # construct transaction splits from input arguments
        splits = []
        for target in targets:
            # entry- & exit- points are missing, because this is an internal transaction
            split = Split.objects.create(target=target)
            splits.append(split)
        # set transaction splits
        transaction.split_set = splits
    # NOTE(review): Python 2 ``except E, e`` syntax -- this module predates Python 3
    except ValidationError, e:
        err_msg = _(u"Transaction specs are invalid: %(specs)s. The following error(s) occured: %(errors)s")\
            % {'specs':transaction_details(transaction), 'errors':str(e.message_dict)}
        raise MalformedTransaction(err_msg)
    ## write ledger entries
    # source account
    LedgerEntry.objects.create(account=source.account, transaction=transaction, amount=-source.amount)
    # target accounts
    # a ledger entry always records the negation of the flow's amount
    # (cf. the source entry above and ``register_simple_transaction``)
    for target in targets:
        LedgerEntry.objects.create(account=target.account, transaction=transaction, amount=-target.amount)
    return transaction
def register_simple_transaction(source_account, target_account, amount, description, issuer, date=None, kind=None):
    """
    A factory function for registering simple transactions.

    This is just a convenience version of ``register_transaction``,
    to be used when dealing with simple transactions.

    When invoked, this function takes care of the following tasks:
    * create a new ``Transaction`` model instance from the given input arguments
    * for each account involved in the transaction (i.e., ``source`` and ``target``),
      add an entry to the corresponding ledger (as a ``LedgerEntry`` instance).

    For details about simple transactions, see ``Transaction`` model's docstring.

    Arguments
    =========
    ``source_account``
        the source account for the transaction (a stock-like ``Account`` model instance)
    ``target_account``
        the target account for the transaction (a stock-like ``Account`` model instance)
    ``amount``
        the amount of money flowing between source and target accounts (as a signed decimal);
        its sign determines the flows's direction with respect to the source account
        (i.e., positive -> outgoing, negative -> incoming)
    ``description``
        A string describing what the transaction stands for
    ``issuer``
        The economic subject (a ``Subject`` model instance) who issued the transaction
    ``date``
        A reference date for the transaction (as a ``DateTime`` object);
        default to the current date & time
    ``kind``
        A type specification for the transaction. It's an (optional) domain-specific string;
        if specified, it must be one of the values listed in ``settings.TRANSACTION_TYPES``

    Return value
    ============
    If input is valid, return the newly created ``Transaction`` model instance;
    otherwise, report to the client code whatever error(s) occurred during the processing,
    by raising a ``MalformedTransaction`` exception.
    """
    try:
        transaction = Transaction()
        # source flow
        source = CashFlow.objects.create(account=source_account, amount=amount)
        transaction.source = source
        transaction.description = description
        transaction.issuer = issuer
        transaction.date = date
        transaction.kind = kind
        transaction.save()
        # construct the (single) transaction split from input arguments
        # entry- & exit- points are missing, because this is an internal transaction
        # target flow
        target = CashFlow.objects.create(account=target_account, amount=-amount)
        split = Split.objects.create(target=target)
        # add this single split to the transaction
        transaction.split_set = [split]
    # NOTE(review): Python 2 ``except E, e`` syntax -- this module predates Python 3
    except ValidationError, e:
        err_msg = _(u"Transaction specs are invalid: %(specs)s. The following error(s) occured: %(errors)s")\
            % {'specs':transaction_details(transaction), 'errors':str(e.message_dict)}
        raise MalformedTransaction(err_msg)
    ## write ledger entries
    # source account
    LedgerEntry.objects.create(account=source_account, transaction=transaction, amount=-amount)
    # target account
    LedgerEntry.objects.create(account=target_account, transaction=transaction, amount=amount)
    return transaction
def update_transaction(transaction, **kwargs):
    """
    Take an existing transaction and update it as specified by passed arguments;
    return the updated transaction.

    Conceptually, updating a transaction is a 3 step process:
    1) delete every ledger entry associated with the original transaction
       (since they were auto-generated, they are no longer valid)
    2) update the transaction instance as requested
    3) generate the corresponding ledger entries for the updated transaction

    Implemented as delete-and-re-register: the original instance is removed,
    then re-registered via the factory function matching its type, with
    ``kwargs`` overriding the original attributes.
    """
    # keep the original splits around: the instance is about to be deleted
    original_splits = transaction.splits
    # auto-generated ledger entries are stale once the transaction changes
    transaction.ledger_entries.delete()
    # drop the original DB row (the in-memory instance keeps its attributes)
    transaction.delete()
    # attributes shared by every factory function
    params = {
        'description': transaction.description,
        'issuer': transaction.issuer,
        'date': transaction.date,
        'kind': transaction.kind,
    }
    if transaction.is_simple:
        # simple transactions
        params['source_account'] = transaction.source.account
        params['target_account'] = original_splits[0].target.account
        params['amount'] = transaction.source.amount
        params.update(kwargs)
        return register_simple_transaction(**params)
    elif transaction.is_internal:
        # internal transactions
        params['source'] = transaction.source
        params['targets'] = [split.target for split in original_splits]
        params.update(kwargs)
        return register_internal_transaction(**params)
    elif not transaction.is_split:
        # non-split transactions
        params['source_account'] = transaction.source.account
        params['target_account'] = original_splits[0].target.account
        params['entry_point'] = original_splits[0].entry_point
        params['exit_point'] = original_splits[0].exit_point
        params['amount'] = transaction.source.amount
        params.update(kwargs)
        return register_transaction(**params)
    else:
        # general (split) transactions
        params['source'] = transaction.source
        params['splits'] = original_splits
        params.update(kwargs)
        return register_split_transaction(**params)
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import frappe.defaults
from frappe.utils import cint, flt
from frappe import _, msgprint, throw
from erpnext.accounts.party import get_party_account, get_due_date
from erpnext.controllers.stock_controller import update_gl_entries_after
from frappe.model.mapper import get_mapped_doc
from erpnext.accounts.doctype.sales_invoice.pos import update_multi_mode_option
from erpnext.controllers.selling_controller import SellingController
from erpnext.accounts.utils import get_account_currency
from erpnext.stock.doctype.delivery_note.delivery_note import update_billed_amount_based_on_so
from erpnext.projects.doctype.timesheet.timesheet import get_projectwise_timesheet_data
from erpnext.assets.doctype.asset.depreciation \
import get_disposal_account_and_cost_center, get_gl_entries_on_asset_disposal
from erpnext.stock.doctype.batch.batch import set_batch_nos
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos, get_delivery_note_serial_no
from erpnext.setup.doctype.company.company import update_company_current_month_sales
from erpnext.accounts.general_ledger import get_round_off_account_and_cost_center
# Custom Jinja template used to render the items child table on the form
form_grid_templates = {
    "items": "templates/form_grid/item_grid.html",
}
class SalesInvoice(SellingController):
def __init__(self, *args, **kwargs):
    """Construct the invoice and declare how billing progress is pushed
    back onto linked Sales Orders."""
    super(SalesInvoice, self).__init__(*args, **kwargs)
    # Each entry instructs the status updater to roll invoice-item amounts
    # up into the matching Sales Order item and refresh the order's
    # ``per_billed`` percentage / billing status.
    self.status_updater = [{
        'source_dt': 'Sales Invoice Item',
        'target_dt': 'Sales Order Item',
        'join_field': 'so_detail',
        'source_field': 'amount',
        'target_field': 'billed_amt',
        'target_ref_field': 'amount',
        'target_parent_dt': 'Sales Order',
        'target_parent_field': 'per_billed',
        'percent_join_field': 'sales_order',
        'status_field': 'billing_status',
        'keyword': 'Billed',
        'overflow_type': 'billing'
    }]
def set_indicator(self):
    """Set indicator for portal"""
    if self.outstanding_amount > 0:
        self.indicator_color = "orange"
        self.indicator_title = _("Unpaid")
        return
    self.indicator_color = "green"
    self.indicator_title = _("Paid")
def validate(self):
    """Frappe validation hook: run every document-level check before the
    invoice is saved or submitted."""
    super(SalesInvoice, self).validate()
    self.validate_auto_set_posting_time()

    if not self.is_pos:
        self.so_dn_required()

    self.validate_proj_cust()
    self.validate_with_previous_doc()
    self.validate_uom_is_integer("stock_uom", "stock_qty")
    self.validate_uom_is_integer("uom", "qty")
    self.check_close_sales_order("sales_order")
    self.validate_debit_to_acc()
    self.clear_unallocated_advances("Sales Invoice Advance", "advances")
    self.add_remarks()
    self.validate_write_off_account()
    self.validate_account_for_change_amount()
    self.validate_fixed_asset()
    self.set_income_account_for_fixed_assets()

    if cint(self.is_pos):
        self.validate_pos()

    # stock-affecting invoices need warehouse/item-level checks
    if cint(self.update_stock):
        self.validate_dropship_item()
        self.validate_item_code()
        self.validate_warehouse()
        self.update_current_stock()
        self.validate_delivery_note()

    if not self.is_opening:
        self.is_opening = 'No'

    # auto-pick batch numbers for drafts of stock-updating, non-return invoices
    if self._action != 'submit' and self.update_stock and not self.is_return:
        set_batch_nos(self, 'warehouse', True)

    self.set_against_income_account()
    self.validate_c_form()
    self.validate_time_sheets_are_submitted()
    self.validate_multiple_billing("Delivery Note", "dn_detail", "amount", "items")
    if not self.is_return:
        self.validate_serial_numbers()
    self.update_packing_list()
    self.set_billing_hours_and_amount()
    self.update_timesheet_billing_for_project()
    self.set_status()
def before_save(self):
    """Ensure each mode-of-payment row has its ledger account set before persisting."""
    set_account_for_mode_of_payment(self)
def on_submit(self):
    """Frappe submit hook: update linked documents, stock ledger and GL
    entries, then propagate billing info to timesheets and the project."""
    self.validate_pos_paid_amount()

    if not self.subscription:
        frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
            self.company, self.base_grand_total, self)

    self.check_prev_docstatus()

    if self.is_return:
        # NOTE status updating bypassed for is_return
        self.status_updater = []

    self.update_status_updater_args()
    self.update_prevdoc_status()
    self.update_billing_status_in_dn()
    self.clear_unallocated_mode_of_payments()

    # Updating stock ledger should always be called after updating prevdoc status,
    # because updating reserved qty in bin depends upon updated delivered qty in SO
    if self.update_stock == 1:
        self.update_stock_ledger()

    # this sequence because outstanding may get -ve
    self.make_gl_entries()

    if not self.is_return:
        self.update_billing_status_for_zero_amount_refdoc("Sales Order")
        self.check_credit_limit()

    self.update_serial_no()

    if not cint(self.is_pos) == 1 and not self.is_return:
        self.update_against_document_in_jv()

    self.update_time_sheet(self.name)
    update_company_current_month_sales(self.company)
    self.update_project()
def validate_pos_paid_amount(self):
    """A POS invoice must carry at least one payment row."""
    if self.is_pos and len(self.payments) == 0:
        frappe.throw(_("At least one mode of payment is required for POS invoice."))
def before_cancel(self):
    """Detach this invoice from its linked timesheets before cancellation."""
    self.update_time_sheet(None)
def on_cancel(self):
    """Frappe cancel hook: unlink payments, roll back linked-document
    statuses, stock ledger and GL entries."""
    self.check_close_sales_order("sales_order")

    from erpnext.accounts.utils import unlink_ref_doc_from_payment_entries
    if frappe.db.get_single_value('Accounts Settings', 'unlink_payment_on_cancellation_of_invoice'):
        unlink_ref_doc_from_payment_entries(self)

    if self.is_return:
        # NOTE status updating bypassed for is_return
        self.status_updater = []

    self.update_status_updater_args()
    self.update_prevdoc_status()
    self.update_billing_status_in_dn()

    if not self.is_return:
        self.update_billing_status_for_zero_amount_refdoc("Sales Order")
    self.update_serial_no(in_cancel=True)

    self.validate_c_form_on_cancel()

    # Updating stock ledger should always be called after updating prevdoc status,
    # because updating reserved qty in bin depends upon updated delivered qty in SO
    if self.update_stock == 1:
        self.update_stock_ledger()

    self.make_gl_entries_on_cancel()
    frappe.db.set(self, 'status', 'Cancelled')
    update_company_current_month_sales(self.company)
    self.update_project()
def update_status_updater_args(self):
    """When the invoice itself moves stock, additionally maintain delivered
    and returned quantities on the linked Sales Order items."""
    if cint(self.update_stock):
        self.status_updater.extend([{
            # roll invoice quantities up into SO delivered qty / delivery %
            'source_dt':'Sales Invoice Item',
            'target_dt':'Sales Order Item',
            'target_parent_dt':'Sales Order',
            'target_parent_field':'per_delivered',
            'target_field':'delivered_qty',
            'target_ref_field':'qty',
            'source_field':'qty',
            'join_field':'so_detail',
            'percent_join_field':'sales_order',
            'status_field':'delivery_status',
            'keyword':'Delivered',
            'second_source_dt': 'Delivery Note Item',
            'second_source_field': 'qty',
            'second_join_field': 'so_detail',
            'overflow_type': 'delivery',
            'extra_cond': """ and exists(select name from `tabSales Invoice`
            where name=`tabSales Invoice Item`.parent and update_stock = 1)"""
        },
        {
            # credit-note (return) quantities flow into SO returned_qty
            'source_dt': 'Sales Invoice Item',
            'target_dt': 'Sales Order Item',
            'join_field': 'so_detail',
            'target_field': 'returned_qty',
            'target_parent_dt': 'Sales Order',
            # 'target_parent_field': 'per_delivered',
            # 'target_ref_field': 'qty',
            'source_field': '-1 * qty',
            # 'percent_join_field': 'sales_order',
            # 'overflow_type': 'delivery',
            'extra_cond': """ and exists (select name from `tabSales Invoice` where name=`tabSales Invoice Item`.parent and update_stock=1 and is_return=1)"""
        }])
def check_credit_limit(self):
    """Run the customer credit-limit check unless every row is already backed
    by a Sales Order / Delivery Note (checked earlier), or the customer opted
    to bypass the check at Sales Order level."""
    from erpnext.selling.doctype.customer.customer import check_credit_limit

    bypass_at_so = cint(frappe.db.get_value("Customer", self.customer,
        "bypass_credit_limit_check_at_sales_order"))
    needs_check = bypass_at_so or any(
        not (row.sales_order or row.delivery_note) for row in self.get("items"))
    if needs_check:
        check_credit_limit(self.customer, self.company, bypass_at_so)
def set_missing_values(self, for_validate=False):
    """Fill defaults (POS fields, debit account, due date); for POS invoices
    return UI settings taken from the POS Profile."""
    pos = self.set_pos_fields(for_validate)

    if not self.debit_to:
        self.debit_to = get_party_account("Customer", self.customer, self.company)
    if not self.due_date and self.customer:
        self.due_date = get_due_date(self.posting_date, "Customer", self.customer, self.company)

    super(SalesInvoice, self).set_missing_values(for_validate)

    if not pos:
        return None
    return {
        "print_format": pos.get("print_format_for_online"),
        "allow_edit_rate": pos.get("allow_user_to_edit_rate"),
        "allow_edit_discount": pos.get("allow_user_to_edit_discount")
    }
def update_time_sheet(self, sales_invoice):
    """Propagate (or clear, when ``sales_invoice`` is None) this invoice's
    reference onto every linked Timesheet and refresh its totals/status."""
    for row in self.timesheets:
        if not row.time_sheet:
            continue
        ts = frappe.get_doc("Timesheet", row.time_sheet)
        self.update_time_sheet_detail(ts, row, sales_invoice)
        ts.calculate_total_amounts()
        ts.calculate_percentage_billed()
        ts.flags.ignore_validate_update_after_submit = True
        ts.set_status()
        ts.save()
def update_time_sheet_detail(self, timesheet, args, sales_invoice):
    """Stamp (or clear) the invoice reference on matching time-log rows.

    A row matches when: project billing targets this exact row; or no project
    is set and the row is not yet invoiced; or we are clearing (``sales_invoice``
    is None) and the row points at this invoice.
    """
    for log in timesheet.time_logs:
        project_row = self.project and args.timesheet_detail == log.name
        unbilled_row = not self.project and not log.sales_invoice
        clearing_own_row = not sales_invoice and log.sales_invoice == self.name
        if project_row or unbilled_row or clearing_own_row:
            log.sales_invoice = sales_invoice
def on_update(self):
    """Keep paid totals in sync whenever the document is updated."""
    self.set_paid_amount()
def set_paid_amount(self):
    """Recompute invoice-level paid totals from the payment rows, refreshing
    each row's company-currency amount along the way."""
    total = 0.0
    base_total = 0.0
    for row in self.payments:
        row.base_amount = flt(row.amount*self.conversion_rate, self.precision("base_paid_amount"))
        total += row.amount
        base_total += row.base_amount
    self.paid_amount = total
    self.base_paid_amount = base_total
def validate_time_sheets_are_submitted(self):
    """Only submitted (or payslip-stage) timesheets may be billed."""
    for row in self.timesheets:
        if not row.time_sheet:
            continue
        status = frappe.db.get_value("Timesheet", row.time_sheet, "status")
        if status not in ['Submitted', 'Payslip']:
            frappe.throw(_("Timesheet {0} is already completed or cancelled").format(row.time_sheet))
def set_pos_fields(self, for_validate=False):
    """Set retail related fields from POS Profiles"""
    if cint(self.is_pos) != 1:
        return

    from erpnext.stock.get_item_details import get_pos_profile_item_details, get_pos_profile
    # fall back to the company's default POS Profile when none is chosen
    if not self.pos_profile:
        pos_profile = get_pos_profile(self.company) or {}
        self.pos_profile = pos_profile.get('name')

    pos = {}
    if self.pos_profile:
        pos = frappe.get_doc('POS Profile', self.pos_profile)

    if not self.get('payments') and not for_validate:
        update_multi_mode_option(self, pos)

    if not self.account_for_change_amount:
        self.account_for_change_amount = frappe.db.get_value('Company', self.company, 'default_cash_account')

    if pos:
        self.allow_print_before_pay = pos.allow_print_before_pay

        if not for_validate and not self.customer:
            self.customer = pos.customer

        # profile-level change account overrides the company default
        if pos.get('account_for_change_amount'):
            self.account_for_change_amount = pos.get('account_for_change_amount')

        # copy profile defaults; during validation only fill fields still empty
        for fieldname in ('territory', 'naming_series', 'currency', 'taxes_and_charges', 'letter_head', 'tc_name',
            'selling_price_list', 'company', 'select_print_heading', 'cash_bank_account',
            'write_off_account', 'write_off_cost_center', 'apply_discount_on'):
                if (not for_validate) or (for_validate and not self.get(fieldname)):
                    self.set(fieldname, pos.get(fieldname))

        if not for_validate:
            self.update_stock = cint(pos.get("update_stock"))

        # set pos values in items
        for item in self.get("items"):
            if item.get('item_code'):
                for fname, val in get_pos_profile_item_details(pos,
                    frappe._dict(item.as_dict()), pos).items():
                        if (not for_validate) or (for_validate and not item.get(fname)):
                            item.set(fname, val)

        # fetch terms
        if self.tc_name and not self.terms:
            self.terms = frappe.db.get_value("Terms and Conditions", self.tc_name, "terms")

        # fetch charges
        if self.taxes_and_charges and not len(self.get("taxes")):
            self.set_taxes()

    return pos
def get_company_abbr(self):
    """Return the company's abbreviation (first column of the first row)."""
    rows = frappe.db.sql("select abbr from tabCompany where name=%s", self.company)
    return rows[0][0]
def validate_debit_to_acc(self):
    """Ensure ``debit_to`` exists and is a Balance Sheet / Receivable account;
    remember its currency for later postings."""
    account = frappe.db.get_value("Account", self.debit_to,
        ["account_type", "report_type", "account_currency"], as_dict=True)

    if not account:
        frappe.throw(_("Debit To is required"))
    if account.report_type != "Balance Sheet":
        frappe.throw(_("Debit To account must be a Balance Sheet account"))
    if self.customer and account.account_type != "Receivable":
        frappe.throw(_("Debit To account must be a Receivable account"))

    self.party_account_currency = account.account_currency
def clear_unallocated_mode_of_payments(self):
    """Drop in-memory payment rows with no amount, then purge their
    already-persisted zero-amount counterparts."""
    kept_rows = self.get("payments", {"amount": ["not in", [0, None, ""]]})
    self.set("payments", kept_rows)

    frappe.db.sql("""delete from `tabSales Invoice Payment` where parent = %s
        and amount = 0""", self.name)
def validate_with_previous_doc(self):
    """Cross-check invoice fields/rows against the referenced Sales Orders and
    Delivery Notes (same customer/company/project/currency; matching
    item/UOM/conversion factor on child rows)."""
    super(SalesInvoice, self).validate_with_previous_doc({
        "Sales Order": {
            "ref_dn_field": "sales_order",
            "compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
        },
        "Sales Order Item": {
            "ref_dn_field": "so_detail",
            "compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
            "is_child_table": True,
            "allow_duplicate_prev_row_id": True
        },
        "Delivery Note": {
            "ref_dn_field": "delivery_note",
            "compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
        },
        "Delivery Note Item": {
            "ref_dn_field": "dn_detail",
            "compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
            "is_child_table": True,
            "allow_duplicate_prev_row_id": True
        },
    })

    # optionally forbid rate drift from the reference document
    if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')) and not self.is_return:
        self.validate_rate_with_reference_doc([
            ["Sales Order", "sales_order", "so_detail"],
            ["Delivery Note", "delivery_note", "dn_detail"]
        ])
def set_against_income_account(self):
    """Set against account for debit to account"""
    seen = []
    for item in self.get('items'):
        if item.income_account not in seen:
            seen.append(item.income_account)
    self.against_income_account = ','.join(seen)
def add_remarks(self):
    """Default the remarks field when the user left it empty."""
    if not self.remarks:
        self.remarks = 'No Remarks'
def validate_auto_set_posting_time(self):
    """Keep the user-chosen posting time on amended invoices, then run the
    standard posting-time validation."""
    # Don't auto set the posting date and time if invoice is amended
    is_amended_draft = self.is_new() and self.amended_from
    if is_amended_draft:
        self.set_posting_time = 1
    self.validate_posting_time()
def so_dn_required(self):
    """check in manage account if sales order / delivery note required or not."""
    # maps mandatory-reference doctype -> [Selling Settings flag,
    # invoice-level flag that waives the requirement]
    dic = {'Sales Order':['so_required', 'is_pos'],'Delivery Note':['dn_required', 'update_stock']}
    for i in dic:
        if frappe.db.get_value('Selling Settings', None, dic[i][0]) == 'Yes':
            for d in self.get('items'):
                # only stock items need the reference; the row field name is
                # the doctype lower-cased ('sales_order' / 'delivery_note')
                if frappe.db.get_value('Item', d.item_code, 'is_stock_item') == 1 \
                    and not d.get(i.lower().replace(' ','_')) and not self.get(dic[i][1]):
                        msgprint(_("{0} is mandatory for Item {1}").format(i,d.item_code), raise_exception=1)
def validate_proj_cust(self):
    """check for does customer belong to same project as entered.."""
    if not (self.project and self.customer):
        return
    rows = frappe.db.sql("""select name from `tabProject`
        where name = %s and (customer = %s or customer is null or customer = '')""",
        (self.project, self.customer))
    if not rows:
        throw(_("Customer {0} does not belong to project {1}").format(self.customer,self.project))
def validate_pos(self):
    """For POS returns, payments plus write-off must cover the grand total."""
    if not self.is_return:
        return
    shortfall = flt(self.paid_amount) + flt(self.write_off_amount) - flt(self.grand_total)
    if shortfall < 1/(10**(self.precision("grand_total") + 1)):
        frappe.throw(_("Paid amount + Write Off Amount can not be greater than Grand Total"))
def validate_item_code(self):
    """Every item row must carry an item code."""
    for row in self.get('items'):
        if not row.item_code:
            msgprint(_("Item Code required at Row No {0}").format(row.idx), raise_exception=True)
def validate_warehouse(self):
    """On top of the standard checks, stock items must name a warehouse."""
    super(SalesInvoice, self).validate_warehouse()

    for row in self.get_item_list():
        if not row.warehouse and frappe.db.get_value("Item", row.item_code, "is_stock_item"):
            frappe.throw(_("Warehouse required for stock Item {0}").format(row.item_code))
def validate_delivery_note(self):
    """Stock-updating invoices may not contain rows billed from a Delivery Note."""
    for row in self.get("items"):
        if row.delivery_note:
            msgprint(_("Stock cannot be updated against Delivery Note {0}").format(row.delivery_note), raise_exception=1)
def validate_write_off_account(self):
    """Fall back to the company's default write-off account, then insist on one."""
    if flt(self.write_off_amount) and not self.write_off_account:
        self.write_off_account = frappe.db.get_value('Company', self.company, 'write_off_account')
    if flt(self.write_off_amount) and not self.write_off_account:
        msgprint(_("Please enter Write Off Account"), raise_exception=1)
def validate_account_for_change_amount(self):
    """Change can only be returned if an account is nominated for it."""
    if flt(self.change_amount) and not self.account_for_change_amount:
        msgprint(_("Please enter Account for Change Amount"), raise_exception=1)
def validate_c_form(self):
    """ Blank C-form no if C-form applicable marked as 'No'"""
    # On an amendment that is no longer C-Form applicable, detach the original
    # invoice from the C-Form and clear the link on this document.
    if self.amended_from and self.c_form_applicable == 'No' and self.c_form_no:
        frappe.db.sql("""delete from `tabC-Form Invoice Detail` where invoice_no = %s
            and parent = %s""", (self.amended_from, self.c_form_no))
        frappe.db.set(self, 'c_form_no', '')
def validate_c_form_on_cancel(self):
    """Block cancellation while this invoice is still listed on a C-Form."""
    if self.c_form_no and self.c_form_applicable == 'Yes':
        msgprint(_("Please remove this Invoice {0} from C-Form {1}")
            .format(self.name, self.c_form_no), raise_exception = 1)
def validate_dropship_item(self):
    """Stock cannot be updated for rows sourced from a drop-shipped Sales Order item."""
    for row in self.items:
        if not row.sales_order:
            continue
        if frappe.db.get_value("Sales Order Item", row.so_detail, "delivered_by_supplier"):
            frappe.throw(_("Could not update stock, invoice contains drop shipping item."))
def update_current_stock(self):
    """Refresh actual_qty (and for packed items, projected_qty) on each row
    from the matching Bin record.

    Item rows without both item_code and warehouse are skipped; packed-item
    rows are queried unconditionally, as in the original behaviour.
    """
    for d in self.get('items'):
        if d.item_code and d.warehouse:
            # renamed from `bin` to avoid shadowing the builtin
            bin_rows = frappe.db.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s",
                (d.item_code, d.warehouse), as_dict=1)
            d.actual_qty = flt(bin_rows[0]['actual_qty']) if bin_rows else 0
    for d in self.get('packed_items'):
        bin_rows = frappe.db.sql("select actual_qty, projected_qty from `tabBin` where item_code = %s and warehouse = %s",
            (d.item_code, d.warehouse), as_dict=1)
        d.actual_qty = flt(bin_rows[0]['actual_qty']) if bin_rows else 0
        d.projected_qty = flt(bin_rows[0]['projected_qty']) if bin_rows else 0
def update_packing_list(self):
    """Regenerate packed items for stock-updating invoices; otherwise clear them."""
    if cint(self.update_stock) != 1:
        self.set('packed_items', [])
        return
    from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
    make_packing_list(self)
def set_billing_hours_and_amount(self):
    """For invoices not linked to a project, default each timesheet row's
    billing hours/amount from its Timesheet's billable totals."""
    if self.project:
        return
    for row in self.timesheets:
        ts_doc = frappe.get_doc('Timesheet', row.time_sheet)
        if ts_doc.total_billable_hours and not row.billing_hours:
            row.billing_hours = ts_doc.total_billable_hours
        if ts_doc.total_billable_amount and not row.billing_amount:
            row.billing_amount = ts_doc.total_billable_amount
def update_timesheet_billing_for_project(self):
    """Pull timesheet rows from the project when none exist yet; otherwise
    just refresh the billing total."""
    needs_fetch = self.project and not self.timesheets
    if needs_fetch:
        self.add_timesheet_data()
    else:
        self.calculate_billing_amount_for_timesheet()
def add_timesheet_data(self):
    """Rebuild the timesheets table from the linked project, then refresh the
    billing total (also when there is no project, leaving the table empty)."""
    self.set('timesheets', [])
    if self.project:
        for row in get_projectwise_timesheet_data(self.project):
            self.append('timesheets', dict(
                time_sheet=row.parent,
                billing_hours=row.billing_hours,
                billing_amount=row.billing_amt,
                timesheet_detail=row.name,
            ))
    self.calculate_billing_amount_for_timesheet()
def calculate_billing_amount_for_timesheet(self):
    """Sum billing_amount across timesheet rows into total_billing_amount
    (0.0 when there are no billable rows)."""
    self.total_billing_amount = sum(
        (row.billing_amount for row in self.timesheets if row.billing_amount), 0.0)
def get_warehouse(self):
    """Resolve the POS warehouse for this invoice.

    Preference order: the session user's POS Profile, then the company-wide
    (user-less) profile. Errors out only when neither profile exists; a
    user profile without a warehouse falls through silently to the global one.
    """
    user_pos_profile = frappe.db.sql("""select name, warehouse from `tabPOS Profile`
        where ifnull(user,'') = %s and company = %s""", (frappe.session['user'], self.company))
    warehouse = user_pos_profile[0][1] if user_pos_profile else None
    if not warehouse:
        global_pos_profile = frappe.db.sql("""select name, warehouse from `tabPOS Profile`
            where (user is null or user = '') and company = %s""", self.company)
        if global_pos_profile:
            warehouse = global_pos_profile[0][1]
        elif not user_pos_profile:
            # no user-specific AND no global profile: POS entry impossible
            msgprint(_("POS Profile required to make POS Entry"), raise_exception=True)
    return warehouse
def set_income_account_for_fixed_assets(self):
    """Point fixed-asset rows at the company's disposal account, defaulting
    their cost center to the depreciation cost center when unset."""
    disposal_account = None
    depreciation_cost_center = None
    for row in self.get("items"):
        if not row.is_fixed_asset:
            continue
        # fetch the company accounts lazily, only when a fixed-asset row exists
        if not disposal_account:
            disposal_account, depreciation_cost_center = get_disposal_account_and_cost_center(self.company)
        row.income_account = disposal_account
        if not row.cost_center:
            row.cost_center = depreciation_cost_center
def check_prev_docstatus(self):
    """Ensure every linked Sales Order / Delivery Note is submitted (docstatus == 1).

    Raises a frappe validation error naming the first unsubmitted document.
    """
    for d in self.get('items'):
        if d.sales_order and frappe.db.get_value("Sales Order", d.sales_order, "docstatus") != 1:
            frappe.throw(_("Sales Order {0} is not submitted").format(d.sales_order))
        if d.delivery_note and frappe.db.get_value("Delivery Note", d.delivery_note, "docstatus") != 1:
            # frappe.throw (not the bare `throw` alias) for consistency with the branch above
            frappe.throw(_("Delivery Note {0} is not submitted").format(d.delivery_note))
def make_gl_entries(self, gl_entries=None, repost_future_gle=True, from_repost=False):
    """Post (or on cancel, reverse) the General Ledger entries for this invoice.

    Args:
        gl_entries: pre-built GL rows; built via get_gl_entries() when not given.
        repost_future_gle: when True and this invoice updates stock under
            perpetual inventory, re-evaluate GL entries dated after this posting.
        from_repost: accepted for API compatibility; not used in this body.
    """
    auto_accounting_for_stock = erpnext.is_perpetual_inventory_enabled(self.company)
    # zero-value invoices post nothing
    if not self.grand_total:
        return
    if not gl_entries:
        gl_entries = self.get_gl_entries()
    if gl_entries:
        from erpnext.accounts.general_ledger import make_gl_entries
        # if POS and amount is written off, updating outstanding amt after posting all gl entries
        update_outstanding = "No" if (cint(self.is_pos) or self.write_off_account) else "Yes"
        make_gl_entries(gl_entries, cancel=(self.docstatus == 2),
            update_outstanding=update_outstanding, merge_entries=False)
        if update_outstanding == "No":
            from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
            # returns adjust the outstanding of the original invoice
            update_outstanding_amt(self.debit_to, "Customer", self.customer,
                self.doctype, self.return_against if cint(self.is_return) else self.name)
        # stock-affecting postings can change valuation of later vouchers;
        # recompute their GL entries from this posting datetime onward
        if repost_future_gle and cint(self.update_stock) \
                and cint(auto_accounting_for_stock):
            items, warehouses = self.get_items_and_warehouses()
            update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items)
    elif self.docstatus == 2 and cint(self.update_stock) \
            and cint(auto_accounting_for_stock):
        # cancelled stock invoice with no entries to reverse: drop stale GLs
        from erpnext.accounts.general_ledger import delete_gl_entries
        delete_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
def get_gl_entries(self, warehouse_account=None):
    """Assemble the full GL entry list: receivable, taxes and income first,
    merged, then the POS payment / change / write-off / rounding rows."""
    from erpnext.accounts.general_ledger import merge_similar_entries
    entries = []
    for build in (self.make_customer_gl_entry,
                  self.make_tax_gl_entries,
                  self.make_item_gl_entries):
        build(entries)
    # merge gl entries before adding pos entries
    entries = merge_similar_entries(entries)
    for build in (self.make_pos_gl_entries,
                  self.make_gle_for_change_amount,
                  self.make_write_off_gl_entry,
                  self.make_gle_for_rounding_adjustment):
        build(entries)
    return entries
def make_customer_gl_entry(self, gl_entries):
    """Debit the receivable account (debit_to) for the invoice grand total."""
    # prefer the rounded total when rounding is in effect
    grand_total = self.rounded_total or self.grand_total
    if grand_total:
        # Didnot use base_grand_total to book rounding loss gle
        grand_total_in_company_currency = flt(grand_total * self.conversion_rate,
            self.precision("grand_total"))
        gl_entries.append(
            self.get_gl_dict({
                "account": self.debit_to,
                "party_type": "Customer",
                "party": self.customer,
                "against": self.against_income_account,
                "debit": grand_total_in_company_currency,
                # document-currency figure when the receivable account is foreign-currency
                "debit_in_account_currency": grand_total_in_company_currency \
                    if self.party_account_currency==self.company_currency else grand_total,
                # returns knock against the original invoice
                "against_voucher": self.return_against if cint(self.is_return) else self.name,
                "against_voucher_type": self.doctype
            }, self.party_account_currency)
        )
def make_tax_gl_entries(self, gl_entries):
    """Credit each tax head for its base tax amount (after discount)."""
    for tax in self.get("taxes"):
        # skip zero-amount tax rows
        if flt(tax.base_tax_amount_after_discount_amount):
            account_currency = get_account_currency(tax.account_head)
            gl_entries.append(
                self.get_gl_dict({
                    "account": tax.account_head,
                    "against": self.customer,
                    "credit": flt(tax.base_tax_amount_after_discount_amount),
                    # foreign-currency tax accounts book the document-currency amount
                    "credit_in_account_currency": flt(tax.base_tax_amount_after_discount_amount) \
                        if account_currency==self.company_currency else flt(tax.tax_amount_after_discount_amount),
                    "cost_center": tax.cost_center
                }, account_currency)
            )
def make_item_gl_entries(self, gl_entries):
    """Credit income per item row; fixed-asset rows post disposal entries
    instead. Under update-stock + perpetual inventory, the stock/expense GL
    entries from the stock-controller base class are appended as well."""
    # income account gl entries
    for item in self.get("items"):
        if flt(item.base_net_amount):
            if item.is_fixed_asset:
                asset = frappe.get_doc("Asset", item.asset)
                # disposal gain/loss entries come from the asset module
                fixed_asset_gl_entries = get_gl_entries_on_asset_disposal(asset, item.base_net_amount)
                for gle in fixed_asset_gl_entries:
                    gle["against"] = self.customer
                    gl_entries.append(self.get_gl_dict(gle))
                asset.db_set("disposal_date", self.posting_date)
                # on cancel (docstatus != 1) the status is recomputed (None)
                asset.set_status("Sold" if self.docstatus==1 else None)
            else:
                account_currency = get_account_currency(item.income_account)
                gl_entries.append(
                    self.get_gl_dict({
                        "account": item.income_account,
                        "against": self.customer,
                        "credit": item.base_net_amount,
                        "credit_in_account_currency": item.base_net_amount \
                            if account_currency==self.company_currency else item.net_amount,
                        "cost_center": item.cost_center
                    }, account_currency)
                )
    # expense account gl entries
    if cint(self.update_stock) and \
            erpnext.is_perpetual_inventory_enabled(self.company):
        gl_entries += super(SalesInvoice, self).get_gl_entries()
def make_pos_gl_entries(self, gl_entries):
    """For POS invoices, post each payment mode: credit the receivable and
    debit the payment-mode account for the same amount."""
    if cint(self.is_pos):
        for payment_mode in self.payments:
            if payment_mode.amount:
                # POS, make payment entries
                gl_entries.append(
                    self.get_gl_dict({
                        "account": self.debit_to,
                        "party_type": "Customer",
                        "party": self.customer,
                        "against": payment_mode.account,
                        "credit": payment_mode.base_amount,
                        "credit_in_account_currency": payment_mode.base_amount \
                            if self.party_account_currency==self.company_currency \
                            else payment_mode.amount,
                        "against_voucher": self.return_against if cint(self.is_return) else self.name,
                        "against_voucher_type": self.doctype,
                    }, self.party_account_currency)
                )
                # counter-entry into the cash/bank account of the payment mode
                payment_mode_account_currency = get_account_currency(payment_mode.account)
                gl_entries.append(
                    self.get_gl_dict({
                        "account": payment_mode.account,
                        "against": self.customer,
                        "debit": payment_mode.base_amount,
                        "debit_in_account_currency": payment_mode.base_amount \
                            if payment_mode_account_currency==self.company_currency \
                            else payment_mode.amount
                    }, payment_mode_account_currency)
                )
def make_gle_for_change_amount(self, gl_entries):
    """Post the change handed back to the customer in a POS sale: debit the
    receivable (reversing the overpayment) and credit the change account."""
    if cint(self.is_pos) and self.change_amount:
        if self.account_for_change_amount:
            gl_entries.append(
                self.get_gl_dict({
                    "account": self.debit_to,
                    "party_type": "Customer",
                    "party": self.customer,
                    "against": self.account_for_change_amount,
                    "debit": flt(self.base_change_amount),
                    "debit_in_account_currency": flt(self.base_change_amount) \
                        if self.party_account_currency==self.company_currency else flt(self.change_amount),
                    "against_voucher": self.return_against if cint(self.is_return) else self.name,
                    "against_voucher_type": self.doctype
                }, self.party_account_currency)
            )
            gl_entries.append(
                self.get_gl_dict({
                    "account": self.account_for_change_amount,
                    "against": self.customer,
                    # NOTE(review): unlike the debit side, no account-currency
                    # figure or currency argument is passed here — presumably
                    # the change account is always in company currency; confirm.
                    "credit": self.base_change_amount
                })
            )
        else:
            frappe.throw(_("Select change amount account"), title="Mandatory Field")
def make_write_off_gl_entry(self, gl_entries):
    """Book the written-off amount: credit the receivable, debit the
    write-off account (falling back to the Company default cost center)."""
    # write off entries, applicable if only pos
    if self.write_off_account and self.write_off_amount:
        write_off_account_currency = get_account_currency(self.write_off_account)
        default_cost_center = frappe.db.get_value('Company', self.company, 'cost_center')
        gl_entries.append(
            self.get_gl_dict({
                "account": self.debit_to,
                "party_type": "Customer",
                "party": self.customer,
                "against": self.write_off_account,
                "credit": self.base_write_off_amount,
                "credit_in_account_currency": self.base_write_off_amount \
                    if self.party_account_currency==self.company_currency else self.write_off_amount,
                "against_voucher": self.return_against if cint(self.is_return) else self.name,
                "against_voucher_type": self.doctype
            }, self.party_account_currency)
        )
        gl_entries.append(
            self.get_gl_dict({
                "account": self.write_off_account,
                "against": self.customer,
                "debit": self.base_write_off_amount,
                "debit_in_account_currency": self.base_write_off_amount \
                    if write_off_account_currency==self.company_currency else self.write_off_amount,
                "cost_center": self.write_off_cost_center or default_cost_center
            }, write_off_account_currency)
        )
def make_gle_for_rounding_adjustment(self, gl_entries):
    """Credit the company's round-off account for the rounding adjustment."""
    if self.rounding_adjustment:
        round_off_account, round_off_cost_center = \
            get_round_off_account_and_cost_center(self.company)
        gl_entries.append(
            self.get_gl_dict({
                "account": round_off_account,
                "against": self.customer,
                # NOTE(review): no account-currency argument is passed, unlike the
                # other entry builders — presumably the round-off account is in
                # company currency; confirm.
                "credit_in_account_currency": self.rounding_adjustment,
                "credit": self.base_rounding_adjustment,
                "cost_center": round_off_cost_center,
            }
        ))
def update_billing_status_in_dn(self, update_modified=True):
    """Recompute billed_amt on Delivery Note rows referenced by this invoice
    and refresh each affected Delivery Note's billing percentage."""
    updated_delivery_notes = []
    for d in self.get("items"):
        if d.dn_detail:
            # total billed across all submitted invoices against this DN row
            billed_amt = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item`
                where dn_detail=%s and docstatus=1""", d.dn_detail)
            billed_amt = billed_amt and billed_amt[0][0] or 0
            frappe.db.set_value("Delivery Note Item", d.dn_detail, "billed_amt", billed_amt, update_modified=update_modified)
            updated_delivery_notes.append(d.delivery_note)
        elif d.so_detail:
            # billed against the Sales Order: propagate to DNs made from it
            updated_delivery_notes += update_billed_amount_based_on_so(d.so_detail, update_modified)
    # de-duplicate before re-saving each Delivery Note's billing percentage
    for dn in set(updated_delivery_notes):
        frappe.get_doc("Delivery Note", dn).update_billing_percentage(update_modified=update_modified)
def on_recurring(self, reference_doc, subscription_doc):
    """Carry the C-Form fields and write-off amount over from the reference
    invoice when a recurring copy is made; the due date is reset so it gets
    recomputed for the new billing period."""
    copied_fields = ("c_form_applicable", "c_form_no", "write_off_amount")
    for fieldname in copied_fields:
        self.set(fieldname, reference_doc.get(fieldname))
    self.due_date = None
def update_serial_no(self, in_cancel=False):
    """Update the Sales Invoice reference on each sold Serial No.

    Normal submit links the serials to this invoice; cancelling (or issuing a
    return) clears the link, and cancelling a return restores the link to the
    original invoice (return_against).
    """
    invoice = None if (in_cancel or self.is_return) else self.name
    if in_cancel and self.is_return:
        invoice = self.return_against
    for item in self.items:
        if not item.serial_no:
            continue
        # serial numbers are stored newline-separated on the row
        for serial_no in item.serial_no.split("\n"):
            if serial_no and frappe.db.exists('Serial No', serial_no):
                sno = frappe.get_doc('Serial No', serial_no)
                sno.sales_invoice = invoice
                sno.db_update()
def validate_serial_numbers(self):
    """Run the serial-number checks in order: sync serials from the Delivery
    Note, validate them against the Delivery Note, then against other
    Sales Invoices."""
    for step in (self.set_serial_no_against_delivery_note,
                 self.validate_serial_against_delivery_note,
                 self.validate_serial_against_sales_invoice):
        step()
def set_serial_no_against_delivery_note(self):
    """When a row's qty disagrees with its serial-number count, re-pull the
    serial numbers from the linked Delivery Note row."""
    for row in self.items:
        if not (row.serial_no and row.delivery_note):
            continue
        if row.qty != len(get_serial_nos(row.serial_no)):
            row.serial_no = get_delivery_note_serial_no(row.item_code, row.qty, row.delivery_note)
def validate_serial_against_delivery_note(self):
    """
    Validate that serial numbers on each invoice row are a subset of the
    linked Delivery Note row's serials, and that the serial count matches
    the row qty.
    """
    for item in self.items:
        if not item.delivery_note or not item.dn_detail:
            continue
        serial_nos = frappe.db.get_value("Delivery Note Item", item.dn_detail, "serial_no") or ""
        dn_serial_nos = set(get_serial_nos(serial_nos))
        serial_nos = item.serial_no or ""
        si_serial_nos = set(get_serial_nos(serial_nos))
        if si_serial_nos - dn_serial_nos:
            # BUG FIX: format AFTER translating — _("...".format(...)) passes an
            # already-formatted string to the translation lookup, so it can never
            # match a catalog entry. The template must be the lookup key.
            frappe.throw(_("Serial Numbers in row {0} does not match with Delivery Note").format(item.idx))
        if item.serial_no and cint(item.qty) != len(si_serial_nos):
            frappe.throw(_("Row {0}: {1} Serial numbers required for Item {2}. You have provided {3}.").format(
                item.idx, item.qty, item.item_code, len(si_serial_nos)))
def validate_serial_against_sales_invoice(self):
    """Block submission when any serial number is already referenced by a
    different Sales Invoice."""
    for item in self.items:
        if not item.serial_no:
            continue
        for serial_no in item.serial_no.split("\n"):
            sales_invoice = frappe.db.get_value("Serial No", serial_no, "sales_invoice")
            if sales_invoice and self.name != sales_invoice:
                # BUG FIX: translate the template first, then format — the previous
                # _("...".format(...)) defeated the translation catalog lookup.
                frappe.throw(_("Serial Number: {0} is already referenced in Sales Invoice: {1}").format(
                    serial_no, sales_invoice))
def update_project(self):
    """Refresh the linked Project's billed amount after this invoice changes."""
    if not self.project:
        return
    project = frappe.get_doc("Project", self.project)
    # avoid re-syncing project tasks as a side effect of this save
    project.flags.dont_sync_tasks = True
    project.update_billed_amount()
    project.save()
def get_list_context(context=None):
    """Portal list-view settings for customer-facing invoices."""
    from erpnext.controllers.website_list_for_contact import get_list_context
    ctx = get_list_context(context)
    ctx.update(dict(
        show_sidebar=True,
        show_search=True,
        no_breadcrumbs=True,
        title=_('Invoices'),
    ))
    return ctx
@frappe.whitelist()
def get_bank_cash_account(mode_of_payment, company):
    """Return {'account': <default account>} for a Mode of Payment in a company,
    erroring out when no default account is configured."""
    default_account = frappe.db.get_value("Mode of Payment Account",
        {"parent": mode_of_payment, "company": company}, "default_account")
    if not default_account:
        frappe.throw(_("Please set default Cash or Bank account in Mode of Payment {0}")
            .format(mode_of_payment))
    return {"account": default_account}
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
    """Map a submitted Sales Invoice into a draft Delivery Note covering the
    still-undelivered quantities."""
    def set_missing_values(source, target):
        # recompute defaults and totals on the mapped document
        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")
        target.run_method("calculate_taxes_and_totals")
    def update_item(source_doc, target_doc, source_parent):
        # deliver only what is still outstanding on the invoice row
        target_doc.qty = flt(source_doc.qty) - flt(source_doc.delivered_qty)
        target_doc.stock_qty = target_doc.qty * flt(source_doc.conversion_factor)
        target_doc.base_amount = target_doc.qty * flt(source_doc.base_rate)
        target_doc.amount = target_doc.qty * flt(source_doc.rate)
    doclist = get_mapped_doc("Sales Invoice", source_name, {
        "Sales Invoice": {
            "doctype": "Delivery Note",
            "validation": {
                # only submitted invoices can produce a Delivery Note
                "docstatus": ["=", 1]
            }
        },
        "Sales Invoice Item": {
            "doctype": "Delivery Note Item",
            "field_map": {
                "name": "si_detail",
                "parent": "against_sales_invoice",
                "serial_no": "serial_no",
                "sales_order": "against_sales_order",
                "so_detail": "so_detail",
                "cost_center": "cost_center"
            },
            "postprocess": update_item,
            # drop-shipped rows are delivered by the supplier, not mapped here
            "condition": lambda doc: doc.delivered_by_supplier!=1
        },
        "Sales Taxes and Charges": {
            "doctype": "Sales Taxes and Charges",
            "add_if_empty": True
        },
        "Sales Team": {
            "doctype": "Sales Team",
            "field_map": {
                "incentives": "incentives"
            },
            "add_if_empty": True
        }
    }, target_doc, set_missing_values)
    return doclist
@frappe.whitelist()
def make_sales_return(source_name, target_doc=None):
    """Create a return (credit note) draft against the given Sales Invoice."""
    from erpnext.controllers.sales_and_purchase_return import make_return_doc
    return make_return_doc("Sales Invoice", source_name, target_doc)
def set_account_for_mode_of_payment(self):
    """Fill the account on each POS payment row from its Mode of Payment default."""
    for payment in self.payments:
        if payment.account:
            continue
        payment.account = get_bank_cash_account(payment.mode_of_payment, self.company).get("account")
| adityaduggal/erpnext | erpnext/accounts/doctype/sales_invoice/sales_invoice.py | Python | gpl-3.0 | 36,088 |
# ASCII-art webcam viewer: grabs frames via the legacy OpenCV `cv` API
# (Python 2) and renders them as characters in a curses terminal.
import cv
import sys
import math
import curses
import signal
# take over the terminal for character-cell rendering
stdscr = curses.initscr()
def signal_handler(sig, frame):
    """Handle Ctrl+C: restore the terminal, then report and exit.

    Fixes: endwin() now runs BEFORE printing, so the message is not garbled by
    the still-active curses screen; the first parameter is renamed from
    `signal` (which shadowed the module) to `sig`.
    """
    curses.endwin()
    print('You pressed Ctrl+C!')
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# NOTE(review): despite the name, `width` is used below as the number of
# character ROWS of the thumbnail (first CLI argument, default 50) — confirm.
width = int(sys.argv[1]) if len(sys.argv) > 1 else 50
# cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
# glyphs ordered dark -> bright for luminance mapping
palette = [' ', '.', '.', '/', 'c', '(', '@', '#', '8']
while True:
    # Capture the image
    img = cv.QueryFrame(capture)
    # Resize the image
    size = cv.GetSize(img)
    # NOTE(review): cv.GetSize returns (width, height); `height` here scales the
    # frame width by the configured `width` rows — confirm intended aspect handling.
    height = size[0] * width / size[1]
    thumbnail = cv.CreateImage(
        (height, width),
        img.depth,
        img.nChannels
    )
    cv.Resize(img, thumbnail)
    img = thumbnail
    # Print the output
    for x in xrange(img.height):
        for y in xrange(img.width):
            # pixels are BGR in OpenCV
            b, g, r = img[x, y]
            # luma-style weighted average (coefficients ~ITU-R 601 order)
            value = 0.1145 * b + g * 0.5866 + r * 0.2989
            # map 0..255 luminance onto a palette index
            index = int(math.floor(value / (256.0 / (len(palette)))))
            try:
                stdscr.move(x,y)
                stdscr.addch(palette[index])
            except:
                # ignore writes outside the terminal bounds
                pass
    stdscr.refresh()
import json
# filename suffixes for a user-pair's metadata / random-store files
METADATA_STEM = ".random.metadata"
STOREFILE_STEM = ".random.store"
# Not yet implemented
GROUP_STEM = ".g"
class NoMetadataException(Exception):
    """Raised when a metadata file is missing or cannot be opened."""
    pass
def get_storefile_name(uid, rid):
    """Filename of the random store for user `uid` and recipient `rid`."""
    return "{0}.{1}{2}".format(uid, rid, STOREFILE_STEM)
def get_metadatafile_name(uid, rid):
    """Filename of the metadata file for user `uid` and recipient `rid`."""
    return "{0}.{1}{2}".format(uid, rid, METADATA_STEM)
# Not yet implemented
def get_gstorefile_name(uid, gid):
    """Group-store filename for user `uid` and group `gid`."""
    return "{0}.{1}{2}{3}".format(uid, gid, GROUP_STEM, STOREFILE_STEM)
# Not yet implemented
def get_gmetadatafile_name(uid, gid):
    """Group-metadata filename for user `uid` and group `gid`."""
    return "{0}.{1}{2}{3}".format(uid, gid, GROUP_STEM, METADATA_STEM)
def read_metadata(filename):
    """Load and sanity-check a JSON metadata file.

    Raises NoMetadataException when the file cannot be opened; assertion
    failures and JSON decode errors propagate unchanged.

    NOTE(review): validation relies on `assert`, which is stripped under
    `python -O`, and the checksum uses hash(frozenset(...)), which with str
    hash randomization is only stable within one interpreter run — confirm
    files are written and read by the same process or with PYTHONHASHSEED set.
    """
    try:
        with open(filename, "r") as metadata:
            data = json.loads(metadata.read())
            # n_eles counts every key, including the checksum itself
            assert len(data.items()) == data["n_eles"]
            # checksum is computed over all entries except the checksum key
            data_check = dict(data)
            del data_check["checksum"]
            assert data["checksum"] == hash(frozenset(data_check.items()))
            assert data["uid"] != data["rid"]
            assert data["split_index"] >= 0 and data["split_index"] < data["n_bytes"]
            assert data["direction"] in [1,-1]
            assert data["rservice"] in ["random","urandom"]
            assert data["encrypt_index"] >= 0 and data["encrypt_index"] < data["n_bytes"]
            return data
    except IOError as ex:
        raise NoMetadataException(ex)
def update_metadata(metadata, updates):
    """Apply `updates` to `metadata` in place and refresh its checksum.

    The element count `n_eles` is immutable; attempting to change it raises
    ValueError. Returns the mutated metadata dict.
    """
    metadata.pop("checksum")
    for key, value in updates.items():
        if key == "n_eles":
            raise ValueError("Cannot change n_eles, wtf are you doing")
        metadata[key] = value
    metadata["checksum"] = hash(frozenset(metadata.items()))
    return metadata
| mlsteele/one-time-chat | device/metadata.py | Python | mit | 1,672 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.