gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""
Get API information encoded in C files.
See ``find_function`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
"""
from numpy.distutils.conv_template import process_file as process_c_file
import hashlib
import io
import os
import re
import sys
import textwrap
from os.path import join
__docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'alloc.c'),
             join('multiarray', 'arrayfunction_override.c'),
             join('multiarray', 'array_assign_array.c'),
             join('multiarray', 'array_assign_scalar.c'),
             join('multiarray', 'arrayobject.c'),
             join('multiarray', 'arraytypes.c.src'),
             join('multiarray', 'buffer.c'),
             join('multiarray', 'calculation.c'),
             join('multiarray', 'conversion_utils.c'),
             join('multiarray', 'convert.c'),
             join('multiarray', 'convert_datatype.c'),
             join('multiarray', 'ctors.c'),
             join('multiarray', 'datetime.c'),
             join('multiarray', 'datetime_busday.c'),
             join('multiarray', 'datetime_busdaycal.c'),
             join('multiarray', 'datetime_strings.c'),
             join('multiarray', 'descriptor.c'),
             join('multiarray', 'dtypemeta.c'),
             join('multiarray', 'einsum.c.src'),
             join('multiarray', 'flagsobject.c'),
             join('multiarray', 'getset.c'),
             join('multiarray', 'item_selection.c'),
             join('multiarray', 'iterators.c'),
             join('multiarray', 'mapping.c'),
             join('multiarray', 'methods.c'),
             join('multiarray', 'multiarraymodule.c'),
             join('multiarray', 'nditer_api.c'),
             join('multiarray', 'nditer_constr.c'),
             join('multiarray', 'nditer_pywrap.c'),
             join('multiarray', 'nditer_templ.c.src'),
             join('multiarray', 'number.c'),
             join('multiarray', 'refcount.c'),
             join('multiarray', 'scalartypes.c.src'),
             join('multiarray', 'scalarapi.c'),
             join('multiarray', 'sequence.c'),
             join('multiarray', 'shape.c'),
             join('multiarray', 'strfuncs.c'),
             join('multiarray', 'usertypes.c'),
             join('umath', 'loops.c.src'),
             join('umath', 'ufunc_object.c'),
             join('umath', 'ufunc_type_resolution.c'),
             join('umath', 'reduction.c'),
             ]
THIS_DIR = os.path.dirname(__file__)
# Resolve the scan list relative to this script's location (../src).
API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
def file_in_this_dir(filename):
    """Return *filename* resolved relative to this script's directory."""
    return join(THIS_DIR, filename)
def remove_whitespace(s):
    """Return *s* with every whitespace character removed."""
    return re.sub(r'\s+', '', s)
def _repl(str):
return str.replace('Bool', 'npy_bool')
class StealRef:
    """Annotation naming the argument position(s) whose reference is stolen."""

    def __init__(self, arg):
        # `arg` is a 1-based argument position, or an iterable of them.
        self.arg = arg  # counting from 1

    def __str__(self):
        template = 'NPY_STEALS_REF_TO_ARG(%d)'
        try:
            return ' '.join(template % pos for pos in self.arg)
        except TypeError:
            # `arg` was a single, non-iterable position.
            return template % self.arg
class NonNull:
    """Annotation naming the argument position(s) that must not be NULL."""

    def __init__(self, arg):
        # `arg` is a 1-based argument position, or an iterable of them.
        self.arg = arg  # counting from 1

    def __str__(self):
        template = 'NPY_GCC_NONNULL(%d)'
        try:
            return ' '.join(template % pos for pos in self.arg)
        except TypeError:
            # `arg` was a single, non-iterable position.
            return template % self.arg
class Function:
    """A tagged C API function parsed out of the numpy sources.

    Holds the name, return type (with ``Bool`` normalized), argument list as
    ``(typename, name)`` pairs, and an optional ReST doc comment.
    """

    def __init__(self, name, return_type, args, doc=''):
        self.name = name
        self.return_type = _repl(return_type)
        self.args = args
        self.doc = doc

    def _format_arg(self, typename, name):
        # Pointer types already end in '*'; don't insert a space before name.
        if typename.endswith('*'):
            return typename + name
        else:
            return typename + ' ' + name

    def __str__(self):
        """Render the function as a one-line C declaration."""
        argstr = ', '.join([self._format_arg(*a) for a in self.args])
        if self.doc:
            doccomment = '/* %s */\n' % self.doc
        else:
            doccomment = ''
        return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr)

    def to_ReST(self):
        """Render the signature (plus doc) as a ReST literal block."""
        lines = ['::', '', ' ' + self.return_type]
        # '\000' marks argument boundaries so textwrap never splits inside
        # an argument; it is turned back into a space below.
        argstr = ',\000'.join([self._format_arg(*a) for a in self.args])
        name = ' %s' % (self.name,)
        s = textwrap.wrap('(%s)' % (argstr,), width=72,
                          initial_indent=name,
                          subsequent_indent=' ' * (len(name) + 1),
                          break_long_words=False)
        for l in s:
            lines.append(l.replace('\000', ' ').rstrip())
        lines.append('')
        if self.doc:
            lines.append(textwrap.dedent(self.doc))
        return '\n'.join(lines)

    def api_hash(self):
        """Return an 8-hex-digit digest of the whitespace-free signature.

        Fixed for Python 3: ``hashlib`` digests require bytes, so every
        component is ASCII-encoded before hashing (C identifiers and type
        names are ASCII).
        """
        m = hashlib.md5()
        m.update(remove_whitespace(self.return_type).encode('ascii'))
        m.update(b'\000')
        m.update(self.name.encode('ascii'))
        m.update(b'\000')
        for typename, name in self.args:
            m.update(remove_whitespace(typename).encode('ascii'))
            m.update(b'\000')
        return m.hexdigest()[:8]
class ParseError(Exception):
    """Raised when a tagged API declaration cannot be parsed."""

    def __init__(self, filename, lineno, msg):
        self.filename = filename
        self.lineno = lineno
        self.msg = msg

    def __str__(self):
        return f'{self.filename}:{self.lineno}:{self.msg}'
def skip_brackets(s, lbrac, rbrac):
    """Return the index in *s* where the bracket depth first returns to zero.

    *s* is expected to start with (or soon contain) *lbrac*; raises
    ValueError when the brackets never balance.
    """
    depth = 0
    for pos, ch in enumerate(s):
        if ch == lbrac:
            depth += 1
        elif ch == rbrac:
            depth -= 1
        if depth == 0:
            return pos
    raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s))
def split_arguments(argstr):
    """Split a C argument-list string into (typename, name) pairs.

    Commas inside balanced parentheses (e.g. function-pointer arguments)
    are not treated as argument separators.
    """
    arguments = []
    current_argument = []
    i = 0

    def finish_arg():
        # Flush the accumulated characters as one (typename, name) pair.
        if current_argument:
            argstr = ''.join(current_argument).strip()
            # The name is the trailing identifier; everything before it
            # (ending in whitespace or '*') is the type.
            m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr)
            if m:
                typename = m.group(1).strip()
                name = m.group(3)
            else:
                # No separate name found (e.g. 'void'): all type, no name.
                typename = argstr
                name = ''
            arguments.append((typename, name))
            del current_argument[:]
    while i < len(argstr):
        c = argstr[i]
        if c == ',':
            finish_arg()
        elif c == '(':
            # Copy up to (but not including) the matching ')' and jump
            # there; the ')' itself is appended on the next iteration.
            p = skip_brackets(argstr[i:], '(', ')')
            current_argument += argstr[i:i+p]
            i += p-1
        else:
            current_argument += c
        i += 1
    finish_arg()
    return arguments
def find_functions(filename, tag='API'):
    """
    Scan the file, looking for tagged functions.

    Assuming ``tag=='API'``, a tagged function looks like::

        /*API*/
        static returntype*
        function_name(argtype1 arg1, argtype2 arg2)
        {
        }

    where the return type must be on a separate line, the function
    name must start the line, and the opening ``{`` must start the line.

    An optional documentation comment in ReST format may follow the tag,
    as in::

        /*API
          This function does foo...
         */

    Returns a list of ``Function`` objects.
    """
    if filename.endswith(('.c.src', '.h.src')):
        # Templated sources are expanded in memory before scanning.
        fo = io.StringIO(process_c_file(filename))
    else:
        fo = open(filename, 'r')
    functions = []
    return_type = None
    function_name = None
    function_args = []
    doclist = []
    # Line-oriented state machine over the C source.
    SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = list(range(5))
    state = SCANNING
    tagcomment = '/*' + tag
    for lineno, line in enumerate(fo):
        try:
            line = line.strip()
            if state == SCANNING:
                if line.startswith(tagcomment):
                    # One-line tag comment has no doc; multi-line does.
                    if line.endswith('*/'):
                        state = STATE_RETTYPE
                    else:
                        state = STATE_DOC
            elif state == STATE_DOC:
                if line.startswith('*/'):
                    state = STATE_RETTYPE
                else:
                    # Strip the leading ' * ' comment decoration.
                    line = line.lstrip(' *')
                    doclist.append(line)
            elif state == STATE_RETTYPE:
                # first line of declaration with return type
                m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line)
                if m:
                    line = m.group(1)
                return_type = line
                state = STATE_NAME
            elif state == STATE_NAME:
                # second line, with function name
                m = re.match(r'(\w+)\s*\(', line)
                if m:
                    function_name = m.group(1)
                else:
                    raise ParseError(filename, lineno+1,
                                     'could not find function name')
                # Keep whatever follows '(' — start of the argument list.
                function_args.append(line[m.end():])
                state = STATE_ARGS
            elif state == STATE_ARGS:
                if line.startswith('{'):
                    # finished
                    # remove any white space and the closing bracket:
                    fargs_str = ' '.join(function_args).rstrip()[:-1].rstrip()
                    fargs = split_arguments(fargs_str)
                    f = Function(function_name, return_type, fargs,
                                 '\n'.join(doclist))
                    functions.append(f)
                    return_type = None
                    function_name = None
                    function_args = []
                    doclist = []
                    state = SCANNING
                else:
                    function_args.append(line)
        except Exception:
            # Report where the scan failed before re-raising.
            print(filename, lineno + 1)
            raise
    fo.close()
    return functions
def should_rebuild(targets, source_files):
    """Return True when any target is missing or older than its sources.

    Sources are the scanned API files, *source_files*, and this script.
    """
    # NOTE(review): distutils is deprecated (removed in Python 3.12) —
    # this import will need a replacement eventually.
    from distutils.dep_util import newer_group
    for t in targets:
        if not os.path.exists(t):
            return True
    sources = API_FILES + list(source_files) + [__file__]
    # missing='newer' forces a rebuild when a listed source has vanished.
    if newer_group(sources, targets[0], missing='newer'):
        return True
    return False
def write_file(filename, data):
    """
    Write *data* to *filename*.

    The write is skipped when the on-disk content already equals *data*,
    so timestamps are not updated unnecessarily.
    """
    if os.path.exists(filename):
        with open(filename) as fid:
            existing = fid.read()
        if existing == data:
            return
    with open(filename, 'w') as fid:
        fid.write(data)
# Those *Api classes instances know how to output strings for the generated code
class TypeApi:
    """Code generator for a Python type object exposed in the C API table."""

    def __init__(self, name, index, ptr_cast, api_name, internal_type=None):
        self.index = index
        self.name = name
        self.ptr_cast = ptr_cast
        self.api_name = api_name
        # The type used internally, if None, same as exported (ptr_cast)
        self.internal_type = internal_type

    def define_from_array_api_string(self):
        """#define used by consumers of the exported API table."""
        return (f"#define {self.name} "
                f"(*({self.ptr_cast} *){self.api_name}[{self.index}])")

    def array_api_define(self):
        """Entry placed in the API table itself."""
        return f" (void *) &{self.name}"

    def internal_define(self):
        """Extern declaration used when building numpy itself."""
        if self.internal_type is None:
            return f"extern NPY_NO_EXPORT {self.ptr_cast} {self.name};\n"
        # A larger struct is defined internally, to which the exported type
        # can be safely cast.  Declare it under a mangled name and #define
        # the public name as a cast back to the exported type.
        mangled_name = f"{self.name}Full"
        return (
            f"extern NPY_NO_EXPORT {self.internal_type} {mangled_name};\n"
            f"#define {self.name} (*({self.ptr_cast} *)(&{mangled_name}))\n"
        )
class GlobalVarApi:
    """Code generator for a global variable exposed in the C API table."""

    def __init__(self, name, index, type, api_name):
        self.name = name
        self.index = index
        self.type = type
        self.api_name = api_name

    def define_from_array_api_string(self):
        """#define used by consumers of the exported API table."""
        return (f"#define {self.name} "
                f"(*({self.type} *){self.api_name}[{self.index}])")

    def array_api_define(self):
        """Entry placed in the API table itself."""
        return f" ({self.type} *) &{self.name}"

    def internal_define(self):
        """Extern declaration used when building numpy itself."""
        return f"extern NPY_NO_EXPORT {self.type} {self.name};\n"
# Dummy to be able to consistently use *Api instances for all items in the
# array api
class BoolValuesApi:
    """Code generator for the bool scalar singletons in the C API table.

    Dummy, kept so *Api instances can be used consistently for all items
    in the array api.
    """

    def __init__(self, name, index, api_name):
        self.name = name
        self.index = index
        self.type = 'PyBoolScalarObject'
        self.api_name = api_name

    def define_from_array_api_string(self):
        """#define used by consumers of the exported API table."""
        return (f"#define {self.name} "
                f"(({self.type} *){self.api_name}[{self.index}])")

    def array_api_define(self):
        """Entry placed in the API table itself."""
        return f" (void *) &{self.name}"

    def internal_define(self):
        """Extern declaration used when building numpy itself."""
        return ("extern NPY_NO_EXPORT PyBoolScalarObject "
                "_PyArrayScalar_BoolValues[2];\n")
class FunctionApi:
    """Code generator for a function exposed in the C API table."""

    def __init__(self, name, index, annotations, return_type, args, api_name):
        self.name = name
        self.index = index
        self.annotations = annotations
        self.return_type = return_type
        self.args = args
        self.api_name = api_name

    def _argtypes_string(self):
        # An empty argument list is spelled 'void' in C.
        if not self.args:
            return 'void'
        return ', '.join(_repl(arg[0]) for arg in self.args)

    def define_from_array_api_string(self):
        """#define casting the table slot to the proper function pointer."""
        define = """\
#define %s \\\n (*(%s (*)(%s)) \\
%s[%d])""" % (self.name,
              self.return_type,
              self._argtypes_string(),
              self.api_name,
              self.index)
        return define

    def array_api_define(self):
        """Entry placed in the API table itself."""
        return " (void *) %s" % self.name

    def internal_define(self):
        """Prototype (with annotations) used when building numpy itself."""
        annstr = ' '.join(str(a) for a in self.annotations)
        astr = """\
NPY_NO_EXPORT %s %s %s \\\n (%s);""" % (annstr, self.return_type,
                                        self.name,
                                        self._argtypes_string())
        return astr
def order_dict(d):
    """Order dict by its values (ties broken by key)."""
    return sorted(d.items(), key=lambda item: item[1] + (item[0],))
def merge_api_dicts(dicts):
    """Merge a sequence of dicts into one; later dicts win on key clashes."""
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
def check_api_dict(d):
    """Check that an api dict is valid (does not use the same index twice).

    Also verifies the indexes form a contiguous range starting at 0.
    Raises ValueError on any violation.
    """
    # Keep only the index part of each value tuple.
    index_d = {k: v[0] for k, v in d.items()}
    # Inverting the mapping collapses duplicate indexes, so a size change
    # means at least one index was used twice.
    revert_dict = {v: k for k, v in index_d.items()}
    if len(revert_dict) != len(index_d):
        # Group the names by index so the message shows every culprit.
        doubled = {}
        for name, index in index_d.items():
            doubled.setdefault(index, []).append(name)
        fmt = "Same index has been used twice in api definition: {}"
        val = ''.join(
            '\n\tindex {} -> {}'.format(index, names)
            for index, names in doubled.items() if len(names) != 1
        )
        raise ValueError(fmt.format(val))
    # No 'hole' in the indexes may be allowed, and it must start at 0.
    indexes = set(index_d.values())
    expected = set(range(len(indexes)))
    if indexes != expected:
        diff = expected.symmetric_difference(indexes)
        msg = "There are some holes in the API indexing: " \
              "(symmetric diff is %s)" % diff
        raise ValueError(msg)
def get_api_functions(tagname, api_dict):
    """Parse source files to get functions tagged by the given tag.

    The result is ordered by each function's index in *api_dict*.
    """
    functions = []
    for f in API_FILES:
        functions.extend(find_functions(f, tagname))
    # Decorate with the API index, sort, undecorate.
    dfunctions = [(api_dict[func.name][0], func) for func in functions]
    dfunctions.sort()
    return [a[1] for a in dfunctions]
def fullapi_hash(api_dicts):
    """Given a list of api dicts defining the numpy C API, compute a checksum
    of the list of items in the API (as a string)."""
    a = []
    for d in api_dicts:
        # order_dict gives a deterministic iteration order, so the digest
        # is stable across runs.
        for name, data in order_dict(d):
            # extend() appends character by character; only the final
            # concatenation matters for the hash.
            a.extend(name)
            a.extend(','.join(map(str, data)))
    return hashlib.md5(''.join(a).encode('ascii')).hexdigest()
# To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and
# checksum a 128 bits md5 checksum (hex format as well)
VERRE = re.compile(r'(^0x[\da-f]{8})\s*=\s*([\da-f]{32})')


def get_versions_hash():
    """Read cversions.txt and return {api_version_int: md5_hex_string}."""
    d = []
    file = os.path.join(os.path.dirname(__file__), 'cversions.txt')
    with open(file, 'r') as fid:
        for line in fid:
            m = VERRE.match(line)
            # Lines that don't match (comments, blanks) are skipped.
            if m:
                d.append((int(m.group(1), 16), m.group(2)))
    return dict(d)
def main():
    """Scan the API files for *tag* and print each function and its hash.

    Usage: ``python genapi.py <tagname> <order_file>``
    """
    tagname = sys.argv[1]
    order_file = sys.argv[2]
    # NOTE(review): get_api_functions expects an api dict keyed by function
    # name, but receives the order-file *path* here — confirm intent.
    functions = get_api_functions(tagname, order_file)
    # hashlib requires bytes on Python 3; tag names and hex digests are ASCII.
    m = hashlib.md5(tagname.encode('ascii'))
    for func in functions:
        print(func)
        ah = func.api_hash()
        m.update(ah.encode('ascii'))
        print(hex(int(ah, 16)))
    print(hex(int(m.hexdigest()[:8], 16)))
| |
import copy
import datetime
import json
from collections import defaultdict
from google.appengine.ext import ndb
from pyre_extensions import none_throws
from backend.common.consts.alliance_color import ALLIANCE_COLORS, AllianceColor
from backend.common.consts.comp_level import CompLevel
from backend.common.helpers.match_helper import MatchHelper
from backend.common.manipulators.event_details_manipulator import (
EventDetailsManipulator,
)
from backend.common.manipulators.match_manipulator import MatchManipulator
from backend.common.models.event import Event
from backend.common.models.event_details import EventDetails
from backend.common.models.match import Match
from backend.common.tests.fixture_loader import load_fixture
class EventSimulator:
    """
    Steps through an event in time. At step = 0, only the Event exists:
    (step 0) Add all unplayed qual matches
    (step 1, substep n) Add results of each of the n qual matches +
        rankings being updated (if has_event_details)
    (step 2) Add alliance selections (if has_event_details)
    (step 3) Add unplayed QF matches
    (step 4, substep n) Add results of each of the n QF matches +
        update SF matches with advancing alliances (if not batch_advance) +
        update alliance selection backups (if has_event_details)
    (step 5) Add unplayed SF matches (if batch_advance)
    (step 6, substep n) Add results of each of the n SF matches +
        update F matches with advancing alliances (if not batch_advance) +
        update alliance selection backups (if has_event_details)
    (step 7) Add unplayed F matches (if batch_advance)
    (step 8, substep n) Add results of each of the n F matches +
        update alliance selection backups (if has_event_details)
    """

    def __init__(self, has_event_details=True, batch_advance=False) -> None:
        self._step = 0
        self._substep = 0
        # whether to update rankings and alliance selections
        self._has_event_details = has_event_details
        # whether to update next playoff level all at once, or as winners are determined
        self._batch_advance = batch_advance

        # Load and save complete data for 2016nytr; the pieces are deleted
        # again below so step() can re-add them incrementally.
        load_fixture(
            "test_data/fixtures/2016nytr_event_team_status.json",
            kind={"EventDetails": EventDetails, "Event": Event, "Match": Match},
            post_processor=self._event_key_adder,
        )

        event = Event.get_by_id("2016nytr")

        # Add 3rd matches that never got played
        unplayed_matches = [
            Match(
                id="2016nytr_qf1m3",
                year=2016,
                event=event.key,
                comp_level="qf",
                set_number=1,
                match_number=3,
                alliances_json=json.dumps(
                    {
                        "red": {
                            "teams": ["frc3990", "frc359", "frc4508"],
                            "score": -1,
                        },
                        "blue": {
                            "teams": ["frc3044", "frc4930", "frc4481"],
                            "score": -1,
                        },
                    }
                ),
                time=datetime.datetime(2016, 3, 19, 18, 34),
            ),
            Match(
                id="2016nytr_qf3m3",
                year=2016,
                event=event.key,
                comp_level="qf",
                set_number=3,
                match_number=3,
                alliances_json=json.dumps(
                    {
                        "red": {
                            "teams": ["frc20", "frc5254", "frc229"],
                            "score": -1,
                        },
                        "blue": {
                            "teams": ["frc3003", "frc358", "frc527"],
                            "score": -1,
                        },
                    }
                ),
                time=datetime.datetime(2016, 3, 19, 18, 48),
            ),
            Match(
                id="2016nytr_sf1m3",
                year=2016,
                event=event.key,
                comp_level="sf",
                set_number=1,
                match_number=3,
                alliances_json=json.dumps(
                    {
                        "red": {
                            "teams": ["frc3990", "frc359", "frc4508"],
                            "score": -1,
                        },
                        "blue": {
                            "teams": ["frc5240", "frc3419", "frc663"],
                            "score": -1,
                        },
                    }
                ),
                time=datetime.datetime(2016, 3, 19, 19, 42),
            ),
        ]

        self._event_details = event.details
        # Alliance selections as they stood before the backup was called in
        # (the second alliance's backup slot is cleared).
        self._alliance_selections_without_backup = copy.deepcopy(
            event.details.alliance_selections
        )
        self._alliance_selections_without_backup[1]["backup"] = None
        self._played_matches = MatchHelper.organized_matches(event.matches)[1]
        self._all_matches = MatchHelper.organized_matches(
            event.matches + unplayed_matches
        )[1]

        # Delete data
        event.details.key.delete()
        ndb.delete_multi([match.key for match in event.matches])
        ndb.get_context().clear_cache()

        # Used to keep track of non-batch advancement
        self._advancement_alliances = defaultdict(dict)

    def _event_key_adder(self, obj) -> None:
        # Fixture post-processor: attach each loaded model to the event key.
        obj.event = ndb.Key(Event, "2016nytr")

    def _update_rankings(self) -> None:
        """
        Generates and saves fake rankings
        """
        event = none_throws(Event.get_by_id("2016nytr"))

        team_wins = defaultdict(int)
        team_losses = defaultdict(int)
        team_ties = defaultdict(int)
        teams = set()
        for match in event.matches:
            if match.comp_level == CompLevel.QM:
                for alliance in ALLIANCE_COLORS:
                    for team in match.alliances[alliance]["teams"]:
                        teams.add(team)
                        if match.has_been_played:
                            if alliance == match.winning_alliance:
                                team_wins[team] += 1
                            elif match.winning_alliance == "":
                                # Empty winning_alliance means a tie.
                                team_ties[team] += 1
                            else:
                                team_losses[team] += 1

        rankings = []
        for team in sorted(teams):
            wins = team_wins[team]
            losses = team_losses[team]
            ties = team_ties[team]
            rankings.append(
                {
                    "team_key": team,
                    "record": {
                        "wins": wins,
                        "losses": losses,
                        "ties": ties,
                    },
                    "matches_played": wins + losses + ties,
                    "dq": 0,
                    # Primary sort order: 2 points per win, 1 per tie.
                    "sort_orders": [2 * wins + ties, 0, 0, 0, 0],
                    "qual_average": None,
                }
            )
        rankings = sorted(rankings, key=lambda r: -r["sort_orders"][0])
        for i, ranking in enumerate(rankings):
            ranking["rank"] = i + 1
        EventDetailsManipulator.createOrUpdate(
            EventDetails(
                id="2016nytr",
                rankings2=rankings,
            )
        )

    def step(self) -> None:
        """Advance the simulated event by one (step, substep) increment."""
        if self._step == 0:  # Qual match schedule added
            for match in copy.deepcopy(self._all_matches[CompLevel.QM]):
                # Mark as unplayed: -1 scores, no breakdown, no actual time.
                for alliance in ALLIANCE_COLORS:
                    match.alliances[alliance]["score"] = -1
                match.alliances_json = json.dumps(match.alliances)
                match.score_breakdown_json = None
                match.actual_time = None
                MatchManipulator.createOrUpdate(match)
            self._step += 1
        elif self._step == 1:  # After each qual match
            MatchManipulator.createOrUpdate(
                self._played_matches[CompLevel.QM][self._substep]
            )
            if self._substep < len(self._played_matches[CompLevel.QM]) - 1:
                self._substep += 1
            else:
                self._step += 1
                self._substep = 0
                EventDetailsManipulator.createOrUpdate(EventDetails(id="2016nytr"))
        elif self._step == 2:  # After alliance selections
            EventDetailsManipulator.createOrUpdate(
                EventDetails(
                    id="2016nytr",
                    alliance_selections=self._alliance_selections_without_backup,
                )
            )
            ndb.get_context().flush()
            ndb.get_context().clear_cache()
            self._step += 1
        elif self._step == 3:  # QF schedule added
            for match in copy.deepcopy(self._all_matches[CompLevel.QF]):
                for alliance in ALLIANCE_COLORS:
                    match.alliances[alliance]["score"] = -1
                match.alliances_json = json.dumps(match.alliances)
                match.score_breakdown_json = None
                match.actual_time = None
                MatchManipulator.createOrUpdate(match)
            self._step += 1
        elif self._step == 4:  # After each QF match
            new_match = MatchHelper.play_order_sorted_matches(
                self._played_matches[CompLevel.QF]
            )[self._substep]
            MatchManipulator.createOrUpdate(new_match)
            if not self._batch_advance:
                # Count wins in this set so far to detect a 2-win clinch.
                win_counts = {
                    AllianceColor.RED: 0,
                    AllianceColor.BLUE: 0,
                }
                for i in range(new_match.match_number):
                    win_counts[
                        none_throws(  # pyre-ignore[6]
                            Match.get_by_id(
                                Match.renderKeyName(
                                    none_throws(new_match.event.string_id()),
                                    new_match.comp_level,
                                    new_match.set_number,
                                    i + 1,
                                )
                            )
                        ).winning_alliance
                    ] += 1
                for alliance, wins in win_counts.items():
                    if wins == 2:
                        # QF sets 1 & 2 feed SF1; sets 3 & 4 feed SF2.
                        s = new_match.set_number
                        if s in {1, 2}:
                            self._advancement_alliances["sf1"][
                                AllianceColor.RED if s == 1 else AllianceColor.BLUE
                            ] = new_match.alliances[alliance]["teams"]
                        elif s in {3, 4}:
                            self._advancement_alliances["sf2"][
                                AllianceColor.RED if s == 3 else AllianceColor.BLUE
                            ] = new_match.alliances[alliance]["teams"]
                        else:
                            raise Exception("Invalid set number: {}".format(s))

                        # (Re)write the unplayed SF matches with whatever
                        # advancing alliances are known so far.
                        for match_set, alliances in self._advancement_alliances.items():
                            if match_set.startswith("sf"):
                                for i in range(3):
                                    for match in copy.deepcopy(
                                        self._all_matches[CompLevel.SF]
                                    ):
                                        key = "2016nytr_{}m{}".format(match_set, i + 1)
                                        if match.key.id() == key:
                                            for color in ALLIANCE_COLORS:
                                                match.alliances[color]["score"] = -1
                                                match.alliances[color][
                                                    "teams"
                                                ] = alliances.get(color, [])
                                            match.alliances_json = json.dumps(
                                                match.alliances
                                            )
                                            match.score_breakdown_json = None
                                            match.actual_time = None
                                            MatchManipulator.createOrUpdate(match)
            if self._substep < len(self._played_matches[CompLevel.QF]) - 1:
                self._substep += 1
            else:
                # Non-batch mode already wrote the SF schedule above, so
                # skip step 5.
                self._step += 1 if self._batch_advance else 2
                self._substep = 0
        elif self._step == 5:  # SF schedule added
            if self._batch_advance:
                for match in copy.deepcopy(self._all_matches[CompLevel.SF]):
                    for alliance in ALLIANCE_COLORS:
                        match.alliances[alliance]["score"] = -1
                    match.alliances_json = json.dumps(match.alliances)
                    match.score_breakdown_json = None
                    match.actual_time = None
                    MatchManipulator.createOrUpdate(match)
                self._step += 1
        elif self._step == 6:  # After each SF match
            new_match = MatchHelper.play_order_sorted_matches(
                self._played_matches[CompLevel.SF]
            )[self._substep]
            MatchManipulator.createOrUpdate(new_match)
            if not self._batch_advance:
                win_counts = {
                    AllianceColor.RED: 0,
                    AllianceColor.BLUE: 0,
                }
                for i in range(new_match.match_number):
                    win_counts[
                        none_throws(  # pyre-ignore[6]
                            Match.get_by_id(
                                Match.renderKeyName(
                                    none_throws(new_match.event.string_id()),
                                    new_match.comp_level,
                                    new_match.set_number,
                                    i + 1,
                                )
                            )
                        ).winning_alliance
                    ] += 1
                for alliance, wins in win_counts.items():
                    if wins == 2:
                        # SF1 winner fills red in F1, SF2 winner fills blue.
                        self._advancement_alliances["f1"][
                            AllianceColor.RED
                            if new_match.set_number == 1
                            else AllianceColor.BLUE
                        ] = new_match.alliances[alliance]["teams"]

                        for match_set, alliances in self._advancement_alliances.items():
                            if match_set.startswith("f"):
                                for i in range(3):
                                    for match in copy.deepcopy(
                                        self._all_matches[CompLevel.F]
                                    ):
                                        key = "2016nytr_{}m{}".format(match_set, i + 1)
                                        if match.key.id() == key:
                                            for color in ALLIANCE_COLORS:
                                                match.alliances[color]["score"] = -1
                                                match.alliances[color][
                                                    "teams"
                                                ] = alliances.get(color, [])
                                            match.alliances_json = json.dumps(
                                                match.alliances
                                            )
                                            match.score_breakdown_json = None
                                            match.actual_time = None
                                            MatchManipulator.createOrUpdate(match)
            # Backup robot introduced
            if self._substep == 3:
                EventDetailsManipulator.createOrUpdate(
                    EventDetails(
                        id="2016nytr",
                        alliance_selections=self._event_details.alliance_selections,
                    )
                )
            if self._substep < len(self._played_matches[CompLevel.SF]) - 1:
                self._substep += 1
            else:
                self._step += 1 if self._batch_advance else 2
                self._substep = 0
        elif self._step == 7:  # F schedule added
            if self._batch_advance:
                for match in copy.deepcopy(self._all_matches[CompLevel.F]):
                    for alliance in ALLIANCE_COLORS:
                        match.alliances[alliance]["score"] = -1
                    match.alliances_json = json.dumps(match.alliances)
                    match.score_breakdown_json = None
                    match.actual_time = None
                    MatchManipulator.createOrUpdate(match)
                self._step += 1
        elif self._step == 8:  # After each F match
            MatchManipulator.createOrUpdate(
                MatchHelper.play_order_sorted_matches(
                    self._played_matches[CompLevel.F]
                )[self._substep]
            )
            if self._substep < len(self._played_matches[CompLevel.F]) - 1:
                self._substep += 1
            else:
                self._step += 1
                self._substep = 0

        ndb.get_context().clear_cache()
        # Re fetch event matches
        # event = Event.get_by_id("2016nytr")
        # MatchHelper.delete_invalid_matches(event.matches, event)
        # ndb.get_context().clear_cache()
        # NOTE(review): the class docstring says rankings update only when
        # has_event_details is set, but this call is unconditional — confirm.
        self._update_rankings()
| |
import responses
import unittest
import time
from tests.support import with_resource, with_fixture, characters
from twitter_ads.account import Account
from twitter_ads.campaign import Campaign
from twitter_ads.client import Client
from twitter_ads.cursor import Cursor
from twitter_ads.http import Request
from twitter_ads.resource import Resource
from twitter_ads import API_VERSION
from twitter_ads.error import RateLimit
@responses.activate
def test_rate_limit_handle_with_retry_success_1(monkeypatch):
    # scenario:
    # - 500 (retry) -> 429 (handle rate limit) -> 200 (end)
    # Patch sleep so retry/rate-limit waits return immediately.
    monkeypatch.setattr(time, 'sleep', lambda s: None)

    # Account load always succeeds.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph'),
                  body=with_fixture('accounts_load'),
                  content_type='application/json')
    # 1st campaigns call: 500 -> matches retry_on_status.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=500,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '0',
                      'x-account-rate-limit-reset': '1546300800'
                  })
    # 2nd campaigns call: 429 with a reset time in the near future.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=429,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '0',
                      'x-account-rate-limit-reset': str(int(time.time()) + 5)
                  })
    # 3rd campaigns call: 200 -> success.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=200,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '9999',
                      'x-account-rate-limit-reset': '1546300800'
                  })

    client = Client(
        characters(40),
        characters(40),
        characters(40),
        characters(40),
        options={
            'handle_rate_limit': True,
            'retry_max': 1,
            'retry_delay': 3000,
            'retry_on_status': [500]
        }
    )
    account = Account.load(client, '2iqph')
    cursor = Campaign.all(account)
    # 1 account load + 3 campaigns attempts.
    assert len(responses.calls) == 4
    assert cursor is not None
    assert isinstance(cursor, Cursor)
    # Headers of the final (successful) response are exposed on the cursor.
    assert cursor.account_rate_limit_limit == '10000'
    assert cursor.account_rate_limit_remaining == '9999'
    assert cursor.account_rate_limit_reset == '1546300800'
@responses.activate
def test_rate_limit_handle_with_retry_success_2(monkeypatch):
    # scenario:
    # - 429 (handle rate limit) -> 500 (retry) -> 200 (end)
    # Patch sleep so retry/rate-limit waits return immediately.
    monkeypatch.setattr(time, 'sleep', lambda s: None)

    # Account load always succeeds.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph'),
                  body=with_fixture('accounts_load'),
                  content_type='application/json')
    # 1st campaigns call: 429 -> rate-limit handling kicks in.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=429,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '0',
                      'x-account-rate-limit-reset': '1546300800'
                  })
    # 2nd campaigns call: 500 -> matches retry_on_status.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=500,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '0',
                      'x-account-rate-limit-reset': str(int(time.time()) + 5)
                  })
    # 3rd campaigns call: 200 -> success.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=200,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '9999',
                      'x-account-rate-limit-reset': '1546300800'
                  })

    client = Client(
        characters(40),
        characters(40),
        characters(40),
        characters(40),
        options={
            'handle_rate_limit': True,
            'retry_max': 1,
            'retry_delay': 3000,
            'retry_on_status': [500]
        }
    )
    account = Account.load(client, '2iqph')
    cursor = Campaign.all(account)
    # 1 account load + 3 campaigns attempts.
    assert len(responses.calls) == 4
    assert cursor is not None
    assert isinstance(cursor, Cursor)
    assert cursor.account_rate_limit_limit == '10000'
    assert cursor.account_rate_limit_remaining == '9999'
    assert cursor.account_rate_limit_reset == '1546300800'
@responses.activate
def test_rate_limit_handle_success(monkeypatch):
    # scenario: 429 (handle rate limit) -> 200 (end)
    # Patch sleep so the rate-limit wait returns immediately.
    monkeypatch.setattr(time, 'sleep', lambda s: None)

    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph'),
                  body=with_fixture('accounts_load'),
                  content_type='application/json')
    # 1st campaigns call: 429 with a reset time in the near future.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=429,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '0',
                      'x-account-rate-limit-reset': str(int(time.time()) + 5)
                  })
    # 2nd campaigns call: 200 -> success after waiting out the limit.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=200,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '9999',
                      'x-account-rate-limit-reset': '1546300800'
                  })

    client = Client(
        characters(40),
        characters(40),
        characters(40),
        characters(40),
        options={
            'handle_rate_limit': True
        }
    )
    account = Account.load(client, '2iqph')
    cursor = Campaign.all(account)
    # 1 account load + 2 campaigns attempts.
    assert len(responses.calls) == 3
    assert cursor is not None
    assert isinstance(cursor, Cursor)
    assert cursor.account_rate_limit_limit == '10000'
    assert cursor.account_rate_limit_remaining == '9999'
    assert cursor.account_rate_limit_reset == '1546300800'
@responses.activate
def test_rate_limit_handle_error(monkeypatch):
    """A second 429 after the rate-limit wait surfaces as a RateLimit error.

    Fixed: ``error`` was only bound inside the ``except`` branch, so a
    passing request raised NameError instead of failing the assertions;
    it is now initialized to None. The debug ``print`` and unused
    ``cursor`` binding were dropped.
    """
    # Patch sleep so the rate-limit wait returns immediately.
    monkeypatch.setattr(time, 'sleep', lambda s: None)

    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph'),
                  body=with_fixture('accounts_load'),
                  content_type='application/json')
    # 1st campaigns call: 429 with a reset time in the near future.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=429,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '0',
                      'x-account-rate-limit-reset': str(int(time.time()) + 5)
                  })
    # 2nd campaigns call: still 429 -> the client should give up and raise.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  status=429,
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '0',
                      'x-account-rate-limit-reset': '1546300800'
                  })

    client = Client(
        characters(40),
        characters(40),
        characters(40),
        characters(40),
        options={
            'handle_rate_limit': True
        }
    )
    account = Account.load(client, '2iqph')
    error = None
    try:
        Campaign.all(account)
    except Exception as e:
        error = e
    # 1 account load + 2 campaigns attempts.
    assert len(responses.calls) == 3
    assert isinstance(error, RateLimit)
    assert error.reset_at == '1546300800'
@responses.activate
def test_rate_limit_cursor_class_access():
    # Rate-limit headers from a plain 200 response are exposed on Cursor.
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph'),
                  body=with_fixture('accounts_load'),
                  content_type='application/json')
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns'),
                  body=with_fixture('campaigns_all'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '9999',
                      'x-account-rate-limit-reset': '1546300800'
                  })
    client = Client(
        characters(40),
        characters(40),
        characters(40),
        characters(40)
    )
    account = Account.load(client, '2iqph')
    cursor = Campaign.all(account)
    assert cursor is not None
    assert isinstance(cursor, Cursor)
    assert cursor.account_rate_limit_limit == '10000'
    assert cursor.account_rate_limit_remaining == '9999'
    assert cursor.account_rate_limit_reset == '1546300800'
@responses.activate
def test_rate_limit_resource_class_access():
    # Rate-limit headers are also exposed on a single Resource built via
    # from_response().
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph'),
                  body=with_fixture('accounts_load'),
                  content_type='application/json')
    responses.add(responses.GET,
                  with_resource('/' + API_VERSION + '/accounts/2iqph/campaigns/2wap7'),
                  body=with_fixture('campaigns_load'),
                  content_type='application/json',
                  headers={
                      'x-account-rate-limit-limit': '10000',
                      'x-account-rate-limit-remaining': '9999',
                      'x-account-rate-limit-reset': '1546300800'
                  })
    client = Client(
        characters(40),
        characters(40),
        characters(40),
        characters(40)
    )
    account = Account.load(client, '2iqph')
    campaign = Campaign.load(account, '2wap7')
    resource = '/' + API_VERSION + '/accounts/2iqph/campaigns/2wap7'
    params = {}
    response = Request(client, 'get', resource, params=params).perform()
    # from_response() is a staticmethod, so passing campaign instance as dummy.
    # We can later change this test case to not call this manually
    # once we changed existing classes to pass the header argument.
    data = campaign.from_response(response.body['data'], response.headers)
    assert data is not None
    assert isinstance(data, Resource)
    assert data.id == '2wap7'
    assert data.entity_status == 'ACTIVE'
    assert data.account_rate_limit_limit == '10000'
    assert data.account_rate_limit_remaining == '9999'
    assert data.account_rate_limit_reset == '1546300800'
| |
import re
from django.shortcuts import render_to_response
from django.http import Http404
from django.conf import settings
from django.core.urlresolvers import get_script_prefix
from graphite.account.models import Profile
from graphite.compat import HttpResponse, HttpResponseBadRequest
from graphite.logger import log
from graphite.util import json, getProfile, getProfileByUsername
from graphite.render.views import parseOptions
from graphite.render.evaluator import evaluateTarget
from graphite.storage import STORE
def graphlot_render(request):
    """Render the main graphlot view."""
    # 'target' series go on the left axis, 'y2target' on the right.
    metrics = [dict(name=t, yaxis="one") for t in request.GET.getlist('target')]
    metrics += [dict(name=t, yaxis="two") for t in request.GET.getlist('y2target')]
    context = {
        'metric_list': metrics,
        'fromtime': request.GET.get('from', "-24hour"),
        'untiltime': request.GET.get('until', "-0hour"),
        'events': request.GET.get('events', ""),
        'slash': get_script_prefix(),
    }
    return render_to_response("graphlot.html", context)
def get_data(request):
    """Get the data for one series."""
    graphOptions, requestOptions = parseOptions(request)
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'localOnly': False,
        'data': [],
    }
    # Only the first requested target is evaluated by this endpoint.
    seriesList = evaluateTarget(requestContext, requestOptions['targets'][0])
    result = []
    for timeseries in seriesList:
        result.append(dict(
            name=timeseries.name,
            data=list(timeseries),
            start=timeseries.start,
            end=timeseries.end,
            step=timeseries.step,
        ))
    if not result:
        raise Http404
    return HttpResponse(json.dumps(result), content_type="application/json")
def find_metric(request):
    """Autocomplete helper on metric names.

    Returns a newline-separated plain-text list of metric paths matching
    the 'q' request parameter (treated as a prefix).
    """
    try:
        query = str(request.REQUEST['q'])
    except KeyError:
        # Only a missing parameter should produce the 400; the previous
        # bare 'except:' masked every other error as "missing parameter".
        return HttpResponseBadRequest(
            content="Missing required parameter 'q'", content_type="text/plain")
    matches = list(STORE.find(query + "*"))
    content = "\n".join([node.path for node in matches])
    response = HttpResponse(content, content_type='text/plain')
    return response
def header(request):
    "View for the header frame of the browser UI"
    return render_to_response("browserHeader.html", {
        'user': request.user,
        'profile': getProfile(request),
        'documentation_url': settings.DOCUMENTATION_URL,
        'slash': get_script_prefix(),
    })
def browser(request):
    "View for the top-level frame of the browser UI"
    query_string = request.GET.urlencode()
    target = request.GET.get('target')
    # js libs terminate a querystring on '#', so escape it before embedding.
    if query_string:
        query_string = query_string.replace('#', '%23')
    if target:
        target = target.replace('#', '%23')
    context = {
        'queryString': query_string,
        'target': target,
        'slash': get_script_prefix(),
    }
    return render_to_response("browser.html", context)
def search(request):
    """Case-insensitive regex search over the metric index file.

    Each whitespace-separated token in the POSTed 'query' is compiled as a
    regex; a line matches when any token's regex matches it. At most 100
    results are returned as a comma-separated plain-text list.
    """
    query = request.POST['query']
    if not query:
        return HttpResponse("")
    patterns = query.split()
    regexes = [re.compile(p, re.I) for p in patterns]
    def matches(s):
        # True if any of the compiled patterns matches the string.
        return any(regex.search(s) for regex in regexes)
    results = []
    # 'with' guarantees the index file is closed even if matching raises;
    # the previous open()/close() pair leaked the handle on error.
    with open(settings.INDEX_FILE) as index_file:
        for line in index_file:
            if matches(line):
                results.append(line.strip())
            if len(results) >= 100:
                break
    result_string = ','.join(results)
    return HttpResponse(result_string, content_type='text/plain')
def myGraphLookup(request):
    """View for My Graphs navigation.

    Builds a JSON tree of the requesting user's saved graphs, grouping
    dot-separated graph names into branches below the requested 'path'.
    """
    profile = getProfile(request,allowDefault=False)
    assert profile
    nodes = []
    leafNode = {
        'allowChildren' : 0,
        'expandable' : 0,
        'leaf' : 1,
    }
    branchNode = {
        'allowChildren' : 1,
        'expandable' : 1,
        'leaf' : 0,
    }
    try:
        path = str( request.GET['path'] )
        # Normalize the prefix so it always ends with a dot (or is empty).
        if path:
            if path.endswith('.'):
                userpath_prefix = path
            else:
                userpath_prefix = path + '.'
        else:
            userpath_prefix = ""
        matches = [ graph for graph in profile.mygraph_set.all().order_by('name') if graph.name.startswith(userpath_prefix) ]
        log.info( "myGraphLookup: username=%s, path=%s, userpath_prefix=%s, %ld graph to process" % (profile.user.username, path, userpath_prefix, len(matches)) )
        branch_inserted = set()
        leaf_inserted = set()
        for graph in matches: #Now let's add the matching graph
            isBranch = False
            # A dot after the prefix means there is a deeper level: a branch.
            dotPos = graph.name.find( '.', len(userpath_prefix) )
            if dotPos >= 0:
                isBranch = True
                name = graph.name[ len(userpath_prefix) : dotPos ]
                if name in branch_inserted: continue
                branch_inserted.add(name)
            else:
                name = graph.name[ len(userpath_prefix): ]
                if name in leaf_inserted: continue
                leaf_inserted.add(name)
            node = {'text' : str(name) }
            if isBranch:
                node.update( { 'id' : str(userpath_prefix + name + '.') } )
                node.update(branchNode)
            else:
                node.update( { 'id' : str(userpath_prefix + name), 'graphUrl' : str(graph.url) } )
                node.update(leafNode)
            nodes.append(node)
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should be logged here.
        log.exception("browser.views.myGraphLookup(): could not complete request.")
    if not nodes:
        no_graphs = { 'text' : "No saved graphs", 'id' : 'no-click' }
        no_graphs.update(leafNode)
        nodes.append(no_graphs)
    return json_response(nodes, request)
def userGraphLookup(request):
    """View for User Graphs navigation.

    With an empty 'path', lists every user (except 'default') who has saved
    graphs, as branch nodes; with a username, lists that user's saved graphs
    as leaf nodes.
    """
    username = request.GET['path']
    nodes = []
    branchNode = {
        'allowChildren' : 1,
        'expandable' : 1,
        'leaf' : 0,
    }
    leafNode = {
        'allowChildren' : 0,
        'expandable' : 0,
        'leaf' : 1,
    }
    try:
        if not username:
            profiles = Profile.objects.exclude(user__username='default')
            for profile in profiles:
                if profile.mygraph_set.count():
                    node = {
                        'text' : str(profile.user.username),
                        'id' : str(profile.user.username)
                    }
                    node.update(branchNode)
                    nodes.append(node)
        else:
            profile = getProfileByUsername(username)
            assert profile, "No profile for username '%s'" % username
            for graph in profile.mygraph_set.all().order_by('name'):
                node = {
                    'text' : str(graph.name),
                    'id' : str(graph.name),
                    'graphUrl' : str(graph.url)
                }
                node.update(leafNode)
                nodes.append(node)
    except Exception:
        # Was a bare 'except:' (also swallowed SystemExit/KeyboardInterrupt),
        # and the message named the wrong function ("userLookup").
        log.exception("browser.views.userGraphLookup(): could not complete request for %s" % username)
    if not nodes:
        no_graphs = { 'text' : "No saved graphs", 'id' : 'no-click' }
        no_graphs.update(leafNode)
        nodes.append(no_graphs)
    return json_response(nodes, request)
def json_response(nodes, request=None):
    """Serialize nodes as JSON (or JSONP when a 'jsonp' parameter is set)."""
    jsonp = request.REQUEST.get('jsonp', False) if request else False
    payload = json.dumps(nodes)
    if jsonp:
        # Wrap the payload in the requested callback for cross-domain use.
        response = HttpResponse("%s(%s)" % (jsonp, payload),
                                content_type="text/javascript")
    else:
        response = HttpResponse(payload, content_type="application/json")
    # Tree data changes as graphs are saved; never cache it.
    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response
| |
"""
The Spatial Reference class, represensents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
    """
    A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
    the SpatialReference object "provide[s] services to represent coordinate
    systems (projections and datums) and to transform between them."
    """
    #### Python 'magic' routines ####
    def __init__(self, srs_input=''):
        """
        Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be string of OGC Well Known Text (WKT), an integer
        EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
        string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
        """
        srs_type = 'user'
        if isinstance(srs_input, six.string_types):
            # Encoding to ASCII if unicode passed in.
            if isinstance(srs_input, six.text_type):
                srs_input = srs_input.encode('ascii')
            try:
                # If SRID is a string, e.g., '4326', then make acceptable
                # as user input.
                srid = int(srs_input)
                srs_input = 'EPSG:%d' % srid
            except ValueError:
                pass
        elif isinstance(srs_input, six.integer_types):
            # EPSG integer code was input.
            srs_type = 'epsg'
        elif isinstance(srs_input, self.ptr_type):
            srs = srs_input
            srs_type = 'ogr'
        else:
            raise TypeError('Invalid SRS type "%s"' % srs_type)
        if srs_type == 'ogr':
            # Input is already an SRS pointer.
            srs = srs_input
        else:
            # Creating a new SRS pointer, using the string buffer.
            buf = c_char_p(b'')
            srs = capi.new_srs(buf)
        # If the pointer is NULL, throw an exception.
        if not srs:
            raise SRSException('Could not create spatial reference from: %s' % srs_input)
        else:
            self.ptr = srs
        # Importing from either the user input string or an integer SRID.
        if srs_type == 'user':
            self.import_user_input(srs_input)
        elif srs_type == 'epsg':
            self.import_epsg(srs_input)

    def __del__(self):
        "Destroys this spatial reference."
        if self._ptr:
            capi.release_srs(self._ptr)

    def __getitem__(self, target):
        """
        Returns the value of the given string attribute node, None if the node
        doesn't exist. Can also take a tuple as a parameter, (target, child),
        where child is the index of the attribute in the WKT. For example:

        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
        >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
        >>> print(srs['GEOGCS'])
        WGS 84
        >>> print(srs['DATUM'])
        WGS_1984
        >>> print(srs['AUTHORITY'])
        EPSG
        >>> print(srs['AUTHORITY', 1]) # The authority value
        4326
        >>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
        0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbol.
        EPSG
        >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
        9122
        """
        if isinstance(target, tuple):
            return self.attr_value(*target)
        else:
            return self.attr_value(target)

    def __str__(self):
        "The string representation uses 'pretty' WKT."
        return self.pretty_wkt

    #### SpatialReference Methods ####
    def attr_value(self, target, index=0):
        """
        The attribute value for the given target node (e.g. 'PROJCS'). The index
        keyword specifies an index of the child node to return.
        """
        if not isinstance(target, six.string_types) or not isinstance(index, int):
            raise TypeError
        return capi.get_attr_value(self.ptr, force_bytes(target), index)

    def auth_name(self, target):
        "Returns the authority name for the given string target node."
        return capi.get_auth_name(self.ptr, force_bytes(target))

    def auth_code(self, target):
        "Returns the authority code for the given string target node."
        return capi.get_auth_code(self.ptr, force_bytes(target))

    def clone(self):
        "Returns a clone of this SpatialReference object."
        return SpatialReference(capi.clone_srs(self.ptr))

    def from_esri(self):
        "Morphs this SpatialReference from ESRI's format to EPSG."
        capi.morph_from_esri(self.ptr)

    def identify_epsg(self):
        """
        This method inspects the WKT of this SpatialReference, and will
        add EPSG authority nodes where an EPSG identifier is applicable.
        """
        capi.identify_epsg(self.ptr)

    def to_esri(self):
        "Morphs this SpatialReference to ESRI's format."
        capi.morph_to_esri(self.ptr)

    def validate(self):
        "Checks to see if the given spatial reference is valid."
        capi.srs_validate(self.ptr)

    #### Name & SRID properties ####
    @property
    def name(self):
        "Returns the name of this Spatial Reference."
        if self.projected:
            return self.attr_value('PROJCS')
        elif self.geographic:
            return self.attr_value('GEOGCS')
        elif self.local:
            return self.attr_value('LOCAL_CS')
        else:
            return None

    @property
    def srid(self):
        "Returns the SRID of top-level authority, or None if undefined."
        try:
            return int(self.attr_value('AUTHORITY', 1))
        except (TypeError, ValueError):
            return None

    #### Unit Properties ####
    @property
    def linear_name(self):
        "Returns the name of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return name

    @property
    def linear_units(self):
        "Returns the value of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return units

    @property
    def angular_name(self):
        "Returns the name of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return name

    @property
    def angular_units(self):
        "Returns the value of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return units

    @property
    def units(self):
        """
        Returns a 2-tuple of the units value and the units name,
        and will automatically determine whether to return the linear
        or angular units.
        """
        units, name = None, None
        if self.projected or self.local:
            units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        elif self.geographic:
            units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        if name is not None:
            # BUG FIX: the decoded value was previously discarded
            # ('name.decode()' without rebinding), so bytes leaked out.
            name = name.decode()
        return (units, name)

    #### Spheroid/Ellipsoid Properties ####
    @property
    def ellipsoid(self):
        """
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, and inverse flattening)
        """
        return (self.semi_major, self.semi_minor, self.inverse_flattening)

    @property
    def semi_major(self):
        "Returns the Semi Major Axis for this Spatial Reference."
        return capi.semi_major(self.ptr, byref(c_int()))

    @property
    def semi_minor(self):
        "Returns the Semi Minor Axis for this Spatial Reference."
        return capi.semi_minor(self.ptr, byref(c_int()))

    @property
    def inverse_flattening(self):
        "Returns the Inverse Flattening for this Spatial Reference."
        return capi.invflattening(self.ptr, byref(c_int()))

    #### Boolean Properties ####
    @property
    def geographic(self):
        """
        Returns True if this SpatialReference is geographic
        (root node is GEOGCS).
        """
        return bool(capi.isgeographic(self.ptr))

    @property
    def local(self):
        "Returns True if this SpatialReference is local (root node is LOCAL_CS)."
        return bool(capi.islocal(self.ptr))

    @property
    def projected(self):
        """
        Returns True if this SpatialReference is a projected coordinate system
        (root node is PROJCS).
        """
        return bool(capi.isprojected(self.ptr))

    #### Import Routines #####
    def import_epsg(self, epsg):
        "Imports the Spatial Reference from the EPSG code (an integer)."
        capi.from_epsg(self.ptr, epsg)

    def import_proj(self, proj):
        "Imports the Spatial Reference from a PROJ.4 string."
        capi.from_proj(self.ptr, proj)

    def import_user_input(self, user_input):
        "Imports the Spatial Reference from the given user input string."
        capi.from_user_input(self.ptr, force_bytes(user_input))

    def import_wkt(self, wkt):
        "Imports the Spatial Reference from OGC WKT (string)"
        capi.from_wkt(self.ptr, byref(c_char_p(wkt)))

    def import_xml(self, xml):
        "Imports the Spatial Reference from an XML string."
        capi.from_xml(self.ptr, xml)

    #### Export Properties ####
    @property
    def wkt(self):
        "Returns the WKT representation of this Spatial Reference."
        return capi.to_wkt(self.ptr, byref(c_char_p()))

    @property
    def pretty_wkt(self, simplify=0):
        # NOTE(review): extra parameters on a @property getter can never be
        # passed by callers; 'simplify' is effectively always 0.
        "Returns the 'pretty' representation of the WKT."
        return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)

    @property
    def proj(self):
        "Returns the PROJ.4 representation for this Spatial Reference."
        return capi.to_proj(self.ptr, byref(c_char_p()))

    @property
    def proj4(self):
        "Alias for proj()."
        return self.proj

    @property
    def xml(self, dialect=''):
        # NOTE(review): as with pretty_wkt, 'dialect' is unreachable by callers.
        "Returns the XML representation of this Spatial Reference."
        return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
    "The coordinate system transformation object."
    def __init__(self, source, target):
        "Initializes on a source and target SpatialReference objects."
        if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
            raise TypeError('source and target must be of type SpatialReference')
        self.ptr = capi.new_ct(source._ptr, target._ptr)
        # Cache the names for __str__; the SRS objects themselves may be
        # garbage-collected after this constructor returns.
        self._srs1_name = source.name
        self._srs2_name = target.name
    def __del__(self):
        "Deletes this Coordinate Transformation object."
        # _ptr may be falsy if construction failed part-way through.
        if self._ptr:
            capi.destroy_ct(self._ptr)
    def __str__(self):
        return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
| |
import os, sys, gtk, vte, threading
# probono 12/2010
__version__ = "0.1"
def threaded(f):
    """Decorator: run the wrapped function in a daemon background thread.

    The wrapped call returns immediately (and returns None); any return
    value of f itself is discarded. Keyword arguments are now forwarded
    too (previously only positional arguments were supported).
    """
    def wrapper(*args, **kwargs):
        t = threading.Thread(target=f, args=args, kwargs=kwargs)
        t.setDaemon(True)  # don't keep the interpreter alive on exit
        t.start()
    # Preserve the wrapped function's identity for introspection.
    wrapper.__name__ = f.__name__
    wrapper.__dict__ = f.__dict__
    wrapper.__doc__ = f.__doc__
    return wrapper
class Assistant(gtk.Assistant):
    """Top-level wizard window.

    Instantiates one page object per (PageClass, title) entry in pages_list,
    appends them all, and wires the GTK 'prepare' signal so each page's
    prepare_cb() runs just before that page is shown.
    """
    def __init__(self, pages_list, width=600, height=400, title="Assistant"):
        gtk.Assistant.__init__(self)
        self.set_title(title)
        # Quit the GTK main loop whether the user finishes or cancels.
        self.connect('close', gtk.main_quit)
        self.connect('cancel',gtk.main_quit)
        self.set_size_request(width, height)
        self.connect('prepare', self.prepare_cb)
        self.pages = []
        for page in pages_list:
            # Each entry is a [PageClass, title] pair.
            self.pages.append(page[0](self, page[1]))
        for page in self.pages:
            page.append()
        self.show()
        gtk.gdk.threads_init() # Must be called before gtk.main()
        # Set by a failing RunnerPage; ResultPage checks this to show errors.
        self.errortext = False
    def prepare_cb(self, assistant, content):
        """Is called before a new page is rendered"""
        page = self.pages[assistant.get_current_page()]
        page.prepare_cb()
    def go_to_last_page(self):
        # The last page is the summary/result page (also used for errors).
        self.set_current_page(len(self.pages) -1)
    def go_to_next_page(self):
        self.set_current_page(self.get_current_page() + 1)
class Page(object):
    """Base class for one page of the Assistant wizard.

    Subclasses override compose_content() to build the page widget and
    prepare_cb() to perform their action when the page is shown.
    """
    def __init__(self, assistant, title, text=""):
        self.a = assistant  # owning Assistant instance
        self.t = title  # page title shown in the assistant header
        self.type = gtk.ASSISTANT_PAGE_CONTENT
        # Bare class name, e.g. "TextPage" (used as default text and in repr).
        self.classname = str(self.__class__).split(".")[-1].replace("'>", "")
        self.text = text
        if self.text == "": self.text = self.classname
        self.compose_content()
    def compose_content(self):
        """Subclasses should implement this. Need to return self.content"""
        label = gtk.Label(self.text)
        label.show()
        label.set_line_wrap(True)
        self.content = label
    def append(self):
        # Register this page's widget with the owning assistant.
        self.a.append_page(self.content)
        self.a.set_page_complete(self.content, True)
        self.a.set_page_title(self.content, self.t)
        self.a.set_page_type(self.content, self.type)
    def prepare_cb(self):
        """Subclasses should implement this to do their action"""
        print "Preparing %s" % (self)
    def __repr__(self):
        return "<%s '%s'>" % (self.classname, self.t)
class TextPage(Page):
    """A plain text page; inherits the default label content from Page."""
    pass
class DirChooserPage(Page):
    """Page with a directory chooser; completes once a folder is selected."""
    def __init__(self, assistant, title):
        # Select directories rather than files.
        self.chooser_type = gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER
        Page.__init__(self, assistant, title)
    def compose_content(self):
        self.chooser = gtk.FileChooserWidget(self.chooser_type)
        # Start browsing from the user's home directory.
        self.chooser.set_current_folder(os.environ.get('HOME'))
        self.chooser.show()
        self.content = self.chooser
    def prepare_cb(self):
        # Block 'next' until the user actually picks something.
        self.a.set_page_complete(self.content, False)
        Page.prepare_cb(self)
        self.chooser.connect("selection-changed", self.selection_changed)
    def selection_changed(self, widget):
        self.selection = widget.get_filename()
        print self.selection
        self.a.set_page_complete(self.content, True)
class ResultPage(Page):
    """Summary page: shows a success icon by default, or the error text
    recorded on the assistant if a previous command failed."""
    def __init__(self, assistant, title):
        Page.__init__(self, assistant, title)
        self.type = gtk.ASSISTANT_PAGE_SUMMARY
        vbox = gtk.VBox()
        self.icon = gtk.Image()
        # Default "all good" icon, shipped alongside this script.
        self.icon.set_from_file(os.path.join(os.path.dirname(__file__), "Gnome-emblem-default.png"))
        self.label = gtk.Label(str(self))
        self.label.set_line_wrap(True)
        vbox.add(self.icon)
        vbox.add(self.label)
        vbox.show_all()
        self.content = vbox
    def prepare_cb(self):
        Page.prepare_cb(self)
        # If any RunnerPage recorded an error, switch to the warning display.
        if self.a.errortext:
            self.label.set_text(self.a.errortext)
            self.icon.set_from_file(os.path.join(os.path.dirname(__file__), "Gnome-dialog-warning.png"))
            self.a.set_page_title(self.content, "Error")
class RunnerPage(Page):
    """Page that runs self.command inside an embedded terminal (RunTerminal).

    The page type is PROGRESS, so the assistant blocks navigation until the
    command finishes; command_succeeded()/command_failed() then advance the
    wizard (failure jumps to the last page, which displays the error).
    """
    def __init__(self, assistant, title):
        Page.__init__(self, assistant, title)
        self.type = gtk.ASSISTANT_PAGE_PROGRESS
        # Placeholder command; callers/subclasses are expected to override.
        self.command = ["sleep", "1"]
    def compose_content(self):
        self.rt = RunTerminal(self)
        self.rt.show()
        self.content = self.rt
    def prepare_cb(self):
        Page.prepare_cb(self)
        self.run_command()
    @threaded
    # For this to work, the gtk.gdk.threads_init() function must be called before the gtk.main() function
    def run_command(self):
        print self.command
        gtk.gdk.threads_enter() # this must be called in a function that is decorated with @threaded
        self.rt.run_command(self.command)
        gtk.gdk.threads_leave() # this must be called in a function that is decorated with @threaded
    def command_succeeded(self, output):
        print "The command ran successfully"
        self.a.set_page_complete(self.content, True) # otherwise we can't go anywhere
        self.a.go_to_next_page()
    def command_failed(self, output):
        print "The command did not run successfully"
        # ResultPage reads this to display the failure.
        self.a.errortext = output
        self.a.set_page_complete(self.content, True) # otherwise we can't go anywhere
        self.a.go_to_last_page()
class ProgressPage(RunnerPage):
    """RunnerPage variant that hides the terminal behind a progress bar."""
    def compose_content(self):
        """We also use the VTE here as in the superclass, but we don't show it"""
        box = gtk.VBox()
        self.rt = RunTerminal(self)
        # Terminal is added but never shown; only the progress bar is visible.
        box.add(self.rt)
        pbar = gtk.ProgressBar()
        pbar.show()
        box.add(pbar)
        box.show()
        self.content = box
class RunTerminal(vte.Terminal):
    """VTE terminal that runs one command and reports the result back to the
    calling page via command_succeeded()/command_failed()."""
    def __init__(self, caller):
        vte.Terminal.__init__(self)
        self.connect('child-exited', self.run_command_done)
        self.caller = caller  # the Page that owns this terminal
    def run_command(self, command_list):
        # Mark the current page incomplete while the child process runs.
        self.caller.a.set_page_complete(self.caller.a.get_nth_page(self.caller.a.get_current_page()), False)
        self.thread_running = True
        command = command_list
        pid = self.fork_command(command=command[0], argv=command, directory=os.getcwd())
        if pid <= 0:
            # fork_command failed outright; the child never started.
            self.caller.command_failed("Failed to run %s" % (command[0]))
        # Pump the GTK main loop until 'child-exited' clears the flag.
        while self.thread_running:
            gtk.main_iteration()
    def run_command_done(self, terminal):
        """'child-exited' handler: collect exit status and output, notify caller."""
        self.thread_running = False
        result = terminal.get_child_exit_status()
        output = terminal.get_text(lambda *a: True).rstrip()
        print result
        print output
        if result == 0:
            self.caller.command_succeeded(output)
        else:
            self.caller.command_failed(output)
if __name__=="__main__":
    # Wizard page sequence: each entry is a [PageClass, title] pair,
    # consumed in order by Assistant.__init__.
    pages = [
        [TextPage, "Welcome"],
        [DirChooserPage, "Select AppDir"],
        [ProgressPage, "Prescanning..."],
        [TextPage, "Install now"],
        [ProgressPage, "Postscanning..."],
        [TextPage, "Select desktop file"],
        [RunnerPage, "Profiling..."],
        [TextPage, "Fine-tune the AppDir"],
        [RunnerPage, "Creating AppImage..."],
        [ResultPage, "Done"]
    ]
    a = Assistant(pages, title="Assistant Helper")
    gtk.main()
| |
import ConfigParser
from copy import copy
from datetime import datetime
import os
import random
import re
import tarfile
import shutil
from tempfile import mkstemp, mkdtemp
from time import strftime
from .config import Config
from .catalogue import Catalogue
from .crypto import Encryptor
from .exceptions import IceItException
from .utils import SetUtils, StringUtils, FileFinder, FileUtils
from .backends import GlacierBackend, S3Backend
from .log import get_logger
log = get_logger(__name__)
# Put your files on ice. Compress, encrypt, obfuscate and archive them on Amazon Glacier.
#
# Inspired by duply/duplicity and bakthat.
#
# @todo - Allow files larger than a certain size to be split into pieces
# @todo - Implement a restore command. It should allow the version to be specified by backup date, e.g.
# iceit.py restore default /full/or/partial/file/name@yyyy-mm-dd_HH:MM:SS
# This will restore a specific version. By default the most recent will be restored.
# Specifying file names in this way, we'll display all matching files in a numbered list, and the user
# can select a number to show which file they want to restore.
# Also allow an output path to be specified.
# @todo - An aggressive dedupe mode that compares candidate files by hashes in the db instead of looking at
# file name
class IceIt(object):
    def __init__(self, config_profile):
        # Name of the configuration profile this instance operates on.
        self.config_profile = config_profile
        self.config = Config(config_profile)
        try:
            self.encryptor = Encryptor(self.config.get('encryption', 'key_id'))
        except ConfigParser.NoSectionError:
            #@todo: add conditionals to places that use this object
            # No 'encryption' section configured yet -> no encryptor.
            self.encryptor = None
        self.catalogue = None # Need to open it when need it because if there's no config we'll be in trouble
    def __open_catalogue(self):
        "Open the catalogue"
        # Opened lazily: the catalogue path comes from config, which may not
        # exist yet when this object is constructed.
        if not self.catalogue:
            self.catalogue = Catalogue(self.config.get_catalogue_path())
def __initialise_backends(self):
"Connect to storage backends"
log.debug("Initialising backends...")
access_key = self.config.get("aws", "access_key")
secret_key = self.config.get("aws", "secret_key")
vault_name = self.config.get("aws", "glacier_vault")
region_name = self.config.get("aws", "glacier_region")
# where files are stored long-term
self.glacier_backend = GlacierBackend(access_key, secret_key, vault_name, region_name)
log.debug("Connected to Glacier")
bucket_name = self.config.get("aws", "s3_bucket")
s3_location = self.config.get("aws", "s3_location")
# A backend for accessing files immediately. The catalogue will be backed up here.
self.s3_backend = S3Backend(access_key, secret_key, bucket_name, s3_location)
log.debug("Connected to S3")
    def write_config_file(self, settings):
        "Persist the given settings via the underlying Config object."
        return self.config.write_config_file(settings)
def key_pair_exists(self):
"Returns a boolean indicating whether a key pair already exists"
return os.path.exists(self.config.get_public_key_path()) or os.path.exists(self.config.get_private_key_path())
    def generate_key_pair(self, key_type, length, options):
        "Generate a new key pair"
        # NOTE(review): assumes self.encryptor is not None (it is None when
        # there is no 'encryption' config section) -- confirm callers check.
        return self.encryptor.generate_key_pair(key_type, length, options)
    def list_secret_keys(self):
        "List the secret keys"
        # NOTE(review): assumes self.encryptor is not None -- confirm callers.
        return self.encryptor.list_secret_keys()
    def set_key_id(self, key_id):
        "Set the ID of the key to use for encryption"
        # NOTE(review): assumes self.encryptor is not None -- confirm callers.
        self.encryptor.key_id = key_id
    def export_keys(self):
        "Export the key pair"
        # Writes the public/private keys to the paths configured for this profile.
        return self.encryptor.export_keys(self.config.get_public_key_path(), self.config.get_private_key_path())
def backup_encryption_keys(self, symmetric_passphrase):
"""
Backup encryption keys to S3. Keys will be combined into a tar.bz2 archive then encrypted with
GPG using symmetric encryption before being uploaded to S3.
@param string symmetric_passphrase - The passphrase to use to encrypt the archive.
"""
self.__initialise_backends()
(file_handle, archive_path) = mkstemp()
tar_archive = tarfile.open(name=archive_path, mode='w:bz2')
public_key_path = self.config.get_public_key_path()
log.info("Adding public key '%s' to key archive '%s'" % (public_key_path, archive_path))
tar_archive.add(public_key_path)
private_key_path = self.config.get_private_key_path()
log.info("Adding private key '%s' to key archive '%s'" % (private_key_path, archive_path))
tar_archive.add(private_key_path)
log.info("Closing key archive")
tar_archive.close()
# encrypt with GPG
encrypted_file_name = self.encryptor.encrypt_symmetric(passphrase=symmetric_passphrase, input_file=archive_path,
output_dir=os.path.dirname(archive_path))
# upload to S3
self.s3_backend.upload('%s%s-%s' % (self.config.get('aws', 's3_key_prefix'),
self.config_profile, strftime("%Y%m%d_%H%M%S")),
encrypted_file_name)
# Delete archives
log.info("Deleting unencrypted temporary key archive %s" % archive_path)
os.unlink(archive_path)
log.info("Deleting encrypted temporary key archive %s" % encrypted_file_name)
os.unlink(encrypted_file_name)
def __backup_catalogue_and_config(self):
"""
Backup catalogue and config file to S3. Catalogue and config will be combined into a tar.bz2 archive then
encrypted with GPG before being uploaded to S3.
"""
self.__initialise_backends()
(file_handle, archive_path) = mkstemp()
tar_archive = tarfile.open(name=archive_path, mode='w:bz2')
catalogue_path = self.config.get_catalogue_path()
log.info("Adding catalogue '%s' to config backup archive '%s'" % (catalogue_path, archive_path))
tar_archive.add(name=catalogue_path, arcname=os.path.basename(catalogue_path))
config_path = self.config.get_config_file_path()
log.info("Adding config file '%s' to config backup archive '%s'" % (config_path, archive_path))
tar_archive.add(name=config_path, arcname=os.path.basename(config_path))
log.info("Closing config backup archive")
tar_archive.close()
# encrypt with GPG
encrypted_file_name = self.encryptor.encrypt(input_file=archive_path, output_dir=os.path.dirname(archive_path))
# upload to S3
self.s3_backend.upload('%s%s-%s' % (self.config.get('aws', 's3_catalogue_prefix'),
self.config_profile, strftime("%Y%m%d_%H%M%S")),
encrypted_file_name)
# Delete archives
log.info("Deleting unencrypted config backup archive %s" % archive_path)
os.unlink(archive_path)
log.info("Deleting encrypted temporary config backup archive %s" % encrypted_file_name)
os.unlink(encrypted_file_name)
    def is_configured(self):
        "Return a boolean indicating whether the current config profile is valid and complete"
        return self.config.is_valid()
def encryption_enabled(self):
"""
Returns a boolean indicating whether to encrypt files
@return boolean True if we should encrypt files
"""
return len(self.encryptor.key_id) > 0
    def __trim_ineligible_files(self, potential_files):
        """
        Return the supplied set with all files that shouldn't be backed up removed.

        A file is dropped if it matches a configured exclude pattern, or if a
        catalogue entry shows it unchanged since the last backup (same mtime,
        or same content hash when the mtime differs).
        """
        # apply configured exclude patterns
        total_excluded = 0
        exclude_patterns = self.config.get('processing', 'exclude_patterns').split(',')
        for pattern in exclude_patterns:
            remove_set = SetUtils.match_patterns(potential_files, pattern)
            total_excluded += len(remove_set)
            potential_files -= remove_set
        log.info("%d files excluded by %d exclude patterns." % (total_excluded, len(exclude_patterns)))
        if len(potential_files) == 0:
            return potential_files
        # Work on a copy so we can remove entries while iterating the original.
        eligible_files = copy(potential_files)
        for file_path in potential_files:
            catalogue_items = self.catalogue.get(file_path)
            # A file may have several catalogue entries (previous versions).
            for catalogue_item in catalogue_items:
                # if the mtime hasn't changed, remove from eligible_files
                log.info("File %s is already in the catalogue. Checking for changes..." % file_path)
                current_mtime = datetime.fromtimestamp(os.path.getmtime(file_path))
                if catalogue_item.file_mtime == current_mtime:
                    log.info("File has the same modification time as previous backup. Skipping.")
                    eligible_files -= set([file_path])
                    # 'continue' moves to the next catalogue entry; the set
                    # subtraction is idempotent so repeats are harmless.
                    continue
                # if it has, hash the file and remove from eligible_files if the old and current hashes are the same
                log.info("File has a different modification time from previous backup. Checking hashes to confirm "
                    "modifications...")
                current_hash = FileUtils.get_file_hash(file_path)
                if catalogue_item.source_hash == current_hash:
                    log.info("File hash matches hash of backed up file. File will NOT be backed up on this run.")
                    eligible_files -= set([file_path])
                    continue
        return eligible_files
def __process_files(self, eligible_files):
    """
    Perform all necessary processing prior to initiating an upload to the file store, e.g. combine files that
    need archiving into archives, compress files that should be compressed, encrypt files as necessary and
    obfuscate file names.

    @param eligible_files: collection of source file paths to process and upload
    """
    temp_dir = mkdtemp('-iceit')
    # compile all disable_compression patterns
    disable_compression_regexes = []
    for pattern in self.config.get('processing', 'disable_compression_patterns').split(','):
        disable_compression_regexes.append(re.compile(pattern, re.IGNORECASE))
    total_files_to_backup = len(eligible_files)
    files_backed_up = 0
    # paranoia - shuffle the order of the eligible_files so that no-one could know which encrypted file
    # corresponds to which uploaded file even if they had a directory listing of files that were uploaded
    # and could inspect timestamps in Glacier (if all files were the same size)
    eligible_files_list = list(eligible_files)
    random.shuffle(eligible_files_list)
    for file_name in eligible_files_list:
        source_path = file_name
        existing_catalogue_item = self.catalogue.get(source_path)
        if self.config.getboolean('catalogue', 'store_source_file_hashes') is True:
            log.info("Generating hash of source file %s" % file_name)
            # get a hash of the input file so we know when we've restored a file that it has been successful
            source_file_hash = FileUtils.get_file_hash(file_name)
            log.info("Source file SHA256 hash is %s" % source_file_hash)
        else:
            source_file_hash = None
        # compress files if they don't match any exclusion rules
        compress_file = True
        for regex in disable_compression_regexes:
            log.debug("Checking whether file %s matches regex %s" % (file_name, regex))
            if regex.match(file_name) is not None:
                log.info("Skipping compression of %s" % file_name)
                compress_file = False
                break
        # compress file
        if compress_file:
            file_name = FileUtils.compress_file(file_name, temp_dir)
        # encrypt file. BUGFIX: always bind unencrypted_file_name so the
        # cleanup below can't raise NameError when encryption is disabled.
        unencrypted_file_name = None
        if self.encryption_enabled():
            unencrypted_file_name = file_name
            file_name = self.encryptor.encrypt(file_name, temp_dir)
        if self.config.getboolean('processing', 'obfuscate_file_names') is True:
            old_file_name = file_name
            file_name = os.path.join(temp_dir, StringUtils.get_random_string())
            # if the file is already in temp_dir, rename it
            if old_file_name.startswith(temp_dir):
                log.info("Obfuscating file %s. Renaming to %s" % (old_file_name, os.path.basename(file_name)))
                os.rename(old_file_name, file_name)
            else:
                # otherwise create a symlink using the obfuscated name to the file to avoid having to copy it
                os.symlink(old_file_name, file_name)
        log.info("Generating hash of final processed file %s" % file_name)
        final_file_hash = FileUtils.get_file_hash(file_name)
        log.info("Processed file SHA256 hash is %s" % final_file_hash)
        # upload to storage backend
        # @todo split large files into smaller chunks and process them all together. They should be separately encrypted and
        # @todo hashed so we know when downloading that each piece is correct
        # @todo - confirm that uploads where errors were caught did actually upload correctly
        aws_archive_id = self.glacier_backend.upload(file_name)
        # delete the temporary file or symlink
        if file_name.startswith(temp_dir):
            log.info("Deleting temporary file/symlink %s" % file_name)
            os.unlink(file_name)
        # also remove any compressed-but-unencrypted intermediate left behind
        if unencrypted_file_name is not None and unencrypted_file_name.startswith(temp_dir):
            log.info("Deleting unencrypted file/symlink %s" % unencrypted_file_name)
            os.unlink(unencrypted_file_name)
        try:
            catalogue_item_id = existing_catalogue_item.id
        except AttributeError:
            catalogue_item_id = None
        # update the catalogue
        self.catalogue.add_item(item={
            'source_path': source_path,
            'aws_archive_id': aws_archive_id,
            'file_mtime': datetime.fromtimestamp(os.path.getmtime(source_path)),
            'source_hash': source_file_hash,
            'processed_hash': final_file_hash,
            'last_backed_up': datetime.now()
        }, id=catalogue_item_id)
        files_backed_up += 1
        log.info("Backed up %d of %d files" % (files_backed_up, total_files_to_backup))
    # remove temporary directory
    log.info("All files processed. Deleting temporary directory %s" % temp_dir)
    os.rmdir(temp_dir)
def backup(self, paths, recursive):
    """
    Back up the given paths under the given config profile, optionally recursively.

    @param paths: iterable of file and/or directory paths to back up
    @param recursive: whether directories are descended into recursively
    """
    self.__initialise_backends()
    # build the candidate set: expand directories, take plain files as-is
    potential_files = set()
    for path in paths:
        log.info("Finding files in path %s (recursive=%s)" % (path, recursive))
        if not os.path.isdir(path):
            potential_files.add(path)
        else:
            finder = FileFinder(path, recursive)
            potential_files.update(finder.get_files())
    log.info("%d files found in %d paths" % (len(potential_files), len(paths)))
    try:
        self.__open_catalogue()
        # remove ineligible files from the backup list, e.g. files that match exclusion patterns, files that have
        # been backed up previously and haven't since been modified, etc.
        eligible_files = self.__trim_ineligible_files(potential_files)
        if not eligible_files:
            log.info("No files need backing up.")
        else:
            # Perform all necessary processing to backup the file, e.g. compress files that should be compressed,
            # encrypt files as necessary, obfuscate file names and upload to storage backend.
            self.__process_files(eligible_files)
            # if all went well, save new catalogue to highly available storage backend (S3)
            self.__backup_catalogue_and_config()
            # @todo - purge old config backups
    except Exception:
        log.exception("Caught an exception. Closing catalogue.")
    finally:
        self.catalogue.close()
def list_catalogues(self):
    """
    List catalogue archives backed up to S3.

    @return: sorted list of matching S3 entries
    """
    self.__initialise_backends()
    prefix = self.config.get('aws', 's3_catalogue_prefix')
    matches = [entry for entry in self.s3_backend.ls()
               if entry['name'].startswith(prefix)]
    return sorted(matches)
def restore_catalogue(self, name):
    """
    Restore a particular catalogue and rename any existing one.

    Downloads the named archive from S3 into a temporary directory,
    decrypts it, extracts the bzip2 tarball, moves the catalogue file into
    place (preserving any existing catalogue under a timestamped name) and
    finally removes the temporary directory.

    @param name: S3 object name of the catalogue archive to restore
    @raise RuntimeError: if the decrypted file is not a readable tar archive
    """
    self.__initialise_backends()
    log.debug("Creating temporary dir to download archive to")
    temp_dir = mkdtemp(prefix='iceit-catalogue-restore')
    log.debug("Created %s" % temp_dir)
    (handle, temp_file_path) = mkstemp(prefix='iceit-catalogue-', dir=temp_dir)
    log.info("Retrieving file '%s' to temporary path '%s'" % (name, temp_file_path))
    self.s3_backend.get_to_file(name, temp_file_path)
    # decrypt alongside the download, then validate/extract the tarball
    decrypted_archive = "%s.decrypted" % temp_file_path
    log.info("Decrypting retrieved archive")
    self.encryptor.decrypt(input_file=temp_file_path, output_file=decrypted_archive)
    log.info("Extracting decrypted archive")
    if not tarfile.is_tarfile(decrypted_archive):
        raise RuntimeError("Error: Unable to read tar file '%s'" % decrypted_archive)
    tar_archive = tarfile.open(name=decrypted_archive, mode='r:bz2')
    log.info("Extracting contents of archive to '%s'" % temp_dir)
    # NOTE(review): extractall() trusts archive member paths; safe only
    # because the archive was created by this tool -- confirm.
    tar_archive.extractall(path=temp_dir)
    tar_archive.close()
    log.info("Deleting downloaded archive '%s'" % decrypted_archive)
    os.unlink(decrypted_archive)
    # keep any existing catalogue under a timestamped name before replacing it
    existing_catalogue_path = self.config.get_catalogue_path()
    if os.path.exists(existing_catalogue_path):
        new_catalogue_path = "%s-%s" % (existing_catalogue_path, strftime("%Y%m%d%H%M%S"))
        log.info("Renaming existing catalogue from %s to %s" % (existing_catalogue_path, new_catalogue_path))
        os.rename(existing_catalogue_path, new_catalogue_path)
    restored_catalogue_path = os.path.join(temp_dir, self.config.get('catalogue', 'name'))
    log.info("Moving downloaded catalogue from '%s' to '%s'" % (restored_catalogue_path,
                                                                existing_catalogue_path))
    os.rename(restored_catalogue_path, existing_catalogue_path)
    log.info("Deleting temporary directory '%s'" % temp_dir)
    shutil.rmtree(temp_dir)
def list_keys(self):
    """
    List encryption keys backed up to S3.

    @return: sorted list of matching S3 entries
    """
    self.__initialise_backends()
    prefix = self.config.get('aws', 's3_key_prefix')
    matches = [entry for entry in self.s3_backend.ls()
               if entry['name'].startswith(prefix)]
    return sorted(matches)
def find_in_catalogue(self, filter):
    """
    Find entries in the catalogue that match the given filter.

    @param filter: Optional filter to apply to file names (name kept for
        backward compatibility although it shadows the builtin)
    :return: list of matching entries
    :raise: whatever the catalogue raised; previously the exception was
        swallowed and the method silently returned None (see old @todo)
    """
    try:
        self.__open_catalogue()
        return self.catalogue.find_item(filter)
    except Exception:
        # BUGFIX: re-raise instead of swallowing, so callers can distinguish
        # "no matches" from "catalogue failure".
        log.exception("Caught an exception. Closing catalogue.")
        raise
    finally:
        self.catalogue.close()
def create_inventory_retrieval_job(self):
    """
    Create a job to retrieve the glacier inventory.

    :return: ID of the new inventory retrieval job
    """
    self.__initialise_backends()
    sns_topic = self.config.get('aws', 'sns_topic_arn')
    return self.glacier_backend.create_inventory_retrieval_job(sns_topic=sns_topic)
def create_archive_retrieval_job(self, aws_archive_id):
    """
    Create a job to retrieve an archive from glacier.

    :param aws_archive_id: The AWS archive ID of the archive to create a retrieval job for
    :return: ID of the new archive retrieval job
    """
    self.__initialise_backends()
    sns_topic = self.config.get('aws', 'sns_topic_arn')
    return self.glacier_backend.create_archive_retrieval_job(
        aws_archive_id=aws_archive_id, sns_topic=sns_topic)
def list_jobs(self):
    """
    List glacier jobs, annotating each archive-retrieval job with the
    source path and hash recorded in the local catalogue.

    :return: List of objects containing information about jobs
    """
    self.__initialise_backends()
    jobs = self.glacier_backend.list_jobs()
    try:
        self.__open_catalogue()
        # translate AWS archive IDs to file names from our catalogue
        for job in jobs:
            if job.archive_id:
                log.debug("Trying to find source_path to match AWS archive ID '%s'" % job.archive_id)
                row = self.catalogue.find_item(filter_field='aws_archive_id', filter=job.archive_id)
                log.debug("Found %d results" % len(row))
                if len(row) == 1:
                    # presumably columns 1 and 4 are source_path and
                    # source_hash -- TODO confirm against catalogue schema
                    job.source_path = row[0][1]
                    job.source_hash = row[0][4]
                elif len(row) == 0:
                    job.source_path = "AWS archive ID not found in local catalogue"
                else:
                    # multiple matches: ambiguous, leave the job unannotated
                    log.warn("Didn't expect to find %d results" % len(row))
    except Exception as e:
        # @todo rethrow exception
        log.exception("Caught an exception. Closing catalogue.")
    finally:
        self.catalogue.close()
    return jobs
def download(self, dest, jobs):
    """
    Download the given jobs.

    :param dest: Directory to write files to
    :param jobs: List of completed boto.glacier.job.Job objects to download and decrypt
    :raise IceItException: if the destination directory doesn't exist, or a
        decrypted file's hash doesn't match the catalogue's source hash
    :return:
    """
    dest = os.path.expanduser(dest)
    if not os.path.exists(dest):
        # BUGFIX: previously only logged fatal and carried on, failing later
        # with a confusing error inside the download; abort up front instead.
        msg = "Destination directory '%s' doesn't exist" % dest
        log.fatal(msg)
        raise IceItException(msg)
    log.info("Downloading jobs...")
    for job in jobs:
        if self.encryptor and job.action == 'ArchiveRetrieval':
            # download encrypted payloads to "<path>.enc", decrypt to <path>
            original_source_path = os.path.join(dest, job.source_path.lstrip('/'))
            log.debug("Altering job source path to include .enc extension")
            job.source_path = "%s.enc" % original_source_path
        dest_path = self.glacier_backend.concurrent_download(dest, job)
        log.info("File downloaded to %s" % dest_path)
        if self.encryptor and job.action == 'ArchiveRetrieval':
            log.info("Decrypting file %s to %s" % (dest_path, original_source_path))
            self.encryptor.decrypt(input_file=dest_path, output_file=original_source_path)
            log.info("File decrypted. Verifying checksum.")
            downloaded_hash = FileUtils.get_file_hash(original_source_path)
            if not job.source_hash == downloaded_hash:
                msg = "Error: Verification of hash for %s failed. Expected %s, " \
                      "calculated %s" % (original_source_path, job.source_hash, downloaded_hash)
                log.warn(msg)
                raise IceItException(msg)
            log.info("File hash validation passed")
            log.debug("Removing downloaded encrypted file %s" % dest_path)
            os.unlink(dest_path)
| |
# Copyright 2016 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`ephemeral_test` --- lib.sibra.ext.ephemeral unit tests
============================================================
"""
# Stdlib
from unittest.mock import call, patch
# External packages
import nose
import nose.tools as ntools
# SCION
from lib.sibra.ext.ephemeral import SibraExtEphemeral
from lib.types import RouterFlag
from test.testcommon import assert_these_calls, create_mock
class TestSibraExtEphemeralParse(object):
    """
    Unit tests for lib.sibra.ext.ephemeral.SibraExtEphemeral._parse
    """
    def test(self):
        inst = SibraExtEphemeral()
        inst._parse_start = create_mock()
        inst._parse_start.return_value = "data", "req"
        inst._parse_path_id = create_mock()
        inst._parse_path_id.side_effect = ["path id%s" % i for i in range(3)]
        inst._update_idxes = create_mock()
        inst._parse_active_blocks = create_mock()
        inst._parse_end = create_mock()
        # two non-empty steady paths (3 and 4 hops); the 0 entry is unused
        inst.path_lens = [3, 4, 0]
        # Call
        inst._parse("raw")
        # Tests
        inst._parse_start.assert_called_once_with("raw")
        # first call parses the ephemeral path ID with an extra False flag,
        # the rest parse one steady path ID per non-empty path
        assert_these_calls(inst._parse_path_id, [
            call("data", False), call("data"), call("data")])
        inst._update_idxes.assert_called_once_with()
        ntools.eq_(inst.path_ids, ["path id0", "path id1", "path id2"])
        inst._parse_active_blocks.assert_called_once_with("data")
        ntools.eq_(inst.active_blocks, inst._parse_active_blocks.return_value)
        inst._parse_end.assert_called_once_with("data", "req")
class TestSibraExtEphemeralParseActiveBlocks(object):
    """
    Unit tests for
    lib.sibra.ext.ephemeral.SibraExtEphemeral._parse_active_blocks
    """
    def test_non_setup(self):
        ext = SibraExtEphemeral()
        ext._parse_block = create_mock()
        ext.setup = False
        ext.total_hops = 9
        # A non-setup packet carries a single block spanning every hop.
        ntools.eq_(ext._parse_active_blocks("data"),
                   [ext._parse_block.return_value])
        ext._parse_block.assert_called_once_with("data", 9)

    def test_setup(self):
        ext = SibraExtEphemeral()
        ext.setup = True
        ext._parse_block = create_mock()
        ext._parse_block.side_effect = "block0", "block1"
        ext.path_lens = [3, 4, 0]
        # A setup packet carries one block per non-empty steady path.
        ntools.eq_(ext._parse_active_blocks("data"), ["block0", "block1"])
        assert_these_calls(ext._parse_block,
                           [call("data", 3), call("data", 4)])
class TestSibraExtEphemeralSetupFromValues(object):
    """
    Unit tests for lib.sibra.ext.ephemeral.SibraExtEphemeral.setup_from_values
    """
    # patch decorators apply bottom-up, so the mock args arrive reversed
    @patch("lib.sibra.ext.ephemeral.SibraExtEphemeral._set_size", autospec=True)
    @patch("lib.sibra.ext.ephemeral.SibraExtEphemeral._parse_src_ia",
           autospec=True)
    @patch("lib.sibra.ext.ephemeral.SibraExtEphemeral._calc_total_hops",
           autospec=True)
    @patch("lib.sibra.ext.ephemeral.ResvBlockEphemeral", autospec=True)
    def test(self, resvblk, total_hops, parse_src_ia, set_size):
        # three steady blocks with 3, 4 and 1 hops respectively
        steady_blocks = []
        for i in 3, 4, 1:
            b = create_mock(["num_hops"])
            b.num_hops = i
            steady_blocks.append(b)
        # Call
        inst = SibraExtEphemeral.setup_from_values(
            "req info", "path id", ["steady 0", "steady 1", "steady 2"],
            steady_blocks)
        # Tests
        ntools.assert_is_instance(inst, SibraExtEphemeral)
        ntools.eq_(inst.steady, False)
        # path_lens mirrors the hop count of each supplied steady block
        ntools.eq_(inst.path_lens, [3, 4, 1])
        total_hops.assert_called_once_with(inst)
        # ephemeral path ID first, then the steady path IDs in order
        ntools.eq_(inst.path_ids,
                   ["path id", "steady 0", "steady 1", "steady 2"])
        ntools.eq_(inst.active_blocks, steady_blocks)
        resvblk.from_values.assert_called_once_with("req info", 0)
        ntools.eq_(inst.req_block, resvblk.from_values.return_value)
        parse_src_ia.assert_called_once_with(inst)
        set_size.assert_called_once_with(inst)
class TestSibraExtEphemeralCalcTotalHops(object):
    """
    Unit tests for lib.sibra.ext.ephemeral.SibraExtEphemeral._calc_total_hops
    """
    def _check(self, path_lens, expected):
        ext = SibraExtEphemeral()
        ext.setup = True
        ext.path_lens = path_lens
        ext._calc_total_hops()
        ntools.eq_(ext.total_hops, expected)

    def test(self):
        # (path_lens, expected total hops) pairs
        cases = (
            ([2, 0, 0], 2),
            ([2, 2, 0], 3),
            ([2, 3, 4], 7),
        )
        for path_lens, expected in cases:
            yield self._check, path_lens, expected
class TestSibraExtEphemeralUpdateIdxes(object):
    """
    Unit tests for lib.sibra.ext.ephemeral.SibraExtEphemeral._update_idxes
    """
    @patch("lib.sibra.ext.ephemeral.SibraExtBase._update_idxes",
           autospec=True)
    def test_not_setup(self, super_update):
        # outside setup the base-class implementation is delegated to
        inst = SibraExtEphemeral()
        inst.setup = False
        # Call
        inst._update_idxes()
        # Tests
        super_update.assert_called_once_with(inst)

    def _check_setup(self, path_lens, sof_idx, b_idx, rel_s_idx, curr_hop):
        # map an absolute SOF index to (block index, index within block,
        # current hop) for the given per-block path lengths
        inst = SibraExtEphemeral()
        inst.setup = True
        inst.path_lens = path_lens
        inst.sof_idx = sof_idx
        # Call
        inst._update_idxes()
        # Tests
        ntools.eq_(inst.block_idx, b_idx)
        ntools.eq_(inst.rel_sof_idx, rel_s_idx)
        ntools.eq_(inst.curr_hop, curr_hop)

    def test_setup(self):
        # cases: (sof_idx, block_idx, rel_sof_idx, curr_hop) with lens [2,3,4]
        for sof_idx, b_idx, rel_s_idx, curr_hop in (
            (0, 0, 0, 0), (1, 0, 1, 1), (2, 1, 0, 1),
            (3, 1, 1, 2), (4, 1, 2, 3), (5, 2, 0, 3),
            (6, 2, 1, 4), (8, 2, 3, 6),
        ):
            yield (self._check_setup, [2, 3, 4], sof_idx, b_idx, rel_s_idx,
                   curr_hop)

    def test_error(self):
        # a SOF index past the end of all blocks must trip an assertion
        inst = SibraExtEphemeral()
        inst.setup = True
        inst.path_lens = [2, 3, 4]
        inst.sof_idx = 9
        # Call
        ntools.assert_raises(AssertionError, inst._update_idxes)
class TestSibraExtEphemeralProcessSetup(object):
    """
    Unit tests for lib.sibra.ext.ephemeral.SibraExtEphemeral._process_setup
    """
    @patch("lib.sibra.ext.ephemeral.SibraExtBase._process_setup", autospec=True)
    def test_egress_forward(self, super_process):
        inst = SibraExtEphemeral()
        inst.get_next_ifid = create_mock()
        # NOTE(review): meta.from_local_as is left as a Mock attribute, hence
        # truthy -- this exercises the "from local AS" branch; confirm.
        meta = create_mock(["from_local_as"])
        # Call
        ntools.eq_(inst._process_setup(meta),
                   [(RouterFlag.FORWARD, inst.get_next_ifid.return_value)])
        # Tests
        super_process.assert_called_once_with(inst, meta)

    @patch("lib.sibra.ext.ephemeral.SibraExtBase._process_setup", autospec=True)
    def test_ingress_deliver(self, super_process):
        inst = SibraExtEphemeral()
        inst._setup_switch_block = create_mock()
        inst.get_next_ifid = create_mock()
        # a next-ifid of 0 results in local delivery
        inst.get_next_ifid.return_value = 0
        meta = create_mock(["from_local_as"])
        meta.from_local_as = False
        # Call
        ntools.eq_(inst._process_setup(meta), [(RouterFlag.DELIVER,)])
        # Tests
        inst._setup_switch_block.assert_called_once_with()
class TestSibraExtEphemeralSetupSwitchBlock(object):
    """
    Unit tests for
    lib.sibra.ext.ephemeral.SibraExtEphemeral._setup_switch_block
    """
    def _check(self, fwd, b_idx, rel_s_idx, expected):
        # starting from sof_idx 0, check the offset applied when switching
        # between active blocks of 2, 3 and 4 hops
        inst = SibraExtEphemeral()
        inst._update_idxes = create_mock()
        inst.fwd = fwd
        inst.block_idx = b_idx
        inst.rel_sof_idx = rel_s_idx
        inst.sof_idx = 0
        for i in 2, 3, 4:
            block = create_mock(["num_hops"])
            block.num_hops = i
            inst.active_blocks.append(block)
        # Call
        inst._setup_switch_block()
        # Tests
        ntools.eq_(inst.sof_idx, expected)
        # indexes are only recomputed when the SOF index actually moved
        if expected != 0:
            inst._update_idxes.assert_called_once_with()

    def test_fwd(self):
        # (block_idx, rel_sof_idx, expected sof_idx) in the forward direction
        for b_idx, rel_s_idx, expected in (
            (0, 0, 0), (0, 1, 1), (1, 0, 0), (1, 1, 0),
            (1, 2, 1), (2, 0, 0), (2, 3, 0),
        ):
            yield self._check, True, b_idx, rel_s_idx, expected

    def test_rev(self):
        # (block_idx, rel_sof_idx, expected sof_idx) in the reverse direction
        for b_idx, rel_s_idx, expected in (
            (0, 0, 0), (0, 1, 0), (1, 0, -1), (1, 1, 0),
            (1, 2, 0), (2, 0, -1), (2, 3, 0),
        ):
            yield self._check, False, b_idx, rel_s_idx, expected
class TestSibraExtEphemeralAddHop(object):
    """
    Unit tests for lib.sibra.ext.ephemeral.SibraExtEphemeral._add_hop
    """
    @patch("lib.sibra.ext.ephemeral.SibraExtBase._add_hop", autospec=True)
    def test_non_setup(self, super_add_hop):
        # outside setup the base-class implementation handles the hop
        inst = SibraExtEphemeral()
        inst.setup = False
        # Call
        inst._add_hop("key")
        # Tests
        super_add_hop.assert_called_once_with(inst, "key")

    def test_setup_old_block(self):
        # still within the first active block: ingress/egress both come from
        # the current SOF of that block
        inst = SibraExtEphemeral()
        inst._get_ifids = create_mock()
        inst._get_ifids.return_value = "ingress", "egress"
        inst._get_prev_raw = create_mock()
        inst.setup = True
        block = create_mock(["info", "sofs"])
        block.info = create_mock(["fwd_dir"])
        block.sofs = ["sof0"]
        inst.active_blocks = [block]
        inst.req_block = create_mock(["add_hop"])
        inst.path_ids = "path_ids"
        # Call
        inst._add_hop("key")
        # Tests
        inst._get_ifids.assert_called_once_with("sof0", block.info.fwd_dir)
        inst._get_prev_raw.assert_called_once_with(req=True)
        inst.req_block.add_hop.assert_called_once_with(
            "ingress", "egress", inst._get_prev_raw.return_value, "key",
            "path_ids")

    def test_setup_new_block(self):
        # crossing into a new active block: ingress comes from the last SOF
        # of the previous block, egress from the first SOF of the current one
        inst = SibraExtEphemeral()
        inst._get_ifids = create_mock()
        inst._get_ifids.side_effect = (
            ("prev_ingress", "prev_egress"),
            ("curr_ingress", "curr_egress"),
        )
        inst._get_prev_raw = create_mock()
        inst.setup = True
        inst.block_idx = 1
        inst.rel_sof_idx = 0
        prev_block = create_mock(["info", "sofs"])
        prev_block.info = create_mock(["fwd_dir"])
        prev_block.sofs = ["prev sof0", "prev sof1"]
        curr_block = create_mock(["info", "sofs"])
        curr_block.info = create_mock(["fwd_dir"])
        curr_block.sofs = ["curr sof0", "curr sof1"]
        inst.active_blocks = [prev_block, curr_block]
        inst.req_block = create_mock(["add_hop"])
        inst.path_ids = "path_ids"
        # Call
        inst._add_hop("key")
        # Tests
        assert_these_calls(inst._get_ifids, [
            call("prev sof1", prev_block.info.fwd_dir),
            call("curr sof0", curr_block.info.fwd_dir),
        ])
        inst.req_block.add_hop.assert_called_once_with(
            "prev_ingress", "curr_egress", inst._get_prev_raw.return_value,
            "key", "path_ids")
class TestSibraExtEphemeralVerifySof(object):
    """
    Unit tests for lib.sibra.ext.ephemeral.SibraExtEphemeral._verify_sof
    """
    @patch("lib.sibra.ext.ephemeral.SibraExtBase._verify_sof", autospec=True)
    def test_non_setup(self, super_verify):
        ext = SibraExtEphemeral()
        ext.setup = False
        ext.path_ids = "path ids"
        # Outside setup, verification uses the full set of path IDs.
        ntools.eq_(ext._verify_sof("key"), super_verify.return_value)
        super_verify.assert_called_once_with(ext, "key", "path ids")

    @patch("lib.sibra.ext.ephemeral.SibraExtBase._verify_sof", autospec=True)
    def test_setup(self, super_verify):
        ext = SibraExtEphemeral()
        ext.setup = True
        ext.block_idx = 1
        ext.path_ids = ["eph id", "steady 0", "steady 1"]
        # During setup only the steady path ID for the current block is used.
        ext._verify_sof("key")
        super_verify.assert_called_once_with(ext, "key", ["steady 1"])
# Allow running this test module directly through the nose test runner.
if __name__ == "__main__":
    nose.run(defaultTest=__name__)
| |
'''
To run a Bokeh application on a Bokeh server from a single Python script,
pass the script name to ``bokeh serve`` on the command line:
.. code-block:: sh
bokeh serve app_script.py
By default, the Bokeh application will be served by the Bokeh server on a
default port ({DEFAULT_PORT}) at localhost, under the path ``/app_script``,
i.e.,
.. code-block:: none
http://localhost:{DEFAULT_PORT}/app_script
It is also possible to run the same command with Jupyter notebooks:
.. code-block:: sh
bokeh serve app_notebook.ipynb
This will generate the same results as described with a python script
and the application will be served on a default port ({DEFAULT_PORT})
at localhost, under the path ``/app_notebook``
Applications can also be created from directories. The directory should
contain a ``main.py`` (and any other helper modules that are required) as
well as any additional assets (e.g., theme files). Pass the directory name
to ``bokeh serve`` to run the application:
.. code-block:: sh
bokeh serve app_dir
It is possible to run multiple applications at once:
.. code-block:: sh
bokeh serve app_script.py app_dir
If you would like to automatically open a browser to display the HTML
page(s), you can pass the ``--show`` option on the command line:
.. code-block:: sh
bokeh serve app_script.py app_dir --show
This will open two pages, for ``/app_script`` and ``/app_dir``,
respectively.
If you would like to pass command line arguments to Bokeh applications,
you can pass the ``--args`` option as the LAST option on the command
line:
.. code-block:: sh
bokeh serve app_script.py myapp.py --args foo bar --baz
Everything that follows ``--args`` will be included in ``sys.argv`` when
the application runs. In this case, when ``myapp.py`` executes, the
contents of ``sys.argv`` will be ``['myapp.py', 'foo', 'bar', '--baz']``,
consistent with standard Python expectations for ``sys.argv``.
Note that if multiple scripts or directories are provided, they
all receive the same set of command line arguments (if any) given by
``--args``.
Network Configuration
~~~~~~~~~~~~~~~~~~~~~
To control the port that the Bokeh server listens on, use the ``--port``
argument:
.. code-block:: sh
bokeh serve app_script.py --port=8080
Similarly, a specific network address can be specified with the
``--address`` argument. For example:
.. code-block:: sh
bokeh serve app_script.py --address=0.0.0.0
will have the Bokeh server listen on all available network addresses.
Additionally, it is possible to configure a hosts whitelist that must be
matched by the ``Host`` header in new requests. You can specify multiple
acceptable host values with the ``--host`` option:
.. code-block:: sh
bokeh serve app_script.py --host foo.com:8081 --host bar.com
If no port is specified in a host value, then port 80 will be used. In
the example above Bokeh server will accept requests from ``foo.com:8081``
and ``bar.com:80``.
If no host values are specified, then by default the Bokeh server will
accept requests from ``localhost:<port>`` where ``<port>`` is the port
that the server is configured to listen on (by default: {DEFAULT_PORT}).
If an asterisk ``*`` is used in the host value (for example ``--host *``) then
it will be treated as a wildcard. As a warning, using permissive host values
like ``*`` may be insecure and open your application to HTTP host header
attacks.
Also note that the host whitelist applies to all request handlers,
including any extra ones added to extend the Bokeh server.
By default, cross site connections to the Bokeh server websocket are not
allowed. You can enable websocket connections originating from additional
hosts by specifying them with the ``--allow-websocket-origin`` option:
.. code-block:: sh
bokeh serve app_script.py --allow-websocket-origin foo.com:8081
It is possible to specify multiple allowed websocket origins by adding
the ``--allow-websocket-origin`` option multiple times.
The Bokeh server can also add an optional prefix to all URL paths.
This can often be useful in conjunction with "reverse proxy" setups.
.. code-block:: sh
bokeh serve app_script.py --prefix=foobar
Then the application will be served under the following URL:
.. code-block:: none
http://localhost:{DEFAULT_PORT}/foobar/app_script
If needed, Bokeh server can send keep-alive pings at a fixed interval.
To configure this feature, set the ``--keep-alive`` option:
.. code-block:: sh
bokeh serve app_script.py --keep-alive 10000
The value is specified in milliseconds. The default keep-alive interval
is 37 seconds. Give a value of 0 to disable keep-alive pings.
To control how often statistic logs are written, set the
--stats-log-frequency option:
.. code-block:: sh
bokeh serve app_script.py --stats-log-frequency 30000
The value is specified in milliseconds. The default interval for
logging stats is 15 seconds. Only positive integer values are accepted.
To have the Bokeh server override the remote IP and URI scheme/protocol for
all requests with ``X-Real-Ip``, ``X-Forwarded-For``, ``X-Scheme``,
``X-Forwarded-Proto`` headers (if they are provided), set the
``--use-xheaders`` option:
.. code-block:: sh
bokeh serve app_script.py --use-xheaders
This is typically needed when running a Bokeh server behind a reverse proxy
that is SSL-terminated.
.. warning::
It is not advised to set this option on a Bokeh server directly facing
the Internet.
Session ID Options
~~~~~~~~~~~~~~~~~~
Typically, each browser tab connected to a Bokeh server will have
its own session ID. When the server generates an ID, it will make
it cryptographically unguessable. This keeps users from accessing
one another's sessions.
To control who can use a Bokeh application, the server can sign
sessions with a secret key and reject "made up" session
names. There are three modes, controlled by the ``--session-ids``
argument:
.. code-block:: sh
bokeh serve app_script.py --session-ids=signed
The available modes are: {SESSION_ID_MODES}
In ``unsigned`` mode, the server will accept any session ID
provided to it in the URL. For example,
``http://localhost/app_script?bokeh-session-id=foo`` will create a
session ``foo``. In ``unsigned`` mode, if the session ID isn't
provided with ``?bokeh-session-id=`` in the URL, the server will
still generate a cryptographically-unguessable ID. However, the
server allows clients to create guessable or deliberately-shared
sessions if they want to.
``unsigned`` mode is most useful when the server is running
locally for development, for example you can have multiple
processes access a fixed session name such as
``default``. ``unsigned`` mode is also convenient because there's
no need to generate or configure a secret key.
In ``signed`` mode, the session ID must be in a special format and
signed with a secret key. Attempts to use the application with an
invalid session ID will fail, but if no ``?bokeh-session-id=``
parameter is provided, the server will generate a fresh, signed
session ID. The result of ``signed`` mode is that only secure
session IDs are allowed but anyone can connect to the server.
In ``external-signed`` mode, the session ID must be signed but the
server itself won't generate a session ID; the
``?bokeh-session-id=`` parameter will be required. To use this
mode, you would need some sort of external process (such as
another web app) which would use the
``bokeh.util.session_id.generate_session_id()`` function to create
valid session IDs. The external process and the Bokeh server must
share the same ``BOKEH_SECRET_KEY`` environment variable.
``external-signed`` mode is useful if you want another process to
authenticate access to the Bokeh server; if someone is permitted
to use the Bokeh application, you would generate a session ID for
them, then redirect them to the Bokeh server with that valid
session ID. If you don't generate a session ID for someone, then
they can't load the app from the Bokeh server.
In both ``signed`` and ``external-signed`` mode, the secret key
must be kept secret; anyone with the key can generate a valid
session ID.
The secret key should be set in a ``BOKEH_SECRET_KEY`` environment
variable and should be a cryptographically random string with at
least 256 bits (32 bytes) of entropy. You can generate a new
secret key with the ``bokeh secret`` command.
Session Expiration Options
~~~~~~~~~~~~~~~~~~~~~~~~~~
To configure how often to check for unused sessions, set the
--check-unused-sessions option:
.. code-block:: sh
bokeh serve app_script.py --check-unused-sessions 10000
The value is specified in milliseconds. The default interval for
checking for unused sessions is 17 seconds. Only positive integer
values are accepted.
To configure how long unused sessions last, set the
--unused-session-lifetime option:
.. code-block:: sh
bokeh serve app_script.py --unused-session-lifetime 60000
The value is specified in milliseconds. The default lifetime interval
for unused sessions is 15 seconds. Only positive integer values are
accepted.
Logging Options
~~~~~~~~~~~~~~~
The logging level can be controlled by the ``--log-level`` argument:
.. code-block:: sh
bokeh serve app_script.py --log-level=debug
The available log levels are: {LOGLEVELS}
The log format can be controlled by the ``--log-format`` argument:
.. code-block:: sh
bokeh serve app_script.py --log-format="%(levelname)s: %(message)s"
The default log format is ``"{DEFAULT_LOG_FORMAT}"``
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import argparse
from bokeh.application import Application
from bokeh.resources import DEFAULT_SERVER_PORT
from bokeh.server.server import Server
from bokeh.util.string import nice_join
from bokeh.settings import settings
from os import getpid
from ..subcommand import Subcommand
from ..util import build_single_handler_applications, die
# Log levels accepted by the --log-level option.
LOGLEVELS = ('debug', 'info', 'warning', 'error', 'critical')
# Session ID handling modes accepted by the --session-ids option.
SESSION_ID_MODES = ('unsigned', 'signed', 'external-signed')
# Default Python logging format used when --log-format is not given.
DEFAULT_LOG_FORMAT = "%(asctime)s %(message)s"
# Substitute the runtime constants into the module docstring so the help
# text shown for `bokeh serve` reflects the actual defaults.
__doc__ = __doc__.format(
    DEFAULT_PORT=DEFAULT_SERVER_PORT,
    LOGLEVELS=nice_join(LOGLEVELS),
    SESSION_ID_MODES=nice_join(SESSION_ID_MODES),
    DEFAULT_LOG_FORMAT=DEFAULT_LOG_FORMAT
)
# Argument definitions shared by `bokeh serve` and related subcommands:
# (flag, argparse add_argument() keyword dict) pairs.
base_serve_args = (
    ('--port', dict(
        metavar = 'PORT',
        type = int,
        help = "Port to listen on",
        default = None
    )),
    ('--address', dict(
        metavar = 'ADDRESS',
        type = str,
        help = "Address to listen on",
        default = None,
    )),
    ('--log-level', dict(
        metavar = 'LOG-LEVEL',
        action = 'store',
        default = 'info',
        choices = LOGLEVELS,
        help = "One of: %s" % nice_join(LOGLEVELS),
    )),
    ('--log-format', dict(
        metavar ='LOG-FORMAT',
        action = 'store',
        default = DEFAULT_LOG_FORMAT,
        # %% escapes the format's own % signs so argparse help doesn't choke
        help = "A standard Python logging format string (default: %r)" % DEFAULT_LOG_FORMAT.replace("%", "%%"),
    )),
)
class Serve(Subcommand):
    ''' Subcommand to launch the Bokeh server.

    '''

    name = "serve"

    help = "Run a Bokeh server hosting one or more applications"

    args = base_serve_args + (
        ('files', dict(
            metavar='DIRECTORY-OR-SCRIPT',
            nargs='*',
            help="The app directories or scripts to serve (serve empty document if not specified)",
            default=None,
        )),

        ('--args', dict(
            metavar='COMMAND-LINE-ARGS',
            nargs=argparse.REMAINDER,
            help="Any command line arguments remaining are passed on to the application handler",
        )),

        ('--develop', dict(
            action='store_true',
            help="Enable develop-time features that should not be used in production",
        )),

        ('--show', dict(
            action='store_true',
            help="Open server app(s) in a browser",
        )),

        ('--allow-websocket-origin', dict(
            metavar='HOST[:PORT]',
            action='append',
            type=str,
            help="Public hostnames which may connect to the Bokeh websocket",
        )),

        ('--host', dict(
            metavar='HOST[:PORT]',
            action='append',
            type=str,
            help="Public hostnames to allow in requests",
        )),

        ('--prefix', dict(
            metavar='PREFIX',
            type=str,
            help="URL prefix for Bokeh server URLs",
            default=None,
        )),

        ('--keep-alive', dict(
            metavar='MILLISECONDS',
            type=int,
            help="How often to send a keep-alive ping to clients, 0 to disable.",
            default=None,
        )),

        ('--check-unused-sessions', dict(
            metavar='MILLISECONDS',
            type=int,
            help="How often to check for unused sessions",
            default=None,
        )),

        ('--unused-session-lifetime', dict(
            metavar='MILLISECONDS',
            type=int,
            help="How long unused sessions last",
            default=None,
        )),

        ('--stats-log-frequency', dict(
            metavar='MILLISECONDS',
            type=int,
            help="How often to log stats",
            default=None,
        )),

        ('--use-xheaders', dict(
            action='store_true',
            help="Prefer X-headers for IP/protocol information",
        )),

        ('--session-ids', dict(
            metavar='MODE',
            action = 'store',
            default = None,
            choices = SESSION_ID_MODES,
            help = "One of: %s" % nice_join(SESSION_ID_MODES),
        )),
    )

    def invoke(self, args):
        ''' Build the requested applications and run a Bokeh server
        hosting them until the process is interrupted.

        Args:
            args (argparse.Namespace) : parsed command line arguments

        '''
        argvs = { f : args.args for f in args.files}
        applications = build_single_handler_applications(args.files, argvs)

        log_level = getattr(logging, args.log_level.upper())
        logging.basicConfig(level=log_level, format=args.log_format)

        if len(applications) == 0:
            # create an empty application by default, typically used with output_server
            applications['/'] = Application()

        if args.keep_alive is not None:
            if args.keep_alive == 0:
                log.info("Keep-alive ping disabled")
            else:
                log.info("Keep-alive ping configured every %d milliseconds", args.keep_alive)
            # rename to be compatible with Server
            args.keep_alive_milliseconds = args.keep_alive

        if args.check_unused_sessions is not None:
            log.info("Check for unused sessions every %d milliseconds", args.check_unused_sessions)
            # rename to be compatible with Server
            args.check_unused_sessions_milliseconds = args.check_unused_sessions

        if args.unused_session_lifetime is not None:
            log.info("Unused sessions last for %d milliseconds", args.unused_session_lifetime)
            # rename to be compatible with Server
            args.unused_session_lifetime_milliseconds = args.unused_session_lifetime

        if args.stats_log_frequency is not None:
            log.info("Log statistics every %d milliseconds", args.stats_log_frequency)
            # rename to be compatible with Server
            args.stats_log_frequency_milliseconds = args.stats_log_frequency

        # only pass along options the user actually set, so Server can
        # apply its own defaults for everything else
        server_kwargs = { key: getattr(args, key) for key in ['port',
                                                              'address',
                                                              'allow_websocket_origin',
                                                              'host',
                                                              'prefix',
                                                              'develop',
                                                              'keep_alive_milliseconds',
                                                              'check_unused_sessions_milliseconds',
                                                              'unused_session_lifetime_milliseconds',
                                                              'stats_log_frequency_milliseconds',
                                                              'use_xheaders',
                                                            ]
                          if getattr(args, key, None) is not None }

        server_kwargs['sign_sessions'] = settings.sign_sessions()
        server_kwargs['secret_key'] = settings.secret_key_bytes()
        server_kwargs['generate_session_ids'] = True
        if args.session_ids is None:
            # no --session-ids means use the env vars
            pass
        elif args.session_ids == 'unsigned':
            server_kwargs['sign_sessions'] = False
        elif args.session_ids == 'signed':
            server_kwargs['sign_sessions'] = True
        elif args.session_ids == 'external-signed':
            server_kwargs['sign_sessions'] = True
            server_kwargs['generate_session_ids'] = False
        else:
            raise RuntimeError("argparse should have filtered out --session-ids mode " +
                               args.session_ids)

        if server_kwargs['sign_sessions'] and not server_kwargs['secret_key']:
            die("To sign sessions, the BOKEH_SECRET_KEY environment variable must be set; " +
                "the `bokeh secret` command can be used to generate a new key.")

        server = Server(applications, **server_kwargs)

        if args.show:
            # we have to defer opening in browser until we start up the server
            def show_callback():
                for route in applications.keys():
                    server.show(route)
            server.io_loop.add_callback(show_callback)

        if args.develop:
            log.info("Using develop mode (do not enable --develop in production)")

        address_string = ''
        if server.address is not None and server.address != '':
            address_string = ' address ' + server.address

        log.info("Starting Bokeh server on port %d%s with applications at paths %r",
                 server.port,
                 address_string,
                 sorted(applications.keys()))

        # FIX: message previously read "Staring"; also use the lazy %-args
        # logging style used everywhere else in this method instead of
        # eagerly interpolating with the % operator.
        log.info("Starting Bokeh server with process id: %d", getpid())
        server.start()
| |
from django.test import SimpleTestCase, TestCase
from corehq.util.es.elasticsearch import ConnectionError
from eulxml.xpath import parse as parse_xpath
from casexml.apps.case.mock import CaseFactory, CaseIndex, CaseStructure
from pillowtop.es_utils import initialize_index_and_mapping
from corehq.apps.case_search.filter_dsl import (
CaseFilterError,
build_filter_from_ast,
)
from corehq.apps.es import CaseSearchES
from corehq.apps.es.tests.utils import ElasticTestMixin, es_test
from corehq.elastic import get_es_new, send_to_elasticsearch
from corehq.form_processor.tests.utils import FormProcessorTestUtils
from corehq.pillows.case_search import transform_case_for_elasticsearch
from corehq.pillows.mappings.case_search_mapping import CASE_SEARCH_INDEX_INFO
from corehq.util.elastic import ensure_index_deleted
from corehq.util.test_utils import trap_extra_setup
@es_test
class TestFilterDsl(ElasticTestMixin, SimpleTestCase):
    """Checks that parsed case-search xpath expressions compile to the
    expected raw Elasticsearch filter bodies.  Pure AST -> query
    translation; no Elasticsearch instance is touched."""
    # Plain equality compiles to a nested key/value term filter pair.
    def test_simple_filter(self):
        parsed = parse_xpath("name = 'farid'")
        expected_filter = {
            "nested": {
                "path": "case_properties",
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "bool": {
                                    "filter": (
                                        {
                                            "term": {
                                                "case_properties.key.exact": "name"
                                            }
                                        },
                                        {
                                            "term": {
                                                "case_properties.value.exact": "farid"
                                            }
                                        }
                                    )
                                }
                            }
                        ],
                        "must": {
                            "match_all": {}
                        }
                    }
                }
            }
        }
        built_filter = build_filter_from_ast("domain", parsed)
        self.checkQuery(expected_filter, built_filter, is_raw_query=True)
    # >= on a date-like literal compiles to a range query on value.date.
    def test_date_comparison(self):
        parsed = parse_xpath("dob >= '2017-02-12'")
        expected_filter = {
            "nested": {
                "path": "case_properties",
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "term": {
                                    "case_properties.key.exact": "dob"
                                }
                            }
                        ],
                        "must": {
                            "range": {
                                "case_properties.value.date": {
                                    "gte": "2017-02-12"
                                }
                            }
                        }
                    }
                }
            }
        }
        self.checkQuery(expected_filter, build_filter_from_ast("domain", parsed), is_raw_query=True)
    # <= on a numeric string compiles to a range query on value.numeric,
    # with the operand coerced to a number.
    def test_numeric_comparison(self):
        parsed = parse_xpath("number <= '100.32'")
        expected_filter = {
            "nested": {
                "path": "case_properties",
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "term": {
                                    "case_properties.key.exact": "number"
                                }
                            }
                        ],
                        "must": {
                            "range": {
                                "case_properties.value.numeric": {
                                    "lte": 100.32
                                }
                            }
                        }
                    }
                }
            }
        }
        self.checkQuery(expected_filter, build_filter_from_ast("domain", parsed), is_raw_query=True)
    # Same as above, but with an unquoted negative literal.
    def test_numeric_comparison_negative(self):
        parsed = parse_xpath("number <= -100.32")
        expected_filter = {
            "nested": {
                "path": "case_properties",
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "term": {
                                    "case_properties.key.exact": "number"
                                }
                            }
                        ],
                        "must": {
                            "range": {
                                "case_properties.value.numeric": {
                                    "lte": -100.32
                                }
                            }
                        }
                    }
                }
            }
        }
        self.checkQuery(expected_filter, build_filter_from_ast("domain", parsed), is_raw_query=True)
    # Equality against a negative number still goes through value.exact.
    def test_numeric_equality_negative(self):
        parsed = parse_xpath("number = -100.32")
        expected_filter = {
            "nested": {
                "path": "case_properties",
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "bool": {
                                    "filter": (
                                        {
                                            "term": {
                                                "case_properties.key.exact": "number"
                                            }
                                        },
                                        {
                                            "term": {
                                                "case_properties.value.exact": -100.32
                                            }
                                        }
                                    )
                                }
                            }
                        ],
                        "must": {
                            "match_all": {}
                        }
                    }
                }
            }
        }
        built_filter = build_filter_from_ast("domain", parsed)
        self.checkQuery(expected_filter, built_filter, is_raw_query=True)
    # property != '' means "the property exists and is non-empty": the
    # filter negates (missing-key OR empty-value).
    def test_case_property_existence(self):
        parsed = parse_xpath("property != ''")
        expected_filter = {
            "bool": {
                "must_not": {
                    "bool": {
                        "should": [
                            {
                                "bool": {
                                    "must_not": {
                                        "nested": {
                                            "path": "case_properties",
                                            "query": {
                                                "bool": {
                                                    "filter": [
                                                        {
                                                            "term": {
                                                                "case_properties.key.exact": "property"
                                                            }
                                                        }
                                                    ],
                                                    "must": {
                                                        "match_all": {}
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            },
                            {
                                "nested": {
                                    "path": "case_properties",
                                    "query": {
                                        "bool": {
                                            "filter": [
                                                {
                                                    "bool": {
                                                        "filter": [
                                                            {
                                                                "term": {
                                                                    "case_properties.key.exact": "property"
                                                                }
                                                            },
                                                            {
                                                                "term": {
                                                                    "case_properties.value.exact": ""
                                                                }
                                                            }
                                                        ]
                                                    }
                                                }
                                            ],
                                            "must": {
                                                "match_all": {}
                                            }
                                        }
                                    }
                                }
                            }
                        ]
                    }
                }
            }
        }
        self.checkQuery(expected_filter, build_filter_from_ast("domain", parsed), is_raw_query=True)
    # and/or combine sub-filters under a top-level bool filter/should.
    def test_nested_filter(self):
        parsed = parse_xpath("(name = 'farid' or name = 'leila') and dob <= '2017-02-11'")
        expected_filter = {
            "bool": {
                "filter": [
                    {
                        "bool": {
                            "should": [
                                {
                                    "nested": {
                                        "path": "case_properties",
                                        "query": {
                                            "bool": {
                                                "filter": [
                                                    {
                                                        "bool": {
                                                            "filter": [
                                                                {
                                                                    "term": {
                                                                        "case_properties.key.exact": "name"
                                                                    }
                                                                },
                                                                {
                                                                    "term": {
                                                                        "case_properties.value.exact": "farid"
                                                                    }
                                                                }
                                                            ]
                                                        }
                                                    }
                                                ],
                                                "must": {
                                                    "match_all": {}
                                                }
                                            }
                                        }
                                    }
                                },
                                {
                                    "nested": {
                                        "path": "case_properties",
                                        "query": {
                                            "bool": {
                                                "filter": [
                                                    {
                                                        "bool": {
                                                            "filter": [
                                                                {
                                                                    "term": {
                                                                        "case_properties.key.exact": "name"
                                                                    }
                                                                },
                                                                {
                                                                    "term": {
                                                                        "case_properties.value.exact": "leila"
                                                                    }
                                                                }
                                                            ]
                                                        }
                                                    }
                                                ],
                                                "must": {
                                                    "match_all": {}
                                                }
                                            }
                                        }
                                    }
                                }
                            ]
                        }
                    },
                    {
                        "nested": {
                            "path": "case_properties",
                            "query": {
                                "bool": {
                                    "filter": [
                                        {
                                            "term": {
                                                "case_properties.key.exact": "dob"
                                            }
                                        }
                                    ],
                                    "must": {
                                        "range": {
                                            "case_properties.value.date": {
                                                "lte": "2017-02-11"
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                ]
            }
        }
        built_filter = build_filter_from_ast("domain", parsed)
        self.checkQuery(expected_filter, built_filter, is_raw_query=True)
    # Comparing one case property against another is not supported and
    # must raise, for every comparison operator and path form.
    def test_self_reference(self):
        with self.assertRaises(CaseFilterError):
            build_filter_from_ast(None, parse_xpath("name = other_property"))
        with self.assertRaises(CaseFilterError):
            build_filter_from_ast(None, parse_xpath("name > other_property"))
        with self.assertRaises(CaseFilterError):
            build_filter_from_ast(None, parse_xpath("parent/name > other_property"))
@es_test
class TestFilterDslLookups(ElasticTestMixin, TestCase):
    """Integration tests for ancestor-path lookups (``father/name = ...``):
    indexes a three-generation case hierarchy into Elasticsearch, then checks
    both the compiled filters and the documents they match."""

    maxDiff = None

    @classmethod
    def setUpClass(cls):
        """Create child -> parent -> grandparent cases and index them."""
        super(TestFilterDslLookups, cls).setUpClass()
        with trap_extra_setup(ConnectionError):
            cls.es = get_es_new()
            initialize_index_and_mapping(cls.es, CASE_SEARCH_INDEX_INFO)
        cls.child_case_id = 'margaery'
        cls.parent_case_id = 'mace'
        cls.grandparent_case_id = 'olenna'
        cls.domain = "Tyrell"
        factory = CaseFactory(domain=cls.domain)
        grandparent_case = CaseStructure(
            case_id=cls.grandparent_case_id,
            attrs={
                'create': True,
                'case_type': 'grandparent',
                'update': {
                    "name": "Olenna",
                    "alias": "Queen of thorns",
                    "house": "Tyrell",
                },
            })
        parent_case = CaseStructure(
            case_id=cls.parent_case_id,
            attrs={
                'create': True,
                'case_type': 'parent',
                'update': {
                    "name": "Mace",
                    "house": "Tyrell",
                },
            },
            indices=[CaseIndex(
                grandparent_case,
                identifier='mother',
                relationship='child',
            )])
        child_case = CaseStructure(
            case_id=cls.child_case_id,
            attrs={
                'create': True,
                'case_type': 'child',
                'update': {
                    "name": "Margaery",
                    "house": "Tyrell",
                },
            },
            indices=[CaseIndex(
                parent_case,
                identifier='father',
                relationship='extension',
            )],
        )
        # Creating the child also creates its ancestors via the indices above.
        for case in factory.create_or_update_cases([child_case]):
            send_to_elasticsearch('case_search', transform_case_for_elasticsearch(case.to_json()))
        cls.es.indices.refresh(CASE_SEARCH_INDEX_INFO.index)

    @classmethod
    def tearDownClass(cls):
        # FIX: the first parameter of a @classmethod receives the class, so
        # it is named ``cls`` (it was misleadingly named ``self``).
        FormProcessorTestUtils.delete_all_cases()
        ensure_index_deleted(CASE_SEARCH_INDEX_INFO.index)
        super(TestFilterDslLookups, cls).tearDownClass()

    def test_parent_lookups(self):
        parsed = parse_xpath("father/name = 'Mace'")
        # return all the cases who's parent (relationship named 'father') has case property 'name' = 'Mace'
        expected_filter = {
            "nested": {
                "path": "indices",
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "bool": {
                                    "filter": (
                                        {
                                            "terms": {
                                                "indices.referenced_id": [self.parent_case_id]
                                            }
                                        },
                                        {
                                            "term": {
                                                "indices.identifier": "father"
                                            }
                                        }
                                    )
                                }
                            }
                        ],
                        "must": {
                            "match_all": {}
                        }
                    }
                }
            }
        }
        built_filter = build_filter_from_ast(self.domain, parsed)
        self.checkQuery(expected_filter, built_filter, is_raw_query=True)
        self.assertEqual([self.child_case_id], CaseSearchES().filter(built_filter).values_list('_id', flat=True))

    def test_nested_parent_lookups(self):
        # The inner mother/house query is resolved to concrete case ids at
        # build time, so the outer filter looks just like a one-level lookup.
        parsed = parse_xpath("father/mother/house = 'Tyrell'")
        expected_filter = {
            "nested": {
                "path": "indices",
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "bool": {
                                    "filter": (
                                        {
                                            "terms": {
                                                "indices.referenced_id": [self.parent_case_id]
                                            }
                                        },
                                        {
                                            "term": {
                                                "indices.identifier": "father"
                                            }
                                        }
                                    )
                                }
                            }
                        ],
                        "must": {
                            "match_all": {}
                        }
                    }
                }
            }
        }
        built_filter = build_filter_from_ast(self.domain, parsed)
        self.checkQuery(expected_filter, built_filter, is_raw_query=True)
        self.assertEqual([self.child_case_id], CaseSearchES().filter(built_filter).values_list('_id', flat=True))
| |
# -*- encoding: utf-8 -*-
"""
Haiku.py
Based on Twitter API
(Japanese) http://watcher.moe-nifty.com/memo/docs/twitterAPI13.txt
(Original) http://apiwiki.twitter.com/REST+API+Documentation
"""
__author__="ymotongpoo <ymotongpoo@gmail.com>"
__date__ ="$2008/11/22 09:57:30$"
__version__="$Revision: 0.10"
__credits__="0x7d8 -- programming training"
import sys
import urllib
import urllib2
# Response formats the API understands; anything else falls back to
# ``default_format`` (see ``Twitter.__add_format``).
FORMAT = set(['xml', 'rss', 'json', 'atom'])
default_format = 'json'
class Twitter:
    """Minimal client for the early Twitter REST API.

    Every method issues a synchronous HTTP request authenticated with HTTP
    basic auth and returns the raw response body as a string in the
    requested format ('xml', 'rss', 'json' or 'atom').
    """

    def __init__(self, username, password, base_url='', proxy_host='', proxy_port=''):
        """
        username, password -- credentials for HTTP basic auth
        base_url -- API root; defaults to http://twitter.com/
        proxy_host, proxy_port -- optional HTTP proxy; both must be supplied
        """
        self.username = username
        self.password = password
        self.base_url = base_url if len(base_url) > 0 else 'http://twitter.com/'
        # BUG FIX: the original tested ``type(proxy) is IntType`` -- neither
        # ``proxy`` nor ``IntType`` was defined, so any non-empty proxy_host
        # raised NameError.  A proxy is used whenever both parts are given.
        if len(proxy_host) > 0 and len(str(proxy_port)) > 0:
            self.proxies = {'http': proxy_host + ':' + str(proxy_port)}
        else:
            self.proxies = {}

    def __create_opener(self):
        """Build a urllib2 opener wired for basic auth (and a proxy, if any)."""
        if 'http' in self.proxies:
            proxy_handler = urllib2.ProxyHandler(self.proxies)
            auth_handler = urllib2.ProxyBasicAuthHandler()
        else:
            auth_handler = urllib2.HTTPBasicAuthHandler()
        auth_handler.add_password('Twitter API', self.base_url, self.username, self.password)
        if 'http' in self.proxies:
            opener = urllib2.build_opener(proxy_handler, auth_handler)
        else:
            opener = urllib2.build_opener(auth_handler)
        return opener

    def __add_format(self, url, format):
        """Append '.<format>' to ``url``, falling back to the default format."""
        if format in FORMAT:
            url = url + '.' + format
        else:
            url = url + '.' + default_format
        return url

    def __open_url_in_get(self, url, get_dict=None):
        """GET ``url``; a non-empty ``get_dict`` becomes the query string."""
        # NOTE: mutable default argument replaced by None (shared-dict pitfall).
        opener = self.__create_opener()
        urllib2.install_opener(opener)
        if get_dict:
            params = urllib.urlencode(get_dict)
            f = urllib2.urlopen(url + '?' + params)
        else:
            f = urllib2.urlopen(url)
        return f.read()

    def __open_url_in_post(self, url, post_dict=None):
        """POST ``post_dict`` to ``url``; returns None when there is no body."""
        opener = self.__create_opener()
        urllib2.install_opener(opener)
        if post_dict:
            params = urllib.urlencode(post_dict)
            f = urllib2.urlopen(url, params)
            return f.read()
        else:
            return

    def __query_dict_generator(self, func_args):
        """Map a calling method's locals() onto the common API query params."""
        get_dict = {}
        if 'since_id' in func_args and func_args['since_id'] > 0 and type(func_args['since_id']) is int:
            get_dict['since_id'] = func_args['since_id']
        if 'twitterid' in func_args and len(func_args['twitterid']) > 0:
            get_dict['id'] = func_args['twitterid']
        if 'since' in func_args and len(func_args['since']) > 0:
            get_dict['since'] = func_args['since']
        if 'page' in func_args and func_args['page'] > 0 and type(func_args['page']) is int:
            get_dict['page'] = func_args['page']
        # BUG FIX: the condition was inverted -- lite=true must be sent when
        # the caller asks for the lite representation, not when he does not.
        if 'lite' in func_args and func_args['lite']:
            get_dict['lite'] = 'true'
        return get_dict

    def __get_request_without_options(self, url_part, format):
        """GET base_url + url_part with no query parameters."""
        url = self.base_url + url_part
        url = self.__add_format(url, format)
        return self.__open_url_in_get(url)

    def __get_request_with_options(self, url_part, format, dict):
        """GET base_url + url_part with parameters harvested from ``dict``."""
        url = self.base_url + url_part
        url = self.__add_format(url, format)
        get_dict = self.__query_dict_generator(dict)
        return self.__open_url_in_get(url, get_dict)

    def publicTimeline(self, since_id=-1, format=default_format):
        """Fetch the public timeline."""
        return self.__get_request_with_options('statuses/public_timeline', format, locals())

    def friendsTimeline(self, twitterid='', since='', page=1, format=default_format):
        """
        since -- expects same type as the return of strftime()
        """
        url_part = 'statuses/friends_timeline'
        if len(twitterid) > 0:
            url_part = url_part + '/' + twitterid
        return self.__get_request_with_options(url_part, format, locals())

    def userTimeline(self, twitterid='', count=20, since='', since_id='', page=1, format=default_format):
        """
        since -- expects same type as the return of strftime()
        """
        url_part = 'statuses/user_timeline'
        if len(twitterid) > 0:
            url_part = url_part + '/' + twitterid
        return self.__get_request_with_options(url_part, format, locals())

    def showStatusByID(self, status_id, format=default_format):
        """Fetch a single status by its (int or str) id."""
        if type(status_id) is int:
            status_id = str(status_id)
        return self.__get_request_without_options('statuses/show/' + status_id, format)

    def updateStatus(self, status, source='', format=default_format):
        """Post a new status; over-long statuses are silently dropped."""
        url = self.base_url + 'statuses/update'
        url = self.__add_format(url, format)
        post_dict = {}
        # NOTE(review): Twitter's classic limit was 140 characters; 160 is
        # kept to preserve existing behavior -- confirm the intended limit.
        if len(status) <= 160:
            post_dict['status'] = status
        if len(source) > 0:
            post_dict['source'] = source
        return self.__open_url_in_post(url, post_dict)

    def repliesPost(self, since='', since_id='', page=1, format=default_format):
        """Fetch replies addressed to the authenticating user."""
        return self.__get_request_with_options('statuses/replies', format, locals())

    def destroyPost(self, status_id, format=default_format):
        """Delete one of the authenticating user's statuses."""
        # Accept int ids too, mirroring showStatusByID.
        if type(status_id) is int:
            status_id = str(status_id)
        return self.__get_request_without_options('statuses/destroy/' + status_id, format)

    def friendsList(self, twitterid='', page=1, lite=False, since='', format=default_format):
        """Fetch the users the given (or authenticating) user follows."""
        return self.__get_request_with_options('statuses/friends', format, locals())

    def followersList(self, twitterid='', page=1, lite=False, format=default_format):
        """Fetch the followers of the given (or authenticating) user."""
        return self.__get_request_with_options('statuses/followers', format, locals())

    def featured(self, format=default_format):
        """Fetch the featured users."""
        return self.__get_request_without_options('statuses/featured', format)

    def showUserInfo(self, twitterid, email='', format=default_format):
        """Fetch a user's profile, looked up by id or by email."""
        url = self.base_url + 'users/show'
        get_dict = {}
        # BUG FIX: the original tested ``'email' in locals()`` which is
        # always true because ``email`` is a parameter; look up by email
        # only when one was actually supplied.
        if len(email) > 0:
            url = self.__add_format(url, format)
            get_dict['email'] = email
        else:
            url = url + '/' + twitterid
            url = self.__add_format(url, format)
        # BUG FIX: iterate .items() -- iterating the dict itself yields only
        # keys, so the original raised "too many values to unpack".
        for k, v in self.__query_dict_generator(locals()).items():
            get_dict[k] = v
        return self.__open_url_in_get(url, get_dict)

    def directMsgs(self, since='', since_id='', page=1, format=default_format):
        """Fetch direct messages received by the authenticating user."""
        return self.__get_request_with_options('direct_messages', format, locals())

    def sentMsgs(self, since='', since_id='', page=1, format=default_format):
        """Fetch direct messages sent by the authenticating user."""
        return self.__get_request_with_options('direct_messages/sent', format, locals())

    def sendNewDirectMsg(self, user, text, format=default_format):
        """Send ``text`` as a direct message to ``user``."""
        if len(user) > 0 and len(text) > 0:
            url = self.base_url + 'direct_messages/new'
            url = self.__add_format(url, format)
            # BUG FIX: __query_dict_generator knows nothing about
            # 'user'/'text', so the original always posted an empty body
            # (and therefore sent nothing); build the body explicitly.
            post_dict = {'user': user, 'text': text}
            return self.__open_url_in_post(url, post_dict)
        else:
            sys.exit(0)

    def destroyDirectMsg(self, msgid, format=default_format):
        """Delete the direct message with integer id ``msgid``."""
        if type(msgid) is int:
            url = self.base_url + 'direct_messages/destroy/'
            url = self.__add_format(url, format)
            # BUG FIX: get_dict was never initialised (NameError).
            get_dict = {'id': msgid}
            return self.__open_url_in_get(url, get_dict)
        else:
            return

    def createFriend(self, twitterid, format=default_format):
        """Start following ``twitterid``."""
        return self.__get_request_without_options('friendships/create/' + twitterid, format)

    def destroyFriend(self, twitterid, format=default_format):
        """Stop following ``twitterid``."""
        return self.__get_request_without_options('friendships/destroy/' + twitterid, format)

    def existsRelationship(self, user_a, user_b, format=default_format):
        """Check whether user_a follows user_b."""
        url = self.base_url + 'friendships/exists'
        url = self.__add_format(url, format)
        get_dict = {'user_a': user_a, 'user_b': user_b}
        return self.__open_url_in_get(url, get_dict)

    def verifyCredentials(self, format=default_format):
        """Validate the stored username/password."""
        url = self.base_url + 'account/verify_credentials'
        url = self.__add_format(url, format)
        return self.__open_url_in_get(url)

    def endSession(self):
        """End the current session."""
        url = self.base_url + 'account/end_session'
        return self.__open_url_in_get(url)

    # *** 'archive' was no longer available ***
    #
    # def archivePost(self, page=1, since='', since_id='', format=default_format):
    #     return self.__get_request_with_options('account/archive', format, locals())

    def updateLocation(self, location, format=default_format):
        """Update the profile location string."""
        url = self.base_url + 'account/update_location'
        url = self.__add_format(url, format)
        get_dict = {'location': location}
        return self.__open_url_in_get(url, get_dict)

    def updateDeliveryDevice(self, device, format=default_format):
        """Set the notification delivery device ('sms', 'im' or 'none')."""
        devices = set(['sms', 'im', 'none'])
        url = self.base_url + 'account/update_delivery_device'
        url = self.__add_format(url, format)
        # BUG FIX: __query_dict_generator does not handle 'device', so the
        # original always posted an empty body; validate and send explicitly.
        post_dict = {'device': device} if device in devices else {}
        return self.__open_url_in_post(url, post_dict)

    def rateLimitStatus(self, format=default_format):
        """Fetch the remaining API request quota."""
        return self.__get_request_without_options('account/rate_limit_status', format)

    def favoritesPost(self, twitterid='', page=1, format=default_format):
        """Fetch favorited statuses."""
        return self.__get_request_with_options('favorites', format, locals())

    def createFavorite(self, twitterid, format=default_format):
        """Favorite a status.

        NOTE(review): the 'favourings' path segment looks unusual -- the
        documented endpoint family is 'favorites'; confirm against the API.
        """
        return self.__get_request_without_options('favourings/create/' + twitterid, format)

    def destroyFavorite(self, twitterid, format=default_format):
        """Un-favorite a status."""
        # BUG FIX: path segment was misspelled 'destory'.
        return self.__get_request_without_options('favourings/destroy/' + twitterid, format)

    def followIM(self, twitterid, format=default_format):
        """Enable device notifications for ``twitterid``."""
        return self.__get_request_without_options('notifications/follow/' + twitterid, format)

    def leaveIM(self, twitterid, format=default_format):
        """Disable device notifications for ``twitterid``."""
        return self.__get_request_without_options('notifications/leave/' + twitterid, format)

    def createBlock(self, twitterid, format=default_format):
        """Block ``twitterid``."""
        return self.__get_request_without_options('blocks/create/' + twitterid, format)

    def destoryBlock(self, twitterid, format=default_format):
        """Unblock ``twitterid``.

        NOTE: the method name's 'destory' typo is kept -- it is part of the
        public interface and renaming it would break existing callers.
        """
        return self.__get_request_without_options('blocks/destroy/' + twitterid, format)

    def testConnection(self, format=default_format):
        """Ping the API test endpoint."""
        return self.__get_request_without_options('help/test', format)

    def downtimeSchedule(self, format=default_format):
        """Fetch the scheduled-downtime notice."""
        return self.__get_request_without_options('help/downtime_schedule', format)

    def updateProfileColors(self, bg='', txt='', link='', sbfill='', sbbdr='', format=default_format):
        """Update any subset of the profile color settings (hex values)."""
        url = self.base_url + 'account/update_profile_colors'
        url = self.__add_format(url, format)
        post_dict = {}
        if len(bg) > 0:
            post_dict['profile_background_color'] = bg
        if len(txt) > 0:
            post_dict['profile_text_color'] = txt
        if len(link) > 0:
            post_dict['profile_link_color'] = link
        if len(sbfill) > 0:
            post_dict['profile_sidebar_fill_color'] = sbfill
        if len(sbbdr) > 0:
            post_dict['profile_sidebar_border_color'] = sbbdr
        return self.__open_url_in_post(url, post_dict)

    def updateProfileBackgroundImage(self, image='', format=default_format):
        """Update the profile background image."""
        url = self.base_url + 'account/update_profile_background_image'
        url = self.__add_format(url, format)
        # BUG FIX: __query_dict_generator does not handle 'image', so the
        # original always posted an empty body.
        post_dict = {'image': image} if len(image) > 0 else {}
        return self.__open_url_in_post(url, post_dict)
| |
from functools import reduce
import operator as op
import numpy as np
import pandas as pd
from zipline.lib.labelarray import LabelArray
from zipline.pipeline import Classifier
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.expression import methods_to_ops
from zipline.testing import parameter_space
from zipline.testing.fixtures import ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.numpy_utils import (
categorical_dtype,
int64_dtype,
)
from .base import BaseUSEquityPipelineTestCase
# Fixed-width 3-character string dtypes used to exercise LabelArray with
# both bytes and unicode inputs.
bytes_dtype = np.dtype('S3')
unicode_dtype = np.dtype('U3')
class ClassifierTestCase(BaseUSEquityPipelineTestCase):
@parameter_space(mv=[-1, 0, 1, 999])
def test_integral_isnull(self, mv):
class C(Classifier):
dtype = int64_dtype
missing_value = mv
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of missing and non-missing values.
data = np.array([[-1, 1, 0, 2],
[3, 0, 1, 0],
[-5, 0, -1, 0],
[-3, 1, 2, 2]], dtype=int64_dtype)
self.check_terms(
terms={
'isnull': c.isnull(),
'notnull': c.notnull()
},
expected={
'isnull': data == mv,
'notnull': data != mv,
},
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
    @parameter_space(mv=['0', None])
    def test_string_isnull(self, mv):
        """isnull()/notnull() on a categorical classifier, with the missing
        value being either a real label or None."""
        class C(Classifier):
            dtype = categorical_dtype
            missing_value = mv
            inputs = ()
            window_length = 0
        c = C()
        # There's no significance to the values here other than that they
        # contain a mix of missing and non-missing values.
        raw = np.asarray(
            [['', 'a', 'ab', 'ba'],
             ['z', 'ab', 'a', 'ab'],
             ['aa', 'ab', '', 'ab'],
             ['aa', 'a', 'ba', 'ba']],
            dtype=categorical_dtype,
        )
        data = LabelArray(raw, missing_value=mv)
        self.check_terms(
            terms={
                'isnull': c.isnull(),
                'notnull': c.notnull()
            },
            expected={
                # Expectations are computed on the raw ndarray, not the
                # LabelArray, so plain elementwise equality applies.
                'isnull': np.equal(raw, mv),
                'notnull': np.not_equal(raw, mv),
            },
            initial_workspace={c: data},
            mask=self.build_mask(self.ones_mask(shape=data.shape)),
        )
@parameter_space(compval=[0, 1, 999])
def test_eq(self, compval):
class C(Classifier):
dtype = int64_dtype
missing_value = -1
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of the comparison value and other values.
data = np.array([[-1, 1, 0, 2],
[3, 0, 1, 0],
[-5, 0, -1, 0],
[-3, 1, 2, 2]], dtype=int64_dtype)
self.check_terms(
terms={
'eq': c.eq(compval),
},
expected={
'eq': (data == compval),
},
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
    @parameter_space(
        __fail_fast=True,
        compval=['a', 'ab', 'not in the array'],
        labelarray_dtype=(bytes_dtype, categorical_dtype, unicode_dtype),
    )
    def test_string_eq(self, compval, labelarray_dtype):
        """eq() on a categorical classifier matches exact labels across
        bytes, unicode, and object input dtypes."""
        # Cast the comparison value into the dtype under test, so e.g. bytes
        # data is compared against a bytes scalar.
        compval = labelarray_dtype.type(compval)
        class C(Classifier):
            dtype = categorical_dtype
            missing_value = ''
            inputs = ()
            window_length = 0
        c = C()
        # There's no significance to the values here other than that they
        # contain a mix of the comparison value and other values.
        data = LabelArray(
            np.asarray(
                [['', 'a', 'ab', 'ba'],
                 ['z', 'ab', 'a', 'ab'],
                 ['aa', 'ab', '', 'ab'],
                 ['aa', 'a', 'ba', 'ba']],
                dtype=labelarray_dtype,
            ),
            missing_value='',
        )
        self.check_terms(
            terms={
                'eq': c.eq(compval),
            },
            expected={
                # Expectation computed via LabelArray.__eq__, not raw
                # ndarray equality.
                'eq': (data == compval),
            },
            initial_workspace={c: data},
            mask=self.build_mask(self.ones_mask(shape=data.shape)),
        )
@parameter_space(
missing=[-1, 0, 1],
dtype_=[int64_dtype, categorical_dtype],
)
def test_disallow_comparison_to_missing_value(self, missing, dtype_):
if dtype_ == categorical_dtype:
missing = str(missing)
class C(Classifier):
dtype = dtype_
missing_value = missing
inputs = ()
window_length = 0
with self.assertRaises(ValueError) as e:
C().eq(missing)
errmsg = str(e.exception)
self.assertEqual(
errmsg,
"Comparison against self.missing_value ({v!r}) in C.eq().\n"
"Missing values have NaN semantics, so the requested comparison"
" would always produce False.\n"
"Use the isnull() method to check for missing values.".format(
v=missing,
),
)
@parameter_space(compval=[0, 1, 999], missing=[-1, 0, 999])
def test_not_equal(self, compval, missing):
class C(Classifier):
dtype = int64_dtype
missing_value = missing
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of the comparison value and other values.
data = np.array([[-1, 1, 0, 2],
[3, 0, 1, 0],
[-5, 0, -1, 0],
[-3, 1, 2, 2]], dtype=int64_dtype)
self.check_terms(
terms={
'ne': c != compval,
},
expected={
'ne': (data != compval) & (data != C.missing_value),
},
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
    @parameter_space(
        __fail_fast=True,
        compval=['a', 'ab', '', 'not in the array'],
        missing=['a', 'ab', '', 'not in the array'],
        labelarray_dtype=(bytes_dtype, unicode_dtype, categorical_dtype),
    )
    def test_string_not_equal(self, compval, missing, labelarray_dtype):
        """!= on a categorical classifier excludes both matching cells and
        missing cells."""
        # Cast the comparison value into the dtype under test.
        compval = labelarray_dtype.type(compval)
        class C(Classifier):
            dtype = categorical_dtype
            missing_value = missing
            inputs = ()
            window_length = 0
        c = C()
        # There's no significance to the values here other than that they
        # contain a mix of the comparison value and other values.
        data = LabelArray(
            np.asarray(
                [['', 'a', 'ab', 'ba'],
                 ['z', 'ab', 'a', 'ab'],
                 ['aa', 'ab', '', 'ab'],
                 ['aa', 'a', 'ba', 'ba']],
                dtype=labelarray_dtype,
            ),
            missing_value=missing,
        )
        # Expectation computed on the underlying integer codes: a cell is
        # "not equal" when its code differs both from compval's code (-1 when
        # compval is not among the categories) and from the missing code.
        expected = (
            (data.as_int_array() != data.reverse_categories.get(compval, -1)) &
            (data.as_int_array() != data.reverse_categories[C.missing_value])
        )
        self.check_terms(
            terms={
                'ne': c != compval,
            },
            expected={
                'ne': expected,
            },
            initial_workspace={c: data},
            mask=self.build_mask(self.ones_mask(shape=data.shape)),
        )
    @parameter_space(
        __fail_fast=True,
        compval=[u'a', u'b', u'ab', u'not in the array'],
        missing=[u'a', u'ab', u'', u'not in the array'],
        labelarray_dtype=(categorical_dtype, bytes_dtype, unicode_dtype),
    )
    def test_string_elementwise_predicates(self,
                                           compval,
                                           missing,
                                           labelarray_dtype):
        """startswith/endswith/has_substring agree with equivalent regex
        matches, and all of them exclude missing cells."""
        # For the bytes dtype, both the comparison value and the regex
        # patterns must themselves be bytes.
        if labelarray_dtype == bytes_dtype:
            compval = compval.encode('utf-8')
            missing = missing.encode('utf-8')
            startswith_re = b'^' + compval + b'.*'
            endswith_re = b'.*' + compval + b'$'
            substring_re = b'.*' + compval + b'.*'
        else:
            startswith_re = '^' + compval + '.*'
            endswith_re = '.*' + compval + '$'
            substring_re = '.*' + compval + '.*'
        class C(Classifier):
            dtype = categorical_dtype
            missing_value = missing
            inputs = ()
            window_length = 0
        c = C()
        # There's no significance to the values here other than that they
        # contain a mix of the comparison value and other values.
        data = LabelArray(
            np.asarray(
                [['', 'a', 'ab', 'ba'],
                 ['z', 'ab', 'a', 'ab'],
                 ['aa', 'ab', '', 'ab'],
                 ['aa', 'a', 'ba', 'ba']],
                dtype=labelarray_dtype,
            ),
            missing_value=missing,
        )
        terms = {
            'startswith': c.startswith(compval),
            'endswith': c.endswith(compval),
            'has_substring': c.has_substring(compval),
            # Equivalent filters using regex matching.
            'startswith_re': c.matches(startswith_re),
            'endswith_re': c.matches(endswith_re),
            'has_substring_re': c.matches(substring_re),
        }
        expected = {
            'startswith': (data.startswith(compval) & (data != missing)),
            'endswith': (data.endswith(compval) & (data != missing)),
            'has_substring': (data.has_substring(compval) & (data != missing)),
        }
        # Each regex variant must produce exactly the same mask as its
        # string-predicate counterpart.
        for key in list(expected):
            expected[key + '_re'] = expected[key]
        self.check_terms(
            terms=terms,
            expected=expected,
            initial_workspace={c: data},
            mask=self.build_mask(self.ones_mask(shape=data.shape)),
        )
    @parameter_space(
        __fail_fast=True,
        container_type=(set, list, tuple, frozenset),
        labelarray_dtype=(categorical_dtype, bytes_dtype, unicode_dtype),
    )
    def test_element_of_strings(self, container_type, labelarray_dtype):
        """element_of accepts any iterable container type and computes
        elementwise membership over string labels."""
        missing = labelarray_dtype.type("not in the array")
        class C(Classifier):
            dtype = categorical_dtype
            missing_value = missing
            inputs = ()
            window_length = 0
        c = C()
        raw = np.asarray(
            [['', 'a', 'ab', 'ba'],
             ['z', 'ab', 'a', 'ab'],
             ['aa', 'ab', '', 'ab'],
             ['aa', 'a', 'ba', 'ba']],
            dtype=labelarray_dtype,
        )
        data = LabelArray(raw, missing_value=missing)
        # Exercise empty sets, duplicates, full coverage minus the missing
        # value, partially-unknown values, and values absent from the data.
        choices = [
            container_type(choices) for choices in [
                [],
                ['a', ''],
                ['a', 'a', 'a', 'ab', 'a'],
                set(data.reverse_categories) - {missing},
                ['random value', 'ab'],
                ['_' * i for i in range(30)],
            ]
        ]
        # Reference implementation: per-cell membership on the raw array.
        def make_expected(choice_set):
            return np.vectorize(choice_set.__contains__, otypes=[bool])(raw)
        terms = {str(i): c.element_of(s) for i, s in enumerate(choices)}
        expected = {str(i): make_expected(s) for i, s in enumerate(choices)}
        self.check_terms(
            terms=terms,
            expected=expected,
            initial_workspace={c: data},
            mask=self.build_mask(self.ones_mask(shape=data.shape)),
        )
def test_element_of_integral(self):
"""
Element of is well-defined for integral classifiers.
"""
class C(Classifier):
dtype = int64_dtype
missing_value = -1
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of missing and non-missing values.
data = np.array([[-1, 1, 0, 2],
[3, 0, 1, 0],
[-5, 0, -1, 0],
[-3, 1, 2, 2]], dtype=int64_dtype)
terms = {}
expected = {}
for choices in [(0,), (0, 1), (0, 1, 2)]:
terms[str(choices)] = c.element_of(choices)
expected[str(choices)] = reduce(
op.or_,
(data == elem for elem in choices),
np.zeros_like(data, dtype=bool),
)
self.check_terms(
terms=terms,
expected=expected,
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
def test_element_of_rejects_missing_value(self):
"""
Test that element_of raises a useful error if we attempt to pass it an
array of choices that include the classifier's missing_value.
"""
missing = "not in the array"
class C(Classifier):
dtype = categorical_dtype
missing_value = missing
inputs = ()
window_length = 0
c = C()
for bad_elems in ([missing], [missing, 'random other value']):
with self.assertRaises(ValueError) as e:
c.element_of(bad_elems)
errmsg = str(e.exception)
expected = (
"Found self.missing_value ('not in the array') in choices"
" supplied to C.element_of().\n"
"Missing values have NaN semantics, so the requested"
" comparison would always produce False.\n"
"Use the isnull() method to check for missing values.\n"
"Received choices were {}.".format(bad_elems)
)
self.assertEqual(errmsg, expected)
@parameter_space(dtype_=Classifier.ALLOWED_DTYPES)
def test_element_of_rejects_unhashable_type(self, dtype_):
class C(Classifier):
dtype = dtype_
missing_value = dtype.type('1')
inputs = ()
window_length = 0
c = C()
with self.assertRaises(TypeError) as e:
c.element_of([{'a': 1}])
errmsg = str(e.exception)
expected = (
"Expected `choices` to be an iterable of hashable values,"
" but got [{'a': 1}] instead.\n"
"This caused the following error: "
"TypeError(\"unhashable type: 'dict'\",)."
)
self.assertEqual(errmsg, expected)
@parameter_space(
__fail_fast=True,
labelarray_dtype=(categorical_dtype, bytes_dtype, unicode_dtype),
relabel_func=[
lambda s: str(s[0]),
lambda s: str(len(s)),
lambda s: str(len([c for c in s if c == 'a'])),
lambda s: None,
]
)
def test_relabel_strings(self, relabel_func, labelarray_dtype):
class C(Classifier):
inputs = ()
dtype = categorical_dtype
missing_value = None
window_length = 0
c = C()
raw = np.asarray(
[['a', 'aa', 'aaa', 'abab'],
['bab', 'aba', 'aa', 'bb'],
['a', 'aba', 'abaa', 'abaab'],
['a', 'aa', 'aaa', 'aaaa']],
dtype=labelarray_dtype,
)
raw_relabeled = np.vectorize(relabel_func, otypes=[object])(raw)
data = LabelArray(raw, missing_value=None)
terms = {
'relabeled': c.relabel(relabel_func),
}
expected_results = {
'relabeled': LabelArray(raw_relabeled, missing_value=None),
}
self.check_terms(
terms,
expected_results,
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
@parameter_space(
__fail_fast=True,
missing_value=[None, 'M'],
)
def test_relabel_missing_value_interactions(self, missing_value):
mv = missing_value
class C(Classifier):
inputs = ()
dtype = categorical_dtype
missing_value = mv
window_length = 0
c = C()
def relabel_func(s):
if s == 'B':
return mv
return ''.join([s, s])
raw = np.asarray(
[['A', 'B', 'C', mv],
[mv, 'A', 'B', 'C'],
['C', mv, 'A', 'B'],
['B', 'C', mv, 'A']],
dtype=categorical_dtype,
)
data = LabelArray(raw, missing_value=mv)
expected_relabeled_raw = np.asarray(
[['AA', mv, 'CC', mv],
[mv, 'AA', mv, 'CC'],
['CC', mv, 'AA', mv],
[mv, 'CC', mv, 'AA']],
dtype=categorical_dtype,
)
terms = {
'relabeled': c.relabel(relabel_func),
}
expected_results = {
'relabeled': LabelArray(expected_relabeled_raw, missing_value=mv),
}
self.check_terms(
terms,
expected_results,
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
def test_relabel_int_classifier_not_yet_supported(self):
class C(Classifier):
inputs = ()
dtype = int64_dtype
missing_value = -1
window_length = 0
c = C()
with self.assertRaises(TypeError) as e:
c.relabel(lambda x: 0 / 0) # Function should never be called.
result = str(e.exception)
expected = (
"relabel() is only defined on Classifiers producing strings "
"but it was called on a Classifier of dtype int64."
)
self.assertEqual(result, expected)
@parameter_space(
compare_op=[op.gt, op.ge, op.le, op.lt],
dtype_and_missing=[(int64_dtype, 0), (categorical_dtype, '')],
)
def test_bad_compare(self, compare_op, dtype_and_missing):
class C(Classifier):
inputs = ()
window_length = 0
dtype = dtype_and_missing[0]
missing_value = dtype_and_missing[1]
with self.assertRaises(TypeError) as e:
compare_op(C(), object())
self.assertEqual(
str(e.exception),
'cannot compare classifiers with %s' % (
methods_to_ops['__%s__' % compare_op.__name__],
),
)
@parameter_space(
dtype_and_missing=[(int64_dtype, -1), (categorical_dtype, None)],
use_mask=[True, False],
)
def test_peer_count(self, dtype_and_missing, use_mask):
class C(Classifier):
dtype = dtype_and_missing[0]
missing_value = dtype_and_missing[1]
inputs = ()
window_length = 0
c = C()
if dtype_and_missing[0] == int64_dtype:
data = np.array(
[[1, 1, -1, 2, 1, -1],
[2, 1, 3, 2, 2, 2],
[-1, 1, 10, 10, 10, -1],
[3, 3, 3, 3, 3, 3]],
dtype=int64_dtype,
)
else:
data = LabelArray(
[['a', 'a', None, 'b', 'a', None],
['b', 'a', 'c', 'b', 'b', 'b'],
[None, 'a', 'aa', 'aa', 'aa', None],
['c', 'c', 'c', 'c', 'c', 'c']],
missing_value=None,
)
if not use_mask:
mask = self.build_mask(self.ones_mask(shape=data.shape))
expected = np.array(
[[3, 3, np.nan, 1, 3, np.nan],
[4, 1, 1, 4, 4, 4],
[np.nan, 1, 3, 3, 3, np.nan],
[6, 6, 6, 6, 6, 6]],
)
else:
# Punch a couple holes in the mask to check that we handle the mask
# correctly.
mask = self.build_mask(
np.array([[1, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 1, 1]], dtype='bool')
)
expected = np.array(
[[2, 2, np.nan, 1, np.nan, np.nan],
[3, 1, 1, 3, 3, np.nan],
[np.nan, 1, 3, 3, 3, np.nan],
[4, 4, np.nan, np.nan, 4, 4]],
)
terms = {
'peer_counts': c.peer_count(),
}
expected_results = {
'peer_counts': expected,
}
self.check_terms(
terms=terms,
expected=expected_results,
initial_workspace={c: data},
mask=mask,
)
class TestPostProcessAndToWorkSpaceValue(ZiplineTestCase):
    """Round-trip tests for Classifier.postprocess / to_workspace_value."""

    def test_reversability_categorical(self):
        class F(Classifier):
            inputs = ()
            window_length = 0
            dtype = categorical_dtype
            missing_value = '<missing>'

        f = F()
        workspace_value = LabelArray(
            np.array(
                [['a', f.missing_value],
                 ['b', f.missing_value],
                 ['c', 'd']],
            ),
            missing_value=f.missing_value,
        )

        # postprocess flattens into a Categorical, keeping missing entries.
        assert_equal(
            f.postprocess(workspace_value.ravel()),
            pd.Categorical(
                ['a', f.missing_value, 'b', f.missing_value, 'c', 'd'],
            ),
        )

        # only include the non-missing data
        dates = [
            pd.Timestamp('2014-01-01'),
            pd.Timestamp('2014-01-02'),
            pd.Timestamp('2014-01-03'),
            pd.Timestamp('2014-01-03'),
        ]
        pipeline_output = pd.Series(
            data=['a', 'b', 'c', 'd'],
            index=pd.MultiIndex.from_arrays([dates, [0, 0, 0, 1]]),
            dtype='category',
        )

        assert_equal(
            f.to_workspace_value(pipeline_output, pd.Index([0, 1])),
            workspace_value,
        )

    def test_reversability_int64(self):
        class F(Classifier):
            inputs = ()
            window_length = 0
            dtype = int64_dtype
            missing_value = -1

        f = F()
        workspace_value = np.array(
            [[0, f.missing_value],
             [1, f.missing_value],
             [2, 3]],
        )

        # For ints, postprocess is the identity on the raveled data.
        assert_equal(
            f.postprocess(workspace_value.ravel()),
            workspace_value.ravel(),
        )

        # only include the non-missing data
        dates = [
            pd.Timestamp('2014-01-01'),
            pd.Timestamp('2014-01-02'),
            pd.Timestamp('2014-01-03'),
            pd.Timestamp('2014-01-03'),
        ]
        pipeline_output = pd.Series(
            data=[0, 1, 2, 3],
            index=pd.MultiIndex.from_arrays([dates, [0, 0, 0, 1]]),
            dtype=int64_dtype,
        )

        assert_equal(
            f.to_workspace_value(pipeline_output, pd.Index([0, 1])),
            workspace_value,
        )
class ReprTestCase(ZiplineTestCase):
    """Tests for short graph representations of classifier terms."""

    def test_quantiles_graph_repr(self):
        # graph_repr should show the bin count, not the full term repr.
        term = TestingDataSet.float_col.latest.quantiles(5)
        self.assertEqual(term.graph_repr(), "Quantiles(5)")
| |
from flask_classy import route
from flask import request, jsonify, Response
from marvin.tools.query import doQuery, Query
from marvin.core.exceptions import MarvinError
from marvin.api.base import BaseView, arg_validate as av
from marvin.utils.db import get_traceback
import json
def _getCubes(searchfilter, **kwargs):
    """Run query locally at Utah.

    Parameters
    ----------
    searchfilter : str
        The string search filter expression to query on.
    **kwargs
        Remaining query keywords.  ``params`` and ``rettype`` are remapped
        to the ``doQuery`` keywords ``returnparams`` and ``returntype``;
        ``start``, ``end`` and ``limit`` control result pagination.

    Returns
    -------
    dict
        Query results plus metadata (raw SQL, runtime, counts, ...).

    Raises
    ------
    MarvinError
        If the underlying query fails for any reason.
    """
    release = kwargs.pop('release', None)
    # Capture the requested return parameters before remapping, so they can
    # be echoed back in the output (previously kwargs.get('params') was
    # always None because 'params' had already been popped).
    return_params = kwargs.pop('params', None)
    kwargs['returnparams'] = return_params
    kwargs['returntype'] = kwargs.pop('rettype', None)
    try:
        q, r = doQuery(searchfilter=searchfilter, release=release, **kwargs)
    except Exception as e:
        raise MarvinError('Query failed with {0}: {1}'.format(e.__class__.__name__, e))

    results = r.results

    # get the subset keywords
    start = kwargs.get('start', None)
    end = kwargs.get('end', None)
    limit = kwargs.get('limit', None)

    # get a subset; test against None so a start page of 0 still works
    chunk = None
    if start is not None:
        chunk = int(end) - int(start)
        results = r.getSubset(int(start), limit=chunk)
    # Fall back to the overall limit when no explicit subset was requested.
    # (The original conditional assigned `limit` on both branches, which
    # discarded the computed subset size.)
    chunk = chunk if chunk else limit

    runtime = {'days': q.runtime.days, 'seconds': q.runtime.seconds, 'microseconds': q.runtime.microseconds}
    output = dict(data=results, query=r.showQuery(), chunk=chunk,
                  filter=searchfilter, params=q.params, returnparams=return_params, runtime=runtime,
                  queryparams_order=q.queryparams_order, count=len(results), totalcount=r.count)
    return output
class QueryView(BaseView):
    """Class describing API calls related to queries."""

    def index(self):
        '''Returns general query info

        .. :quickref: Query; Get general query info

        :query string release: the release of MaNGA
        :resjson int status: status of response. 1 if good, -1 if bad.
        :resjson string error: error message, null if None
        :resjson json inconfig: json of incoming configuration
        :resjson json utahconfig: json of outcoming configuration
        :resjson string traceback: traceback of an error, null if None
        :resjson string data: data message
        :resheader Content-Type: application/json
        :statuscode 200: no error
        :statuscode 422: invalid input parameters

        **Example request**:

        .. sourcecode:: http

           GET /marvin2/api/query/ HTTP/1.1
           Host: api.sdss.org
           Accept: application/json, */*

        **Example response**:

        .. sourcecode:: http

           HTTP/1.1 200 OK
           Content-Type: application/json

           {
              "status": 1,
              "error": null,
              "inconfig": {"release": "MPL-5"},
              "utahconfig": {"release": "MPL-5", "mode": "local"},
              "traceback": null,
              "data": "this is a query!"
           }

        '''
        self.results['data'] = 'this is a query!'
        self.results['status'] = 1
        return jsonify(self.results)

    @route('/cubes/', methods=['GET', 'POST'], endpoint='querycubes')
    @av.check_args(use_params='query', required='searchfilter')
    def cube_query(self, args):
        ''' Performs a remote query

        .. :quickref: Query; Perform a remote query

        :query string release: the release of MaNGA
        :form searchfilter: your string searchfilter expression
        :form params: the list of return parameters
        :form rettype: the string indicating your Marvin Tool conversion object
        :form limit: the limiting number of results to return for large results
        :form sort: a string parameter name to sort on
        :form order: the order of the sort, either ``desc`` or ``asc``
        :resjson int status: status of response. 1 if good, -1 if bad.
        :resjson string error: error message, null if None
        :resjson json inconfig: json of incoming configuration
        :resjson json utahconfig: json of outcoming configuration
        :resjson string traceback: traceback of an error, null if None
        :resjson string data: dictionary of returned data
        :json list results: the list of results
        :json string query: the raw SQL string of your query
        :json int chunk: the page limit of the results
        :json string filter: the searchfilter used
        :json list returnparams: the list of return parameters
        :json list params: the list of parameters used in the query
        :json list queryparams_order: the list of parameters used in the query
        :json dict runtime: a dictionary of query time (days, minutes, seconds)
        :json int totalcount: the total count of results
        :json int count: the count in the current page of results
        :resheader Content-Type: application/json
        :statuscode 200: no error
        :statuscode 422: invalid input parameters

        **Example request**:

        .. sourcecode:: http

           GET /marvin2/api/query/cubes/ HTTP/1.1
           Host: api.sdss.org
           Accept: application/json, */*

        **Example response**:

        .. sourcecode:: http

           HTTP/1.1 200 OK
           Content-Type: application/json

           {
              "status": 1,
              "error": null,
              "inconfig": {"release": "MPL-5", "searchfilter": "nsa.z<0.1"},
              "utahconfig": {"release": "MPL-5", "mode": "local"},
              "traceback": null,
              "chunk": 100,
              "count": 4,
              "data": [["1-209232",8485,"8485-1901","1901",0.0407447],
                       ["1-209113",8485,"8485-1902","1902",0.0378877],
                       ["1-209191",8485,"8485-12701","12701",0.0234253],
                       ["1-209151",8485,"8485-12702","12702",0.0185246]
                      ],
              "filter": "nsa.z<0.1",
              "params": ["cube.mangaid","cube.plate","cube.plateifu","ifu.name","nsa.z"],
              "query": "SELECT ... FROM ... WHERE ...",
              "queryparams_order": ["mangaid","plate","plateifu","name","z"],
              "returnparams": null,
              "runtime": {"days": 0,"microseconds": 55986,"seconds": 0},
              "totalcount": 4
           }

        '''
        searchfilter = args.pop('searchfilter', None)
        try:
            # All remaining validated args pass straight through to the query.
            res = _getCubes(searchfilter, **args)
        except MarvinError as e:
            self.results['error'] = str(e)
            self.results['traceback'] = get_traceback(asstring=True)
        else:
            self.results['status'] = 1
            self.update_results(res)

        # this needs to be json.dumps until sas-vm at Utah updates to 2.7.11
        return Response(json.dumps(self.results), mimetype='application/json')

    @route('/cubes/getsubset/', methods=['GET', 'POST'], endpoint='getsubset')
    @av.check_args(use_params='query', required=['searchfilter', 'start', 'end'])
    def query_getsubset(self, args):
        ''' Remotely grab a subset of results from a query

        .. :quickref: Query; Grab a subset of results from a remote query

        :query string release: the release of MaNGA
        :form searchfilter: your string searchfilter expression
        :form params: the list of return parameters
        :form rettype: the string indicating your Marvin Tool conversion object
        :form start: the starting page index of results you wish to grab
        :form end: the ending page index of the results you wish to grab
        :form limit: the limiting number of results to return for large results
        :form sort: a string parameter name to sort on
        :form order: the order of the sort, either ``desc`` or ``asc``
        :resjson int status: status of response. 1 if good, -1 if bad.
        :resjson string error: error message, null if None
        :resjson json inconfig: json of incoming configuration
        :resjson json utahconfig: json of outcoming configuration
        :resjson string traceback: traceback of an error, null if None
        :resjson string data: dictionary of returned data
        :json list results: the list of results
        :json string query: the raw SQL string of your query
        :json int chunk: the page limit of the results
        :json string filter: the searchfilter used
        :json list returnparams: the list of return parameters
        :json list params: the list of parameters used in the query
        :json list queryparams_order: the list of parameters used in the query
        :json dict runtime: a dictionary of query time (days, minutes, seconds)
        :json int totalcount: the total count of results
        :json int count: the count in the current page of results
        :resheader Content-Type: application/json
        :statuscode 200: no error
        :statuscode 422: invalid input parameters

        **Example request**:

        .. sourcecode:: http

           GET /marvin2/api/query/cubes/getsubset/ HTTP/1.1
           Host: api.sdss.org
           Accept: application/json, */*

        **Example response**:

        .. sourcecode:: http

           HTTP/1.1 200 OK
           Content-Type: application/json

           {
              "status": 1,
              "error": null,
              "inconfig": {"release": "MPL-5", "searchfilter": "nsa.z<0.1", "start":10, "end":15},
              "utahconfig": {"release": "MPL-5", "mode": "local"},
              "traceback": null,
              "chunk": 100,
              "count": 4,
              "data": [["1-209232",8485,"8485-1901","1901",0.0407447],
                       ["1-209113",8485,"8485-1902","1902",0.0378877],
                       ["1-209191",8485,"8485-12701","12701",0.0234253],
                       ["1-209151",8485,"8485-12702","12702",0.0185246]
                      ],
              "filter": "nsa.z<0.1",
              "params": ["cube.mangaid","cube.plate","cube.plateifu","ifu.name","nsa.z"],
              "query": "SELECT ... FROM ... WHERE ...",
              "queryparams_order": ["mangaid","plate","plateifu","name","z"],
              "returnparams": null,
              "runtime": {"days": 0,"microseconds": 55986,"seconds": 0},
              "totalcount": 4
           }

        '''
        searchfilter = args.pop('searchfilter', None)
        try:
            # `start`/`end` remain in args and drive the subset selection.
            res = _getCubes(searchfilter, **args)
        except MarvinError as e:
            self.results['error'] = str(e)
            self.results['traceback'] = get_traceback(asstring=True)
        else:
            self.results['status'] = 1
            self.update_results(res)

        # this needs to be json.dumps until sas-vm at Utah updates to 2.7.11
        return Response(json.dumps(self.results), mimetype='application/json')

    @route('/getparamslist/', methods=['GET', 'POST'], endpoint='getparams')
    @av.check_args(use_params='query', required='paramdisplay')
    def getparamslist(self, args):
        ''' Retrieve a list of all available input parameters into the query

        .. :quickref: Query; Get a list of all or "best" queryable parameters

        :query string release: the release of MaNGA
        :form paramdisplay: ``all`` or ``best``, type of parameters to return
        :resjson int status: status of response. 1 if good, -1 if bad.
        :resjson string error: error message, null if None
        :resjson json inconfig: json of incoming configuration
        :resjson json utahconfig: json of outcoming configuration
        :resjson string traceback: traceback of an error, null if None
        :resjson string data: dictionary of returned data
        :json list params: the list of queryable parameters
        :resheader Content-Type: application/json
        :statuscode 200: no error
        :statuscode 422: invalid input parameters

        **Example request**:

        .. sourcecode:: http

           GET /marvin2/api/query/getparamslist/ HTTP/1.1
           Host: api.sdss.org
           Accept: application/json, */*

        **Example response**:

        .. sourcecode:: http

           HTTP/1.1 200 OK
           Content-Type: application/json

           {
              "status": 1,
              "error": null,
              "inconfig": {"release": "MPL-5"},
              "utahconfig": {"release": "MPL-5", "mode": "local"},
              "traceback": null,
              "data": ['nsa.z', 'cube.ra', 'cube.dec', ...]
           }

        '''
        paramdisplay = args.pop('paramdisplay', 'all')
        q = Query(mode='local')
        if paramdisplay == 'all':
            params = q.get_available_params()
        elif paramdisplay == 'best':
            params = q.get_best_params()
        else:
            # Previously an unrecognized value crashed with an unbound-local
            # NameError; report a usable error response instead.
            self.results['error'] = "paramdisplay must be either 'all' or 'best'"
            return jsonify(self.results)
        self.results['data'] = params
        self.results['status'] = 1
        output = jsonify(self.results)
        return output

    @route('/cleanup/', methods=['GET', 'POST'], endpoint='cleanupqueries')
    @av.check_args(use_params='query', required='task')
    def cleanup(self, args):
        ''' Clean up idle server-side queries or retrieve the list of them

        Do not use!

        .. :quickref: Query; Send a cleanup command to the server-side database

        :query string release: the release of MaNGA
        :form task: ``clean`` or ``getprocs``, the type of task to run
        :resjson int status: status of response. 1 if good, -1 if bad.
        :resjson string error: error message, null if None
        :resjson json inconfig: json of incoming configuration
        :resjson json utahconfig: json of outcoming configuration
        :resjson string traceback: traceback of an error, null if None
        :resjson string data: dictionary of returned data
        :json string clean: clean success message
        :json list procs: the list of processes currently running on the db
        :resheader Content-Type: application/json
        :statuscode 200: no error
        :statuscode 422: invalid input parameters

        **Example request**:

        .. sourcecode:: http

           GET /marvin2/api/query/cleanup/ HTTP/1.1
           Host: api.sdss.org
           Accept: application/json, */*

        **Example response**:

        .. sourcecode:: http

           HTTP/1.1 200 OK
           Content-Type: application/json

           {
              "status": 1,
              "error": null,
              "inconfig": {"release": "MPL-5"},
              "utahconfig": {"release": "MPL-5", "mode": "local"},
              "traceback": null,
              "data": 'clean success'
           }

        '''
        task = args.pop('task', None)
        if task == 'clean':
            q = Query(mode='local')
            q._cleanUpQueries()
            res = {'status': 1, 'data': 'clean success'}
        elif task == 'getprocs':
            q = Query(mode='local')
            procs = q._getIdleProcesses()
            # Convert DB row proxies into plain JSON-serializable dicts.
            procs = [{k: v for k, v in y.items()} for y in procs]
            res = {'status': 1, 'data': procs}
        else:
            res = {'status': -1, 'data': None, 'error': 'Task is None or not in [clean, getprocs]'}
        self.update_results(res)
        output = jsonify(self.results)
        return output
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple MNIST classifier which displays summaries in TensorBoard.
This is an unimpressive MNIST model, but it is a good example of using
tf.name_scope to make a graph legible in the TensorBoard graph explorer, and of
naming summary tags so that they are grouped meaningfully in TensorBoard.
It demonstrates the functionality of every TensorBoard dashboard.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
def train():
  """Build the two-layer MNIST model, train it, and write TensorBoard summaries.

  All hyperparameters and paths are read from the module-level FLAGS.
  Side effects: writes event files under FLAGS.log_dir/train and
  FLAGS.log_dir/test.
  """
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir,
                                    one_hot=True,
                                    fake_data=FLAGS.fake_data)

  sess = tf.InteractiveSession()
  # Create a multilayer model.

  # Input placeholders
  with tf.name_scope('input'):
    # 784 = 28*28 flattened pixels; 10 = one-hot digit classes.
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')

  with tf.name_scope('input_reshape'):
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
    tf.summary.image('input', image_shaped_input, 10)

  # We can't initialize these variables to 0 - the network will get stuck.
  def weight_variable(shape):
    """Create a weight variable with appropriate initialization."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

  def bias_variable(shape):
    """Create a bias variable with appropriate initialization."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

  def variable_summaries(var):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
    with tf.name_scope('summaries'):
      mean = tf.reduce_mean(var)
      tf.summary.scalar('mean', mean)
      with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
      tf.summary.scalar('stddev', stddev)
      tf.summary.scalar('max', tf.reduce_max(var))
      tf.summary.scalar('min', tf.reduce_min(var))
      tf.summary.histogram('histogram', var)

  def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    """Reusable code for making a simple neural net layer.

    It does a matrix multiply, bias add, and then uses relu to nonlinearize.
    It also sets up name scoping so that the resultant graph is easy to read,
    and adds a number of summary ops.
    """
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
      # This Variable will hold the state of the weights for the layer
      with tf.name_scope('weights'):
        weights = weight_variable([input_dim, output_dim])
        variable_summaries(weights)
      with tf.name_scope('biases'):
        biases = bias_variable([output_dim])
        variable_summaries(biases)
      with tf.name_scope('Wx_plus_b'):
        preactivate = tf.matmul(input_tensor, weights) + biases
        tf.summary.histogram('pre_activations', preactivate)
      activations = act(preactivate, name='activation')
      tf.summary.histogram('activations', activations)
      return activations

  hidden1 = nn_layer(x, 784, 500, 'layer1')

  with tf.name_scope('dropout'):
    # keep_prob is fed per-step: FLAGS.dropout while training, 1.0 at test.
    keep_prob = tf.placeholder(tf.float32)
    tf.summary.scalar('dropout_keep_probability', keep_prob)
    dropped = tf.nn.dropout(hidden1, keep_prob)

  # Do not apply softmax activation yet, see below.
  y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)

  with tf.name_scope('cross_entropy'):
    # The raw formulation of cross-entropy,
    #
    # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),
    #                               reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.nn.softmax_cross_entropy_with_logits on the
    # raw outputs of the nn_layer above, and then average across
    # the batch.
    diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    with tf.name_scope('total'):
      cross_entropy = tf.reduce_mean(diff)
  tf.summary.scalar('cross_entropy', cross_entropy)

  with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
        cross_entropy)

  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', accuracy)

  # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
  merged = tf.summary.merge_all()
  train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
  test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
  tf.global_variables_initializer().run()

  # Train the model, and also write summaries.
  # Every 10th step, measure test-set accuracy, and write test summaries
  # All other steps, run train_step on training data, & add training summaries

  def feed_dict(train):
    """Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
    if train or FLAGS.fake_data:
      xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
      k = FLAGS.dropout
    else:
      # Full test set, dropout disabled.
      xs, ys = mnist.test.images, mnist.test.labels
      k = 1.0
    return {x: xs, y_: ys, keep_prob: k}

  for i in range(FLAGS.max_steps):
    if i % 10 == 0:  # Record summaries and test-set accuracy
      summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
      test_writer.add_summary(summary, i)
      print('Accuracy at step %s: %s' % (i, acc))
    else:  # Record train set summaries, and train
      if i % 100 == 99:  # Record execution stats
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        summary, _ = sess.run([merged, train_step],
                              feed_dict=feed_dict(True),
                              options=run_options,
                              run_metadata=run_metadata)
        train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
        train_writer.add_summary(summary, i)
        print('Adding run metadata for', i)
      else:  # Record a summary
        summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
        train_writer.add_summary(summary, i)
  train_writer.close()
  test_writer.close()
def main(_):
  """Wipe any previous log directory, then run training from scratch."""
  log_dir = FLAGS.log_dir
  if tf.gfile.Exists(log_dir):
    tf.gfile.DeleteRecursively(log_dir)
  tf.gfile.MakeDirs(log_dir)
  train()
if __name__ == '__main__':
  # Command-line flags for hyperparameters and data/log locations.
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('--fake_data', nargs='?', const=True, type=bool,
                          default=False,
                          help='If true, uses fake data for unit testing.')
  arg_parser.add_argument('--max_steps', type=int, default=1000,
                          help='Number of steps to run trainer.')
  arg_parser.add_argument('--learning_rate', type=float, default=0.001,
                          help='Initial learning rate')
  arg_parser.add_argument('--dropout', type=float, default=0.9,
                          help='Keep probability for training dropout.')
  arg_parser.add_argument('--data_dir', type=str,
                          default='/tmp/tensorflow/mnist/input_data',
                          help='Directory for storing input data')
  arg_parser.add_argument('--log_dir', type=str,
                          default='/tmp/tensorflow/mnist/logs/mnist_with_summaries',
                          help='Summaries log directory')
  # Unknown flags are forwarded to tf.app.run untouched.
  FLAGS, unparsed = arg_parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| |
import logging; _L = logging.getLogger('openaddr.ci.collect')
from ..compat import standard_library
from argparse import ArgumentParser
from os import environ
from zipfile import ZipFile, ZIP_DEFLATED
from os.path import splitext, exists, basename, join
from urllib.parse import urlparse
from operator import attrgetter
from tempfile import mkstemp, mkdtemp
from datetime import date
from shutil import rmtree
from .objects import read_latest_set, read_completed_runs_to_date
from . import db_connect, db_cursor, setup_logger, render_index_maps
from .. import S3, iterate_local_processed_files
# Module-level CLI definition, shared by main() below.
parser = ArgumentParser(description='Run some source files.')

parser.add_argument('-o', '--owner', default='openaddresses',
                    help='Github repository owner. Defaults to "openaddresses".')

parser.add_argument('-r', '--repository', default='openaddresses',
                    help='Github repository name. Defaults to "openaddresses".')

parser.add_argument('-b', '--bucket', default='data.openaddresses.io',
                    help='S3 bucket name. Defaults to "data.openaddresses.io".')

parser.add_argument('-d', '--database-url', default=environ.get('DATABASE_URL', None),
                    help='Optional connection string for database. Defaults to value of DATABASE_URL environment variable.')
def main():
    ''' Single threaded worker to serve the job queue.

    Reads the latest run set from the database, renders index maps, then
    streams every processed source file into a global collection zip plus
    one zip per matching region, uploading each to S3 on close.
    '''
    args = parser.parse_args()
    setup_logger(environ.get('AWS_SNS_ARN'))

    # Rely on boto AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY variables.
    s3 = S3(None, None, environ.get('AWS_S3_BUCKET', args.bucket))

    with db_connect(args.database_url) as conn:
        with db_cursor(conn) as db:
            # Renamed from `set`/`dir` to avoid shadowing builtins.
            run_set = read_latest_set(db, args.owner, args.repository)
            runs = read_completed_runs_to_date(db, run_set.id)

    render_index_maps(s3, runs)

    workdir = mkdtemp(prefix='collected-')
    try:
        # The global collection receives every file; regional collections
        # receive only files matching their predicate.
        everything = collect_and_publish(
            s3, _prepare_zip(run_set, join(workdir, 'openaddresses-collected.zip')))
        regions = [
            ('openaddresses-us_northeast.zip', is_us_northeast),
            ('openaddresses-us_midwest.zip', is_us_midwest),
            ('openaddresses-us_south.zip', is_us_south),
            ('openaddresses-us_west.zip', is_us_west),
            ('openaddresses-europe.zip', is_europe),
            ('openaddresses-asia.zip', is_asia),
        ]
        collectors = [
            (collect_and_publish(s3, _prepare_zip(run_set, join(workdir, name))), test)
            for (name, test) in regions
        ]

        for (sb, fn, sd) in iterate_local_processed_files(runs):
            everything.send((sb, fn, sd))
            for (collector, test) in collectors:
                if test(sb, fn, sd):
                    collector.send((sb, fn, sd))

        # Closing each generator finalizes its zip and uploads it.
        everything.close()
        for (collector, _) in collectors:
            collector.close()
    finally:
        # Clean up the scratch directory even if collection/upload fails.
        rmtree(workdir)
def _prepare_zip(set, filename):
'''
'''
zipfile = ZipFile(filename, 'w', ZIP_DEFLATED, allowZip64=True)
sources_tpl = 'https://github.com/{owner}/{repository}/tree/{commit_sha}/sources'
sources_url = sources_tpl.format(**set.__dict__)
zipfile.writestr('README.txt', '''Data collected around {date} by OpenAddresses (http://openaddresses.io).
Address data is essential infrastructure. Street names, house numbers and
postal codes, when combined with geographic coordinates, are the hub that
connects digital to physical places.
Data licenses can be found in LICENSE.txt.
Data source information can be found at
{url}
'''.format(url=sources_url, date=date.today()))
return zipfile
def collect_and_publish(s3, collection_zip):
    ''' Returns a primed generator-iterator to accept sent source/filename tuples.

        Each is added to the passed ZipFile. On completion, a new S3 object
        is created with zipfile name and the collection is closed and uploaded.
    '''
    def get_collector_publisher():
        # Accumulates {source_base: {website, license}} for LICENSE.txt.
        source_dicts = dict()

        while True:
            try:
                # Caller feeds (source_base, filename, source_dict) via .send().
                (source_base, filename, source_dict) = yield
            except GeneratorExit:
                # .close() was called: stop collecting and fall through to
                # finalize and upload the archive below.
                break
            else:
                _L.info(u'Adding {} to {}'.format(source_base, collection_zip.filename))
                add_source_to_zipfile(collection_zip, source_base, filename)
                source_dicts[source_base] = {
                    'website': source_dict.get('website') or 'Unknown',
                    'license': source_dict.get('license') or 'Unknown'
                    }

        # Write a short file with source licenses.
        template = u'{source}\nWebsite: {website}\nLicense: {license}\n'
        license_bits = [(k, v['website'], v['license']) for (k, v) in sorted(source_dicts.items())]
        license_lines = [u'Data collected by OpenAddresses (http://openaddresses.io).\n']
        license_lines += [template.format(source=s, website=w, license=l) for (s, w, l) in license_bits]
        collection_zip.writestr('LICENSE.txt', u'\n'.join(license_lines).encode('utf8'))
        collection_zip.close()
        _L.info(u'Finished {}'.format(collection_zip.filename))

        # Upload the finished archive as a world-readable S3 object.
        zip_key = s3.new_key(basename(collection_zip.filename))
        zip_args = dict(policy='public-read', headers={'Content-Type': 'application/zip'})
        zip_key.set_contents_from_filename(collection_zip.filename, **zip_args)
        _L.info(u'Uploaded {} to {}'.format(collection_zip.filename, zip_key.name))

    collector_publisher = get_collector_publisher()

    # Generator-iterator must be primed:
    # https://docs.python.org/2.7/reference/expressions.html#generator.next
    next(collector_publisher)

    return collector_publisher
def add_source_to_zipfile(zip_out, source_base, filename):
    ''' Add one processed source result to the open output zip archive.

        A .csv result is stored under the source's own name; a .zip result
        has its members copied across, skipping per-source README.txt files.
        Files with any other extension are silently ignored.
    '''
    _, ext = splitext(filename)
    if ext == '.csv':
        zip_out.write(filename, source_base + ext)
    elif ext == '.zip':
        # Context manager guarantees the input archive is closed even if
        # reading a member raises (the original leaked the handle on error).
        with ZipFile(filename, 'r') as zip_in:
            for zipinfo in zip_in.infolist():
                if zipinfo.filename == 'README.txt':
                    # Skip README files when building collection.
                    continue
                zip_out.writestr(zipinfo, zip_in.read(zipinfo.filename))
def _is_us_state(abbr, source_base, filename, source_dict):
for sep in ('/', '-'):
if source_base == 'us{sep}{abbr}'.format(**locals()):
return True
if source_base.startswith('us{sep}{abbr}.'.format(**locals())):
return True
if source_base.startswith('us{sep}{abbr}{sep}'.format(**locals())):
return True
return False
def is_us_northeast(source_base, filename, source_dict):
    ''' True if the source belongs to a northeastern U.S. state. '''
    northeast = ('ct', 'me', 'ma', 'nh', 'ri', 'vt', 'nj', 'ny', 'pa')
    return any(_is_us_state(abbr, source_base, filename, source_dict)
               for abbr in northeast)
def is_us_midwest(source_base, filename, source_dict):
    ''' True if the source belongs to a midwestern U.S. state. '''
    midwest = ('il', 'in', 'mi', 'oh', 'wi', 'ia', 'ks', 'mn', 'mo', 'ne', 'nd', 'sd')
    return any(_is_us_state(abbr, source_base, filename, source_dict)
               for abbr in midwest)
def is_us_south(source_base, filename, source_dict):
    ''' True if the source belongs to a southern U.S. state. '''
    south = ('de', 'fl', 'ga', 'md', 'nc', 'sc', 'va', 'dc', 'wv', 'al',
             'ky', 'ms', 'ar', 'la', 'ok', 'tx', 'tn')
    return any(_is_us_state(abbr, source_base, filename, source_dict)
               for abbr in south)
def is_us_west(source_base, filename, source_dict):
    ''' True if the source belongs to a western U.S. state. '''
    west = ('az', 'co', 'id', 'mt', 'nv', 'nm', 'ut', 'wy', 'ak', 'ca', 'hi', 'or', 'wa')
    return any(_is_us_state(abbr, source_base, filename, source_dict)
               for abbr in west)
def _is_country(iso, source_base, filename, source_dict):
for sep in ('/', '-'):
if source_base == iso:
return True
if source_base.startswith('{iso}.'.format(**locals())):
return True
if source_base.startswith('{iso}{sep}'.format(**locals())):
return True
return False
def is_europe(source_base, filename, source_dict):
    ''' True if the source belongs to a European country. '''
    # EU member codes plus uk/gb and both Greece spellings (el/gr).
    europe = ('be', 'bg', 'cz', 'dk', 'de', 'ee', 'ie', 'el', 'es', 'fr',
              'hr', 'it', 'cy', 'lv', 'lt', 'lu', 'hu', 'mt', 'nl', 'at',
              'pl', 'pt', 'ro', 'si', 'sk', 'fi', 'se', 'uk', 'gr', 'gb' )
    return any(_is_country(iso, source_base, filename, source_dict)
               for iso in europe)
def is_asia(source_base, filename, source_dict):
    ''' True if the source belongs to an Asian or Oceanian country.

        The last three rows are Oceania codes bundled into this collection;
        a few codes ('nz', 'um', 'mp') appear twice, which is harmless.
    '''
    asia_oceania = ('af', 'am', 'az', 'bh', 'bd', 'bt', 'bn', 'kh', 'cn', 'cx',
                    'cc', 'io', 'ge', 'hk', 'in', 'id', 'ir', 'iq', 'il', 'jp',
                    'jo', 'kz', 'kp', 'kr', 'kw', 'kg', 'la', 'lb', 'mo', 'my',
                    'mv', 'mn', 'mm', 'np', 'om', 'pk', 'ph', 'qa', 'sa', 'sg',
                    'lk', 'sy', 'tw', 'tj', 'th', 'tr', 'tm', 'ae', 'uz', 'vn',
                    'ye', 'ps',
                    'as', 'au', 'nz', 'ck', 'fj', 'pf', 'gu', 'ki', 'mp', 'mh',
                    'fm', 'um', 'nr', 'nc', 'nz', 'nu', 'nf', 'pw', 'pg', 'mp',
                    'sb', 'tk', 'to', 'tv', 'vu', 'um', 'wf', 'ws', 'is')
    return any(_is_country(iso, source_base, filename, source_dict)
               for iso in asia_oceania)
# Command-line entry point: run the collector and use its result as the
# process exit status.
if __name__ == '__main__':
    exit(main())
| |
import hashlib
import operator
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.utils import truncate_name
from django.db.models.fields.related import ManyToManyField
from django.db.transaction import atomic
from django.utils.encoding import force_bytes
from django.utils.log import getLogger
from django.utils.six.moves import reduce
from django.utils.six import callable
logger = getLogger('django.db.backends.schema')
class BaseDatabaseSchemaEditor(object):
"""
This class (and its subclasses) are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
This class should be used by creating an instance for each set of schema
changes (e.g. a syncdb run, a migration file), and by first calling start(),
then the relevant actions, and then commit(). This is necessary to allow
things like circular foreign key references - FKs will only be created once
commit() is called.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_create_table_unique = "UNIQUE (%(columns)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
    """Begins a schema-editing session; returns self for use as a context manager."""
    # Statements to run last (after all tables exist), e.g. FK constraints.
    self.deferred_sql = []
    if self.connection.features.can_rollback_ddl:
        # Wrap the whole session in a transaction on backends whose DDL
        # is transactional, so a failure rolls everything back.
        self.atomic = atomic(self.connection.alias)
        self.atomic.__enter__()
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """Ends the session: flushes deferred SQL on success, then closes any transaction."""
    # Only run deferred statements if the body raised nothing.
    if exc_type is None:
        for sql in self.deferred_sql:
            self.execute(sql)
    if self.connection.features.can_rollback_ddl:
        # Commits on success or rolls back on error, mirroring __enter__.
        self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
    """
    Executes the given SQL statement, with optional parameters.

    In collect_sql mode the statement is interpolated with quoted values
    (via quote_value) and appended to self.collected_sql instead of
    being run against the database.

    NOTE(review): the mutable default for ``params`` is never mutated here,
    but a tuple default would be safer against future edits.
    """
    # Log the command we're running, then run it
    logger.debug("%s; (params %r)" % (sql, params))
    if self.collect_sql:
        self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ";")
    else:
        with self.connection.cursor() as cursor:
            cursor.execute(sql, params)
def quote_name(self, name):
    """Quotes an identifier (table/column name) using the backend's convention."""
    return self.connection.ops.quote_name(name)
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
    """
    Takes a field and returns its column definition.
    The field must already have had set_attributes_from_name called.

    Returns a (sql, params) pair; (None, None) for fields (e.g. M2M)
    that have no column of their own.
    """
    # Get the column's type and use that as the basis of the SQL
    db_params = field.db_parameters(connection=self.connection)
    sql = db_params['type']
    params = []
    # Check for fields that aren't actually columns (e.g. M2M)
    if sql is None:
        return None, None
    # Work out nullability
    null = field.null
    # If we were told to include a default value, do so
    default_value = self.effective_default(field)
    if include_default and default_value is not None:
        if self.connection.features.requires_literal_defaults:
            # Some databases can't take defaults as a parameter (oracle)
            # If this is the case, the individual schema backend should
            # implement prepare_default
            sql += " DEFAULT %s" % self.prepare_default(default_value)
        else:
            # Otherwise pass the default through as a query parameter.
            sql += " DEFAULT %s"
            params += [default_value]
    # Oracle treats the empty string ('') as null, so coerce the null
    # option whenever '' is a possible value.
    if (field.empty_strings_allowed and not field.primary_key and
            self.connection.features.interprets_empty_strings_as_nulls):
        null = True
    if null:
        sql += " NULL"
    else:
        sql += " NOT NULL"
    # Primary key/unique outputs
    if field.primary_key:
        sql += " PRIMARY KEY"
    elif field.unique:
        sql += " UNIQUE"
    # Optionally add the tablespace if it's an implicitly indexed column
    tablespace = field.db_tablespace or model._meta.db_tablespace
    if tablespace and self.connection.features.supports_tablespaces and field.unique:
        sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
    # Return the sql
    return sql, params
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError('subclasses of BaseDatabaseSchemaEditor for backends which have requires_literal_defaults must provide a prepare_default() method')
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
default = ""
else:
default = None
# If it's a callable, call it
if callable(default):
default = default()
return default
def quote_value(self, value):
    """
    Returns a quoted version of the value so it's safe to use in an SQL
    string. This is not safe against injection from user code; it is
    intended only for use in making SQL scripts or preparing default values
    for particularly tricky backends (defaults are not user-defined, though,
    so this is safe).
    """
    # Backends must override; collect_sql mode relies on it via execute().
    raise NotImplementedError()
# Actions
def create_model(self, model):
    """
    Takes a model and creates a table for it in the database.
    Will also create any accompanying indexes or unique constraints.

    FK constraints and index creation go through self.deferred_sql so
    that referenced tables exist before the constraints are applied.
    """
    # Create column SQL, add FK deferreds if needed
    column_sqls = []
    params = []
    for field in model._meta.local_fields:
        # SQL
        definition, extra_params = self.column_sql(model, field)
        if definition is None:
            # Field has no column of its own (e.g. M2M).
            continue
        # Check constraints can go on the column SQL here
        db_params = field.db_parameters(connection=self.connection)
        if db_params['check']:
            definition += " CHECK (%s)" % db_params['check']
        # Autoincrement SQL (for backends with inline variant)
        col_type_suffix = field.db_type_suffix(connection=self.connection)
        if col_type_suffix:
            definition += " %s" % col_type_suffix
        params.extend(extra_params)
        # Indexes
        if field.db_index and not field.unique:
            self.deferred_sql.append(
                self.sql_create_index % {
                    "name": self._create_index_name(model, [field.column], suffix=""),
                    "table": self.quote_name(model._meta.db_table),
                    "columns": self.quote_name(field.column),
                    "extra": "",
                }
            )
        # FK
        if field.rel:
            to_table = field.rel.to._meta.db_table
            to_column = field.rel.to._meta.get_field(field.rel.field_name).column
            if self.connection.features.supports_foreign_keys:
                # Deferred so the target table exists when this runs.
                self.deferred_sql.append(
                    self.sql_create_fk % {
                        "name": self._create_index_name(model, [field.column], suffix="_fk_%s_%s" % (to_table, to_column)),
                        "table": self.quote_name(model._meta.db_table),
                        "column": self.quote_name(field.column),
                        "to_table": self.quote_name(to_table),
                        "to_column": self.quote_name(to_column),
                    }
                )
            elif self.sql_create_inline_fk:
                definition += " " + self.sql_create_inline_fk % {
                    "to_table": self.quote_name(to_table),
                    "to_column": self.quote_name(to_column),
                }
        # Add the SQL to our big list
        column_sqls.append("%s %s" % (
            self.quote_name(field.column),
            definition,
        ))
        # Autoincrement SQL (for backends with post table definition variant)
        if field.get_internal_type() == "AutoField":
            autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
            if autoinc_sql:
                self.deferred_sql.extend(autoinc_sql)
    # Add any unique_togethers
    for fields in model._meta.unique_together:
        columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
        column_sqls.append(self.sql_create_table_unique % {
            "columns": ", ".join(self.quote_name(column) for column in columns),
        })
    # Make the table
    sql = self.sql_create_table % {
        "table": self.quote_name(model._meta.db_table),
        "definition": ", ".join(column_sqls)
    }
    self.execute(sql, params)
    # Add any index_togethers
    for fields in model._meta.index_together:
        columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
        self.execute(self.sql_create_index % {
            "table": self.quote_name(model._meta.db_table),
            "name": self._create_index_name(model, columns, suffix="_idx"),
            "columns": ", ".join(self.quote_name(column) for column in columns),
            "extra": "",
        })
    # Make M2M tables
    for field in model._meta.local_many_to_many:
        if field.rel.through._meta.auto_created:
            self.create_model(field.rel.through)
def delete_model(self, model):
    """
    Deletes a model from the database.
    """
    # Auto-created M2M through tables belong to this model; drop them first.
    for m2m_field in model._meta.local_many_to_many:
        through = m2m_field.rel.through
        if through._meta.auto_created:
            self.delete_model(through)
    # Then drop the model's own table.
    drop_sql = self.sql_delete_table % {
        "table": self.quote_name(model._meta.db_table),
    }
    self.execute(drop_sql)
def alter_unique_together(self, model, old_unique_together, new_unique_together):
    """
    Deals with a model changing its unique_together.
    Note: The input unique_togethers must be doubly-nested, not the single-
    nested ["foo", "bar"] format.
    """
    old_sets = set(tuple(fields) for fields in old_unique_together)
    new_sets = set(tuple(fields) for fields in new_unique_together)
    # Drop constraints that are no longer wanted.
    for fields in old_sets - new_sets:
        columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
        constraint_names = self._constraint_names(model, columns, unique=True)
        # Exactly one matching constraint must exist to drop it safely.
        if len(constraint_names) != 1:
            raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
                len(constraint_names),
                model._meta.db_table,
                ", ".join(columns),
            ))
        self.execute(self.sql_delete_unique % {
            "table": self.quote_name(model._meta.db_table),
            "name": constraint_names[0],
        })
    # Create any newly-required constraints.
    for fields in new_sets - old_sets:
        columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
        self.execute(self.sql_create_unique % {
            "table": self.quote_name(model._meta.db_table),
            "name": self._create_index_name(model, columns, suffix="_uniq"),
            "columns": ", ".join(self.quote_name(column) for column in columns),
        })
def alter_index_together(self, model, old_index_together, new_index_together):
    """
    Deals with a model changing its index_together.
    Note: The input index_togethers must be doubly-nested, not the single-
    nested ["foo", "bar"] format.
    """
    old_sets = set(tuple(fields) for fields in old_index_together)
    new_sets = set(tuple(fields) for fields in new_index_together)
    # Drop indexes that are no longer wanted.
    for fields in old_sets - new_sets:
        columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
        constraint_names = self._constraint_names(model, list(columns), index=True)
        # Exactly one matching index must exist to drop it safely.
        if len(constraint_names) != 1:
            raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
                len(constraint_names),
                model._meta.db_table,
                ", ".join(columns),
            ))
        self.execute(self.sql_delete_index % {
            "table": self.quote_name(model._meta.db_table),
            "name": constraint_names[0],
        })
    # Create any newly-required indexes.
    for fields in new_sets - old_sets:
        columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
        self.execute(self.sql_create_index % {
            "table": self.quote_name(model._meta.db_table),
            "name": self._create_index_name(model, columns, suffix="_idx"),
            "columns": ", ".join(self.quote_name(column) for column in columns),
            "extra": "",
        })
def alter_db_table(self, model, old_db_table, new_db_table):
    """
    Renames the table a model points to.
    """
    rename_sql = self.sql_rename_table % {
        "old_table": self.quote_name(old_db_table),
        "new_table": self.quote_name(new_db_table),
    }
    self.execute(rename_sql)
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
    """
    Moves a model's table between tablespaces.
    """
    move_sql = self.sql_retablespace_table % {
        "table": self.quote_name(model._meta.db_table),
        "old_tablespace": self.quote_name(old_db_tablespace),
        "new_tablespace": self.quote_name(new_db_tablespace),
    }
    self.execute(move_sql)
def add_field(self, model, field):
    """
    Creates a field on a model.
    Usually involves adding a column, but may involve adding a
    table instead (for M2M fields)
    """
    # Special-case implicit M2M tables
    if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
        return self.create_model(field.rel.through)
    # Get the column's definition
    definition, params = self.column_sql(model, field, include_default=True)
    # It might not actually have a column behind it
    if definition is None:
        return
    # Check constraints can go on the column SQL here
    db_params = field.db_parameters(connection=self.connection)
    if db_params['check']:
        definition += " CHECK (%s)" % db_params['check']
    # Build the SQL and run it
    sql = self.sql_create_column % {
        "table": self.quote_name(model._meta.db_table),
        "column": self.quote_name(field.column),
        "definition": definition,
    }
    self.execute(sql, params)
    # Drop the default if we need to
    # (Django usually does not use in-database defaults)
    # The default above only existed to populate the new column's rows.
    if field.default is not None:
        sql = self.sql_alter_column % {
            "table": self.quote_name(model._meta.db_table),
            "changes": self.sql_alter_column_no_default % {
                "column": self.quote_name(field.column),
            }
        }
        self.execute(sql)
    # Add an index, if required
    if field.db_index and not field.unique:
        self.deferred_sql.append(
            self.sql_create_index % {
                "name": self._create_index_name(model, [field.column], suffix=""),
                "table": self.quote_name(model._meta.db_table),
                "columns": self.quote_name(field.column),
                "extra": "",
            }
        )
    # Add any FK constraints later
    if field.rel and self.connection.features.supports_foreign_keys:
        to_table = field.rel.to._meta.db_table
        to_column = field.rel.to._meta.get_field(field.rel.field_name).column
        # NOTE(review): FK name built by hash here, unlike create_model
        # which uses _create_index_name -- confirm the inconsistency is ok.
        self.deferred_sql.append(
            self.sql_create_fk % {
                "name": self.quote_name('%s_refs_%s_%x' % (
                    field.column,
                    to_column,
                    abs(hash((model._meta.db_table, to_table)))
                )),
                "table": self.quote_name(model._meta.db_table),
                "column": self.quote_name(field.column),
                "to_table": self.quote_name(to_table),
                "to_column": self.quote_name(to_column),
            }
        )
    # Reset connection if required
    if self.connection.features.connection_persists_old_columns:
        self.connection.close()
def remove_field(self, model, field):
    """
    Removes a field from a model. Usually involves deleting a column,
    but for M2Ms may involve deleting a table.
    """
    # Special-case implicit M2M tables
    if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
        return self.delete_model(field.rel.through)
    # It might not actually have a column behind it
    if field.db_parameters(connection=self.connection)['type'] is None:
        return
    # Drop any FK constraints, MySQL requires explicit deletion
    if field.rel:
        fk_names = self._constraint_names(model, [field.column], foreign_key=True)
        for fk_name in fk_names:
            self.execute(
                self.sql_delete_fk % {
                    "table": self.quote_name(model._meta.db_table),
                    "name": fk_name,
                }
            )
    # Delete the column
    sql = self.sql_delete_column % {
        "table": self.quote_name(model._meta.db_table),
        "column": self.quote_name(field.column),
    }
    self.execute(sql)
    # Reset connection if required
    if self.connection.features.connection_persists_old_columns:
        self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
    """
    Allows a field's type, uniqueness, nullability, default, column,
    constraints etc. to be modified.
    Requires a copy of the old field as well so we can only perform
    changes that are required.
    If strict is true, raises errors if the old column does not match old_field precisely.

    Overall order: drop affected constraints/indexes, rename the column,
    apply ALTER COLUMN changes, then recreate constraints/indexes.
    """
    # Ensure this field is even column-based
    old_db_params = old_field.db_parameters(connection=self.connection)
    old_type = old_db_params['type']
    new_db_params = new_field.db_parameters(connection=self.connection)
    new_type = new_db_params['type']
    # Both sides implicit M2Ms: delegate to the through-table path.
    if old_type is None and new_type is None and (old_field.rel.through and new_field.rel.through and old_field.rel.through._meta.auto_created and new_field.rel.through._meta.auto_created):
        return self._alter_many_to_many(model, old_field, new_field, strict)
    elif old_type is None or new_type is None:
        raise ValueError("Cannot alter field %s into %s - they are not compatible types (probably means only one is an M2M with implicit through model)" % (
            old_field,
            new_field,
        ))
    # Has unique been removed?
    if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
        # Find the unique constraint for this field
        constraint_names = self._constraint_names(model, [old_field.column], unique=True)
        if strict and len(constraint_names) != 1:
            raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
                len(constraint_names),
                model._meta.db_table,
                old_field.column,
            ))
        for constraint_name in constraint_names:
            self.execute(
                self.sql_delete_unique % {
                    "table": self.quote_name(model._meta.db_table),
                    "name": constraint_name,
                },
            )
    # Removed an index?
    # NOTE(review): the final clause is always true once `not
    # old_field.unique` has passed -- looks redundant; confirm.
    if old_field.db_index and not new_field.db_index and not old_field.unique and not (not new_field.unique and old_field.unique):
        # Find the index for this field
        index_names = self._constraint_names(model, [old_field.column], index=True)
        if strict and len(index_names) != 1:
            raise ValueError("Found wrong number (%s) of indexes for %s.%s" % (
                len(index_names),
                model._meta.db_table,
                old_field.column,
            ))
        for index_name in index_names:
            self.execute(
                self.sql_delete_index % {
                    "table": self.quote_name(model._meta.db_table),
                    "name": index_name,
                }
            )
    # Drop any FK constraints, we'll remake them later
    if old_field.rel:
        fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
        if strict and len(fk_names) != 1:
            raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
                len(fk_names),
                model._meta.db_table,
                old_field.column,
            ))
        for fk_name in fk_names:
            self.execute(
                self.sql_delete_fk % {
                    "table": self.quote_name(model._meta.db_table),
                    "name": fk_name,
                }
            )
    # Drop incoming FK constraints if we're a primary key and things are going
    # to change.
    if old_field.primary_key and new_field.primary_key and old_type != new_type:
        for rel in new_field.model._meta.get_all_related_objects():
            rel_fk_names = self._constraint_names(rel.model, [rel.field.column], foreign_key=True)
            for fk_name in rel_fk_names:
                self.execute(
                    self.sql_delete_fk % {
                        "table": self.quote_name(rel.model._meta.db_table),
                        "name": fk_name,
                    }
                )
    # Change check constraints?
    if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
        constraint_names = self._constraint_names(model, [old_field.column], check=True)
        if strict and len(constraint_names) != 1:
            raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
                len(constraint_names),
                model._meta.db_table,
                old_field.column,
            ))
        for constraint_name in constraint_names:
            self.execute(
                self.sql_delete_check % {
                    "table": self.quote_name(model._meta.db_table),
                    "name": constraint_name,
                }
            )
    # Have they renamed the column?
    if old_field.column != new_field.column:
        self.execute(self.sql_rename_column % {
            "table": self.quote_name(model._meta.db_table),
            "old_column": self.quote_name(old_field.column),
            "new_column": self.quote_name(new_field.column),
            "type": new_type,
        })
    # Next, start accumulating actions to do
    # Each action is a (sql_fragment, params) pair for ALTER TABLE.
    actions = []
    post_actions = []
    # Type change?
    if old_type != new_type:
        fragment, other_actions = self._alter_column_type_sql(model._meta.db_table, new_field.column, new_type)
        actions.append(fragment)
        post_actions.extend(other_actions)
    # Default change?
    old_default = self.effective_default(old_field)
    new_default = self.effective_default(new_field)
    if old_default != new_default:
        if new_default is None:
            actions.append((
                self.sql_alter_column_no_default % {
                    "column": self.quote_name(new_field.column),
                },
                [],
            ))
        else:
            if self.connection.features.requires_literal_defaults:
                # Some databases can't take defaults as a parameter (oracle)
                # If this is the case, the individual schema backend should
                # implement prepare_default
                actions.append((
                    self.sql_alter_column_default % {
                        "column": self.quote_name(new_field.column),
                        "default": self.prepare_default(new_default),
                    },
                    [],
                ))
            else:
                actions.append((
                    self.sql_alter_column_default % {
                        "column": self.quote_name(new_field.column),
                        "default": "%s",
                    },
                    [new_default],
                ))
    # Nullability change?
    if old_field.null != new_field.null:
        if new_field.null:
            actions.append((
                self.sql_alter_column_null % {
                    "column": self.quote_name(new_field.column),
                    "type": new_type,
                },
                [],
            ))
        else:
            actions.append((
                self.sql_alter_column_not_null % {
                    "column": self.quote_name(new_field.column),
                    "type": new_type,
                },
                [],
            ))
    if actions:
        # Combine actions together if we can (e.g. postgres)
        if self.connection.features.supports_combined_alters:
            sql, params = tuple(zip(*actions))
            actions = [(", ".join(sql), reduce(operator.add, params))]
        # Apply those actions
        for sql, params in actions:
            self.execute(
                self.sql_alter_column % {
                    "table": self.quote_name(model._meta.db_table),
                    "changes": sql,
                },
                params,
            )
    if post_actions:
        # Follow-up statements from _alter_column_type_sql (backend hooks).
        for sql, params in post_actions:
            self.execute(sql, params)
    # Added a unique?
    if not old_field.unique and new_field.unique:
        self.execute(
            self.sql_create_unique % {
                "table": self.quote_name(model._meta.db_table),
                "name": self._create_index_name(model, [new_field.column], suffix="_uniq"),
                "columns": self.quote_name(new_field.column),
            }
        )
    # Added an index?
    # NOTE(review): final clause always true given `not new_field.unique`;
    # and the "_uniq" suffix below looks copy-pasted from the unique branch
    # ("_idx" is used elsewhere for plain indexes) -- confirm.
    if not old_field.db_index and new_field.db_index and not new_field.unique and not (not old_field.unique and new_field.unique):
        self.execute(
            self.sql_create_index % {
                "table": self.quote_name(model._meta.db_table),
                "name": self._create_index_name(model, [new_field.column], suffix="_uniq"),
                "columns": self.quote_name(new_field.column),
                "extra": "",
            }
        )
    # Type alteration on primary key? Then we need to alter the column
    # referring to us.
    rels_to_update = []
    if old_field.primary_key and new_field.primary_key and old_type != new_type:
        rels_to_update.extend(new_field.model._meta.get_all_related_objects())
    # Changed to become primary key?
    # Note that we don't detect unsetting of a PK, as we assume another field
    # will always come along and replace it.
    if not old_field.primary_key and new_field.primary_key:
        # First, drop the old PK
        constraint_names = self._constraint_names(model, primary_key=True)
        if strict and len(constraint_names) != 1:
            raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
                len(constraint_names),
                model._meta.db_table,
            ))
        for constraint_name in constraint_names:
            self.execute(
                self.sql_delete_pk % {
                    "table": self.quote_name(model._meta.db_table),
                    "name": constraint_name,
                },
            )
        # Make the new one
        self.execute(
            self.sql_create_pk % {
                "table": self.quote_name(model._meta.db_table),
                "name": self._create_index_name(model, [new_field.column], suffix="_pk"),
                "columns": self.quote_name(new_field.column),
            }
        )
        # Update all referencing columns
        rels_to_update.extend(new_field.model._meta.get_all_related_objects())
    # Handle our type alters on the other end of rels from the PK stuff above
    for rel in rels_to_update:
        rel_db_params = rel.field.db_parameters(connection=self.connection)
        rel_type = rel_db_params['type']
        self.execute(
            self.sql_alter_column % {
                "table": self.quote_name(rel.model._meta.db_table),
                "changes": self.sql_alter_column_type % {
                    "column": self.quote_name(rel.field.column),
                    "type": rel_type,
                }
            }
        )
    # Does it have a foreign key?
    if new_field.rel:
        self.execute(
            self.sql_create_fk % {
                "table": self.quote_name(model._meta.db_table),
                "name": self._create_index_name(model, [new_field.column], suffix="_fk"),
                "column": self.quote_name(new_field.column),
                "to_table": self.quote_name(new_field.rel.to._meta.db_table),
                "to_column": self.quote_name(new_field.rel.get_related_field().column),
            }
        )
    # Rebuild FKs that pointed to us if we previously had to drop them
    if old_field.primary_key and new_field.primary_key and old_type != new_type:
        for rel in new_field.model._meta.get_all_related_objects():
            self.execute(
                self.sql_create_fk % {
                    "table": self.quote_name(rel.model._meta.db_table),
                    "name": self._create_index_name(rel.model, [rel.field.column], suffix="_fk"),
                    "column": self.quote_name(rel.field.column),
                    "to_table": self.quote_name(model._meta.db_table),
                    "to_column": self.quote_name(new_field.column),
                }
            )
    # Does it have check constraints we need to add?
    if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
        self.execute(
            self.sql_create_check % {
                "table": self.quote_name(model._meta.db_table),
                "name": self._create_index_name(model, [new_field.column], suffix="_check"),
                "column": self.quote_name(new_field.column),
                "check": new_db_params['check'],
            }
        )
    # Reset connection if required
    if self.connection.features.connection_persists_old_columns:
        self.connection.close()
def _alter_column_type_sql(self, table, column, type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things; an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(column),
"type": type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
    """
    Alters M2Ms to repoint their to= endpoints.

    Renames the auto-created through table if needed, then alters the FK
    column on it that points at the target model.
    """
    # Rename the through table
    if old_field.rel.through._meta.db_table != new_field.rel.through._meta.db_table:
        self.alter_db_table(old_field.rel.through, old_field.rel.through._meta.db_table, new_field.rel.through._meta.db_table)
    # Repoint the FK to the other side
    self.alter_field(
        new_field.rel.through,
        # We need the field that points to the target model, so we can tell alter_field to change it -
        # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
        old_field.rel.through._meta.get_field_by_name(old_field.m2m_reverse_field_name())[0],
        new_field.rel.through._meta.get_field_by_name(new_field.m2m_reverse_field_name())[0],
    )
def _create_index_name(self, model, column_names, suffix=""):
    """
    Generates a unique name for an index/unique constraint.

    The name is deterministic for a given (table, columns, suffix) and is
    truncated/hashed as needed to fit the backend's identifier length.
    """
    # If there is just one column in the index, use a default algorithm from Django
    if len(column_names) == 1 and not suffix:
        return truncate_name(
            '%s_%s' % (model._meta.db_table, BaseDatabaseCreation._digest(column_names[0])),
            self.connection.ops.max_name_length()
        )
    # Else generate the name for the index using a different algorithm
    table_name = model._meta.db_table.replace('"', '').replace('.', '_')
    # Hash of table+columns disambiguates same-named columns across tables.
    index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names))))
    max_length = self.connection.ops.max_name_length() or 200
    # If the index name is too long, truncate it
    index_name = ('%s_%s%s%s' % (table_name, column_names[0], index_unique_name, suffix)).replace('"', '').replace('.', '_')
    if len(index_name) > max_length:
        part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
        index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
    # It shouldn't start with an underscore (Oracle hates this)
    if index_name[0] == "_":
        index_name = index_name[1:]
    # If it's STILL too long, just hash it down
    if len(index_name) > max_length:
        index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
    # It can't start with a number on Oracle, so prepend D if we need to
    # (the last character is dropped so the length stays within max_length).
    if index_name[0].isdigit():
        index_name = "D%s" % index_name[:-1]
    return index_name
def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
| |
# File with utility functions for models in pytorch
# Imports ___________________________________________________________________
import os
import re
import torch
import torchvision.datasets as datasets
from torch.utils.data import Dataset, DataLoader
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm_notebook
from torchvision.utils import make_grid
from torch.autograd import Variable
import pandas as pd #for some debugging
import shutil, errno
# copy function
def copyanything(src, dst):
    """Copy *src* to *dst*: a directory tree via copytree, a file via copy."""
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        # copytree on a plain file fails with ENOTDIR; fall back to copy.
        if exc.errno != errno.ENOTDIR:
            raise
        shutil.copy(src, dst)
# Optimizers not put in master yet __________________________________________
class Nadam(torch.optim.Optimizer):
    """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).
    It has been proposed in `Incorporating Nesterov Momentum into Adam`__.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        schedule_decay (float, optional): momentum schedule decay (default: 4e-3)
    __ http://cs229.stanford.edu/proj2015/054_report.pdf
    __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
    """
    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, schedule_decay=4e-3):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, schedule_decay=schedule_decay)
        super(Nadam, self).__init__(params, defaults)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['m_schedule'] = 1.
                    state['exp_avg'] = grad.new().resize_as_(grad).zero_()
                    state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
                # Warming momentum schedule
                m_schedule = state['m_schedule']
                schedule_decay = group['schedule_decay']
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # NOTE(review): this local `eps` is never used; the denominator
                # below reads group['eps'] directly.
                eps = group['eps']
                state['step'] += 1
                # L2 penalty folded into the gradient (decoupled decay not used).
                if group['weight_decay'] != 0:
                    grad = grad.add(group['weight_decay'], p.data)
                # Nesterov momentum schedule mu_t and mu_{t+1} (Dozat's warmup).
                momentum_cache_t = beta1 * \
                    (1. - 0.5 * (0.96 ** (state['step'] * schedule_decay)))
                momentum_cache_t_1 = beta1 * \
                    (1. - 0.5 *
                     (0.96 ** ((state['step'] + 1) * schedule_decay)))
                m_schedule_new = m_schedule * momentum_cache_t
                m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
                state['m_schedule'] = m_schedule_new
                # Decay the first and second moment running average coefficient
                bias_correction2 = 1 - beta2 ** state['step']
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                # NOTE(review): dividing by (1. - bias_correction2) equals dividing
                # by beta2**step; the usual Adam/Nadam bias correction divides by
                # bias_correction2 itself — verify against the reference paper.
                exp_avg_sq_prime = exp_avg_sq.div(1. - bias_correction2)
                denom = exp_avg_sq_prime.sqrt_().add_(group['eps'])
                # Two-part Nesterov update: current gradient term + momentum term.
                p.data.addcdiv_(-group['lr'] * (1. - momentum_cache_t) /
                                (1. - m_schedule_new), grad, denom)
                p.data.addcdiv_(-group['lr'] * momentum_cache_t_1 /
                                (1. - m_schedule_next), exp_avg, denom)
        return loss
# Visualizing Functions _____________________________________________________
def show_image(image):
    '''Render a single PIL image in a new matplotlib figure window.'''
    plt.figure()
    plt.imshow(image)
    plt.show()
def get_example_image(image_fullpaths):
    '''Open and return one randomly chosen image from *image_fullpaths*.'''
    chosen_path = np.random.choice(image_fullpaths)
    return Image.open(chosen_path)
# Making Datasets ___________________________________________________________
class TestDataset(Dataset):
    """Test-set dataset over a flat directory of .jpg images.

    Args:
        root_dir: directory containing the images ('.jpg' in the filename).
        transform: optional callable applied to each loaded PIL image.

    __getitem__ returns (image, numeric_id) where numeric_id is the first
    run of digits in the filename (e.g. 'img123.jpg' -> 123).
    """
    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        # sorted() fixes the sample order: os.listdir returns entries in
        # arbitrary, filesystem-dependent order, which made idx -> file
        # mapping nondeterministic across machines/runs.
        self.samples = sorted(filename for filename in os.listdir(root_dir)
                              if '.jpg' in filename)
    def __len__(self):
        return len(self.samples)
    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, self.samples[idx])
        image = Image.open(img_name)
        if self.transform:
            image = self.transform(image)
        return image, int(re.findall(r'\d+', self.samples[idx])[0])
class DfNumpyDataset(Dataset):
    '''Dataset over parallel 2-D arrays: rows of *data* paired with rows of *targets*.'''
    def __init__(self, data, targets):
        self.data = data
        self.targets = targets
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        sample = self.data[idx, :]
        target = self.targets[idx, :]
        return sample, target
def get_dataset(data, targets):
    '''Wrap (data, targets) arrays in a DfNumpyDataset.'''
    dataset = DfNumpyDataset(data, targets)
    return dataset
def get_image_dataset(root, tsfm=None):
    '''Build an ImageFolder dataset from a root/class/img.png directory layout.'''
    image_folder = datasets.ImageFolder(root, transform=tsfm)
    return image_folder
def get_loader(dataset, use_cuda=True, batch_size=64, shuffle=False):
    '''Wrap *dataset* in a DataLoader; pins host memory when CUDA is in use.

    Feed it a Dataset (e.g. from get_image_dataset).'''
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      pin_memory=use_cuda)
def precompute_vals(model, data_loader, test_set=False):
    '''Given a model (up to where computation is wanted) and data loader, precomputes values.

    Runs every batch through *model* on the GPU and concatenates the outputs.
    Returns (outputs, labels); labels is a flat list of ids when the loader
    yields non-tensor labels (e.g. a test set).  `test_set` is currently
    unused — see the commented-out branch at the bottom.
    '''
    outputs_list = []
    labels_list = []
    for i, data in tqdm_notebook(enumerate(data_loader)):
        # get inputs and labels
        inputs, labels = data
        # wrap in Variable
        try:
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
            labels = labels.view(-1)
        except AttributeError:
            # labels had no .cuda() — presumably string/int ids from a test
            # loader; keep them as-is and only move the inputs to the GPU.
            inputs = Variable(inputs.cuda())
        # forward pass
        outputs = model(inputs)
        outputs_list.append(outputs)
        labels_list.append(labels)
        # free GPU memory held by this batch before the next iteration
        del inputs
        del labels
    outputs_ret = torch.cat(outputs_list)
    try:
        labels_ret = torch.cat(labels_list)
    except TypeError:
        # non-tensor labels: flatten the list of per-batch id sequences
        labels_ret = [ids for sublist in labels_list for ids in sublist]
    return outputs_ret, labels_ret
    # if test_set==False:
    # else:
    #     # for test sets
    #     outputs_list = []
    #     ids_list = []
    #     for i, data in tqdm_notebook(enumerate(data_loader)):
    #         # get inputs and ids
    #         inputs, ids = data
    #         # wrap in Variable
    #         inputs, ids = Variable(inputs.cuda()), ids
    #         # forward pass
    #         outputs = model(inputs)
    #         outputs_list.append(outputs)
    #         ids_list.extend(ids)
    #         del inputs
    #         del ids
    #     return torch.cat(outputs_list), ids_list
def save_precompute(X, y, path, model_name, filename):
    '''Persist precomputed activations X and labels y for *model_name*.'''
    precom_savedir = make_savedir(path, 'precom_'+model_name)
    X_converted = X.data.cpu().numpy()
    try:
        y_converted = y.data.cpu().numpy().reshape(-1,1)
    except AttributeError:
        # y was a plain python sequence of ids rather than a tensor
        y_converted = np.array(y).reshape(-1,1)
    target_path = os.path.join(precom_savedir, filename)
    torch.save((X_converted, y_converted), target_path)
    print('Saved at {0}'.format(target_path))
# Image Standardization _____________________________________________________
def get_mean_rgb(train_paths):
    '''given an array-like of all paths to train, under structure of
    root/class/img, will return mean rgb scaled to 0-1 (from 0-255)'''
    per_image_means = [(np.array(Image.open(path)) / 255).mean(0).mean(0)
                       for path in tqdm_notebook(train_paths)]
    return np.array(per_image_means).mean(0)
def get_std_dev_rgb(train_paths, mean_rgb):
    '''given an array-like of all paths to train, under structure of
    root/class/img, will return std_dev rgb scaled to 0-1 (from 0-255)'''
    per_image_vars = [(((np.array(Image.open(path)) / 255) - mean_rgb)**2).mean(0).mean(0)
                      for path in tqdm_notebook(train_paths)]
    return np.array(per_image_vars).mean(0)**.5
class UnNormalize(object):
    '''Inverse of a Normalize transform: restores the original channel scale.'''
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        Returns:
            Tensor: Normalized image.
        """
        # Invert t.sub_(m).div_(s): multiply back by std, then add the mean.
        for channel, ch_mean, ch_std in zip(tensor, self.mean, self.std):
            channel.mul_(ch_std).add_(ch_mean)
        return tensor
# Dir Functions _____________________________________________________________
# different dir functions for classification tasks vs neural style transfer
def get_image_classes_clf(root):
    '''Class names = entries of *root* (one subdir per class), minus .DS_Store.'''
    entries = os.listdir(root)
    return [entry for entry in entries if '.DS_Store' not in entry]
def get_image_fullpaths_clf(root, img_fmt):
    '''Full paths of all *img_fmt* samples under a root/class/... layout.'''
    fullpaths = []
    for cls in get_image_classes_clf(root):
        for fname in os.listdir(os.path.join(root, cls)):
            if img_fmt in fname:
                fullpaths.append(os.path.join(root, cls, fname))
    return fullpaths
def get_image_fullpaths_nst(root, img_fmt):
    '''Full paths of *img_fmt* files directly under *root* (flat layout).'''
    matches = []
    for fname in os.listdir(root):
        if img_fmt in fname:
            matches.append(os.path.join(root, fname))
    return matches
# Training Models ___________________________________________________________
def make_savedir(path, model_name):
    '''Create (if needed) and return the checkpoint directory for *model_name*.

    The directory is ``<path>/save_<model_name>``.  Uses os.makedirs with
    exist_ok=True, which is idempotent and race-free — the original
    isdir-then-mkdir pair could raise if the directory appeared (or was
    created by another process) between the two calls.
    '''
    savedir = os.path.join(path, 'save_{0}'.format(model_name))
    os.makedirs(savedir, exist_ok=True)
    return savedir
def train_model(model, model_name, train_loader, valid_loader, optimizer, criterion, n_epochs, save_epoch, savedir, variance_pct_thrsh, patience_epoch, pct_change, decay_rate, continue_training=False, g_epoch=1, verbose=False, lr_scheduler=False, early_stop=False):
    '''Basic setup for training models and saving every multiple of save_epoch.
    This assumes the full model can fit on one gpu. If different parts are
    on different GPUs, will need to specifically set devices numbers in
    cuda calls based on architecture of model.
    model = pytorch model
    model_name = model name
    train_loader = loader made from train dataset
    valid_loader = loader made from valid dataset
    optimizer = choice of optimizer
    criterion = loss function
    n_epochs = number of epochs to train for
    save_epochs = save a state dict every save_epochs
    savedir = dir to save state dicts in
    patience_epoch = epochs to wait where change in loss is below pct_change
        before decaying learning rate
    pct_change = the percent change difference desired in loss
    decay_rate = float from 0-1 which will multiply the current learning rate
    continue_training = specifies of training is continuing or fresh
    g_epoch = what global train epoch number. 1 if training fresh.
    returns g_epoch to keep track of how many training epochs
    '''
    # Per-epoch history used for live plotting and the final return value.
    epoch_list = []
    loss_list = []
    train_accuracy_list = []
    valid_accuracy_list = []
    lr_list = []
    early_stopping_flag = 0
    # Three live panels: loss | train/valid accuracy | learning rate.
    fig = plt.figure()
    ax1 = fig.add_subplot(131)
    ax2 = fig.add_subplot(132)
    ax3 = fig.add_subplot(133)
    plt.ion()
    fig.show()
    fig.canvas.draw()
    k = 0
    if continue_training:
        # g_epoch may arrive as the tuple returned by a previous call;
        # unwrap the first element in that case.
        try:
            g_epoch = g_epoch[0]
        except:
            pass
    epochs = range(g_epoch, g_epoch + n_epochs)
    for epoch in tqdm_notebook(epochs):
        # epoch stats for plotting
        correct = 0
        seen = 0
        current_loss = 0.0
        g_epoch += 1
        for i, data in enumerate(train_loader):
            # get inputs and labels
            inputs, labels = data
            # wrap in Variable
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
            labels = labels.view(-1)
            # zero the gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = model(inputs)
            # import pdb; pdb.set_trace()
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # statistic updates
            # NOTE(review): loss.data[0] is the pre-0.4 PyTorch scalar API;
            # newer versions require loss.item().
            current_loss += loss.data[0]
            seen += len(inputs)
            # import pdb; pdb.set_trace()
            correct += (labels == outputs.max(1)[1]).cpu().data.numpy().sum()
        # Plotting ___________________________________________________________
        epoch_list.append(epoch)
        if not loss_list:
            last_loss = 9999999
        else:
            last_loss = loss_list[-1]
        valid_accuracy = check_accuracy(valid_loader, model)
        # Track the best validation accuracy seen so far; on the first epoch
        # highest_valid_acc is unbound, so the bare except initializes it.
        try:
            if valid_accuracy > highest_valid_acc:
                save_best = True
                highest_valid_acc = valid_accuracy
            else:
                save_best = False
        except:
            highest_valid_acc = 0
            save_best = False
        train_accuracy = float(correct)/float(seen)
        if verbose == True:
            print('epoch: {0}, loss: {3}, train_acc: {1}, valid_acc: {2}'.format(epoch,train_accuracy, valid_accuracy, current_loss))
        loss_list.append(current_loss)
        train_accuracy_list.append(train_accuracy)
        valid_accuracy_list.append(valid_accuracy)
        lr_list.append(optimizer.param_groups[0]['lr'])
        ax1.clear()
        ax1.plot(epoch_list, loss_list)
        ax2.clear()
        ax2.plot(epoch_list, train_accuracy_list, 'ro', label='train')
        ax2.plot(epoch_list, valid_accuracy_list, label='valid')
        ax3.plot(epoch_list, lr_list)
        plt.title("Epoch: {0}, loss left, accuracy middle, learning rate right".format(epoch))
        ax2.legend(loc='best')
        fig.canvas.draw()
        # To decay learning rate _____________________________________________
        # NOTE(review): decrease_lr is reset to 0 every epoch, so the modulo
        # gate below always fires on the first check — likely not intended;
        # left unchanged.
        decrease_lr = 0
        if lr_scheduler:
            if train_accuracy > .93:
                if decrease_lr % patience_epoch == 0:
                    optimizer.param_groups[0]['lr'] *= decay_rate
                decrease_lr += 1
        # if abs((last_loss - current_loss) / last_loss) < pct_change:
        #     k += 1
        #     if k >= patience_epoch:
        #         k = 0
        #         optimizer.param_groups[0]['lr'] *= decay_rate
        # else:
        #     k = 0
        # Conditionally save if variance is starting to grow btwn datasets, early stopping as well
        if early_stop:
            # `&` is bitwise here; it works because both operands are bools.
            if (valid_accuracy < train_accuracy) & (((train_accuracy - valid_accuracy)/train_accuracy) > variance_pct_thrsh):
                print("Epoch passing variance cutoff: {0}".format(epoch))
                early_stopping_flag += 1
                torch.save(model.state_dict(), os.path.join(
                    savedir, model_name + '_{0}'.format(epoch)))
                if early_stopping_flag > patience_epoch*2:
                    print('Triggered early stopping flag')
                    break
            else:
                early_stopping_flag = 0
        # Saving _____________________________________________________________
        if (epoch) % save_epoch == 0:
            torch.save(model.state_dict(), os.path.join(
                savedir, model_name + '_{0}'.format(epoch)))
        if save_best:
            # '_best'.format(epoch) is just '_best' — the format is a no-op.
            torch.save(model.state_dict(), os.path.join(
                savedir, model_name + '_best'.format(epoch)))
    # Free up cuda memory again ______________________________________________
    del inputs
    del labels
    del loss
    del optimizer
    del criterion
    return g_epoch, epoch_list, loss_list, train_accuracy_list, valid_accuracy_list
# Validation Functions ______________________________________________________
def get_classes_strings(classes, labels_ids):
    '''Look up the class-name string for each label id.'''
    strings = []
    for label_id in labels_ids:
        strings.append(classes[label_id])
    return strings
def get_prediction_classes_ids(predictions):
    '''Arg-max over the class dimension; returns a numpy array of predicted ids.'''
    return predictions.cpu().data.numpy().argmax(1)
def get_prediction_classes_strings(classes, predictions):
    '''Map raw model predictions to their class-name strings.'''
    predicted_ids = get_prediction_classes_ids(predictions)
    return get_classes_strings(classes, predicted_ids)
def show_iter(img):
    '''Display a (C, H, W) tensor image, transposed to (H, W, C) for matplotlib.'''
    npimg = img.numpy()
    plt.figure()
    plt.imshow(np.transpose(npimg, (1, 2, 0)), interpolation='nearest')
    plt.show()
def predictions_vs_actuals(iterator, model, rvrs_tsfm, classes):
    # Visual sanity check: show one batch of images (after undoing the
    # normalize transform) and print actual vs predicted class names.
    model.eval()
    # NOTE(review): iterator.next() is the old-style iterator API — presumably
    # an old torch DataLoader iterator; next(iterator) on modern versions.
    images, labels = iterator.next()
    img_list = [rvrs_tsfm(img) for img in images]
    labels_string = get_classes_strings(classes, labels.numpy())
    show_iter(make_grid(img_list, padding=10))
    # display the predictons for the images above
    predictions = model(Variable(images.cuda()))
    predictions_string = get_prediction_classes_strings(classes, predictions)
    print('Actuals: ', labels_string)
    print('Predictions: ', predictions_string)
    # free GPU memory held by this batch
    del predictions
    del images
    del labels
def make_predictions(data_loader, model):
    '''Return predicted class indices for every sample served by *data_loader*.'''
    model.eval()
    pred_list = []
    for batch in data_loader:
        images, labels = batch
        labels = labels.view(-1)
        # max(1)[1] -> indices of the per-row arg-max prediction
        batch_preds = model(Variable(images.cuda())).max(1)[1].cpu().data
        pred_list.extend(list(batch_preds))
    return pred_list
def check_accuracy(data_loader, model):
    '''Compute classification accuracy of *model* over all of *data_loader*.'''
    model.eval()
    correct = 0
    seen = 0
    total_len = len(data_loader)
    for batch in data_loader:
        images, labels = batch
        labels = labels.view(-1)
        seen += len(images)
        predictions = model(Variable(images.cuda()))
        # labels is a tensor, predictions a Variable; compare as numpy arrays.
        # max(1)[1] returns the indices of the arg-max predictions.
        correct += (labels.numpy() == predictions.max(1)[1].cpu().data.numpy()).sum()
        # release GPU memory held by this batch
        del images
        del labels
        del predictions
    return float(correct) / float(seen)
| |
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom BigQuery hook to generate BigQuery table pages as blobs."""
from typing import Any, Dict, Generator, List, Optional, Text
from airflow.contrib.hooks import bigquery_hook
from googleapiclient import errors as googleapiclient_errors
from gps_building_blocks.airflow.hooks import input_hook_interface
from gps_building_blocks.airflow.utils import blob
from gps_building_blocks.airflow.utils import errors
from gps_building_blocks.airflow.utils import retry_utils
_DEFAULT_PAGE_SIZE = 1000  # rows fetched per tabledata page / tables().list page
_PLATFORM = 'BigQuery'  # platform tag attached to every generated blob
class BigQueryHook(
    bigquery_hook.BigQueryHook, input_hook_interface.InputHookInterface):
  """Custom BigQuery hook to generate table pages as blobs.
  Attributes:
    dataset_id: Unique name of the dataset.
    table_id: Unique location within the dataset.
    selected_fields: Subset of fields to return.
    url: URL of data, formatted as 'bq://{project_id}.{dataset_id}.{table.id}'.
  """
  def __init__(self,
               conn_id: Text,
               dataset_id: Text,
               table_id: Text,
               selected_fields: Optional[Text] = None,
               **kwargs) -> None:
    """Initializes the generator of a specified BigQuery table.
    Args:
      conn_id: Connection id passed to airflow's BigQueryHook.
      dataset_id: Dataset id of the target table.
      table_id: Table name of the target table.
      selected_fields: Subset of fields to return. Example: 'field_1,field_2'.
      **kwargs: Other arguments to pass through to Airflow's BigQueryHook.
    """
    super(BigQueryHook, self).__init__(bigquery_conn_id=conn_id, **kwargs)
    self.dataset_id = dataset_id
    self.table_id = table_id
    self.selected_fields = selected_fields
    # Canonical locator for this table; also used as the blob id prefix.
    self.url = 'bq://{}.{}.{}'.format(
        self._get_field('project'), self.dataset_id, self.table_id)
  def _str_to_bq_type(self, bq_str: Text, bq_type: Text) -> Any:
    """Casts BigQuery string row data to the appropriate BigQuery data type.
    Args:
      bq_str: String data to be cast to target type.
      bq_type: Target data type, e.g. BOOLEAN, INTEGER, FLOAT, TIMESTAMP.
    Returns:
      Typed data cast from string. None when input data is None.
    Raises:
      ValueError: When a BOOLEAN value is not 'true'/'false', or when an
        INTEGER/FLOAT/TIMESTAMP string cannot be parsed by int()/float().
    """
    if bq_str is None:
      return None
    elif bq_type == 'BOOLEAN':
      if bq_str.lower() not in ['true', 'false']:
        raise ValueError("{} must have value 'true' or 'false'".format(
            bq_str))
      return bq_str.lower() == 'true'
    elif bq_type == 'INTEGER':
      return int(bq_str)
    elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
      # TIMESTAMP values arrive as epoch-seconds strings and stay floats here.
      return float(bq_str)
    else:
      # Any other type (STRING, etc.) is passed through unchanged.
      return bq_str
  def _query_results_to_blob(
      self, query_results: Dict[Text, Any], start_index: int) -> blob.Blob:
    """Converts query results of BigQuery to event blob.
    Args:
      query_results: Raw query results.
      start_index: Start index of BigQuery table rows.
    Returns:
      blob: Event blob containing event list and status.
    """
    blob_unique_id = '{}/{}'.format(self.url, start_index)
    if query_results is None:
      # No page came back: emit an ERROR-status blob so the failure is
      # recorded at this position rather than silently skipped.
      return blob.Blob(events=[], blob_id=blob_unique_id,
                       platform=_PLATFORM, source=self.dataset_id,
                       location=self.table_id, position=start_index,
                       status=blob.BlobStatus.ERROR,
                       status_desc='Unable to get the blob at {} in {}.'.format(
                           start_index, self.url))
    events = self._query_results_to_maps_list(query_results)
    return blob.Blob(events=events, blob_id=blob_unique_id,
                     platform=_PLATFORM, source=self.dataset_id,
                     location=self.table_id, position=start_index)
  def _query_results_to_maps_list(
      self, query_results: Dict[Text, Any]) -> List[Dict[Text, Any]]:
    """Converts table rows query results of BigQuery to list of maps.
    Args:
      query_results: Raw query result.
    Returns:
      data: Table rows in the format of list of maps.
    """
    fields = [field['name'] for field in query_results['schema']['fields']]
    col_types = [field['type'] for field in query_results['schema']['fields']]
    rows = query_results.get('rows', [])
    batch_data = []
    for row in rows:
      # Each row is {'f': [{'v': value}, ...]} in tabledata responses.
      values = [cell['v'] for cell in row['f']]
      # NOTE(review): `type` shadows the builtin inside this comprehension.
      typed_values = [self._str_to_bq_type(value, type)
                      for value, type in zip(values, col_types)]
      data = dict(zip(fields, typed_values))
      batch_data.append(data)
    return batch_data
  @retry_utils.logged_retry_on_retriable_http_error
  def _get_tabledata_with_retries(self, bq_cursor: bigquery_hook.BigQueryCursor,
                                  start_index: int) -> Dict[Text, Any]:
    """Attempt to get BigQuery table data with retries.
    Args:
      bq_cursor: BigQuery Cursor instance.
      start_index: Zero based index of the starting row to read.
    Returns:
      query_results: Map containing the requested rows.
    """
    query_results = bq_cursor.get_tabledata(
        dataset_id=self.dataset_id,
        table_id=self.table_id,
        max_results=_DEFAULT_PAGE_SIZE,
        start_index=start_index,
        selected_fields=self.selected_fields)
    # The response may lack 'schema'; fetch it separately so
    # _query_results_to_maps_list can type the rows.
    if query_results and not query_results.get('schema'):
      query_results['schema'] = bq_cursor.get_schema(self.dataset_id,
                                                     self.table_id)
    return query_results
  def list_tables(self,
                  dataset_id: Optional[Text] = None,
                  prefix: Text = '') -> List[Text]:
    """Lists table ids in specified dataset filtered by specified prefix.
    Args:
      dataset_id: Dataset id of which the tables to list. Falls back to the
        hook's own dataset when falsy.
      prefix: Prefix of the table id to list.
    Returns:
      table_ids: List of table ids.
    """
    if not dataset_id:
      dataset_id = self.dataset_id
    bq_cursor = self.get_conn().cursor()
    tables_list_resp = bq_cursor.service.tables().list(
        projectId=bq_cursor.project_id,
        datasetId=dataset_id,
        maxResults=_DEFAULT_PAGE_SIZE).execute()
    result = []
    # Walk every page of results via nextPageToken.
    while True:
      for table in tables_list_resp.get('tables', []):
        if table['tableReference']['tableId'].startswith(prefix):
          result.append(table['tableReference']['tableId'])
      if tables_list_resp.get('nextPageToken'):
        tables_list_resp = bq_cursor.service.tables().list(
            projectId=bq_cursor.project_id,
            datasetId=dataset_id,
            maxResults=_DEFAULT_PAGE_SIZE,
            pageToken=tables_list_resp['nextPageToken']).execute()
      else:
        break
    return result
  def events_blobs_generator(self) -> Generator[blob.Blob, None, None]:
    """Generates pages of specified BigQuery table as blobs.
    Yields:
      blob: A blob object containing events from a page with length of
        _DEFAULT_PAGE_SIZE from the specified BigQuery table.
    Raises:
      DataInConnectorError: Raised when BigQuery table data cannot be accessed.
    """
    start_index = 0
    total_rows = -1
    bq_cursor = self.get_conn().cursor()
    # Get the first page to ensure the accessibility.
    # A failure on the first page is fatal (raises); failures on later pages
    # only produce ERROR-status blobs, so the generator keeps going.
    try:
      query_results = self._get_tabledata_with_retries(bq_cursor=bq_cursor,
                                                       start_index=start_index)
    except googleapiclient_errors.HttpError as error:
      raise errors.DataInConnectorError(error=error, msg=str(error))
    else:
      if query_results is None:
        raise errors.DataInConnectorError(
            msg='Unable to get any blobs in {}.'.format(self.url))
      try:
        total_rows = int(query_results.get('totalRows'))
      except (AttributeError, TypeError, ValueError):
        raise errors.DataInConnectorError(
            msg='Unable to get total rows in {}.'.format(self.url))
      else:
        yield self._query_results_to_blob(query_results, start_index)
        start_index = start_index + _DEFAULT_PAGE_SIZE
    # Get the remaining pages of the requested table.
    while start_index < total_rows:
      try:
        query_results = self._get_tabledata_with_retries(
            bq_cursor=bq_cursor, start_index=start_index)
      except googleapiclient_errors.HttpError as error:
        # Generate a blob with error status.
        blob_unique_id = '{}/{}'.format(self.url, start_index)
        yield blob.Blob(events=[], blob_id=blob_unique_id,
                        platform=_PLATFORM, source=self.dataset_id,
                        location=self.table_id, position=start_index,
                        status=blob.BlobStatus.ERROR, status_desc=str(error))
      else:
        yield self._query_results_to_blob(query_results, start_index)
      finally:
        # Advance regardless of success so one bad page cannot stall the loop.
        start_index = start_index + _DEFAULT_PAGE_SIZE
| |
#!/usr/bin/python
# filename: ab_analysis.py
###########################################################################
#
# Copyright (c) 2013 Bryan Briney. All rights reserved.
#
# @version: 1.0.0
# @author: Bryan Briney
# @props: IgBLAST team (http://www.ncbi.nlm.nih.gov/igblast/igblast.cgi)
# @license: MIT (http://opensource.org/licenses/MIT)
#
###########################################################################
import os
import time
import math
import glob
import platform
import argparse
import threading
from subprocess import Popen, PIPE
from multiprocessing import Pool, cpu_count
from Bio import SeqIO
from blast_parse import BlastParse
parser = argparse.ArgumentParser("Antibody annotation with IgBLAST.")
parser.add_argument('-i', '--in', dest='input', required=True,
help="The input file, to be split and processed in parallel. \
If a directory is given, all files in the directory will be iteratively processed.")
parser.add_argument('-o', '--out', dest='output', required=True,
help="The output directory, which will contain JSON or tab-delimited output files.")
parser.add_argument('-l', '--log', dest='log', default='',
help="The log file, to which the BlastParse log info will be written. \
Default is stdout.")
parser.add_argument('-t', '--temp', dest='temp_dir', default='',
help="The directory in which temp files will be stored. \
If the directory doesn't exist, it will be created. \
Defaults to './temp_files'.")
parser.add_argument('-p', '--threads', dest='num_threads', default=0, type=int,
help="Number of parallel igblastn instances to spawn. \
Defaults to max available processors.")
parser.add_argument('-v', '--tsv', dest="tsv_out", action='store_true', default=False,
help="NOT YET IMPLEMENTED. If set, the final output (from BlastParse) will be in tab-delimited format. \
Defaults to JSON output.")
parser.add_argument('-m', '--merge', dest="merge", action='store_true', default=False,
help="Use if the input files are paired-end FASTQs (either gzip compressed or uncompressed) \
from Illumina platforms. Prior to running IgBLAST, reads will be merged with pandaseq. \
Requires that pandaseq is installed.")
parser.add_argument('-n', '--next_seq', dest="next_seq", action='store_true', default=False,
help="Use if the run was performed on a NextSeq sequencer. \
Multiple lane files for the same sample will be merged.")
parser.add_argument('-u', '--uaid', dest="uaid", type=int, default=None,
help="Use if the input files contain unique antibody identifiers (UAIDs). \
UAIDs will be identified and incorporated into the output JSON file.")
parser.add_argument('-b', '--basespace', dest="use_basespace", default=False, action='store_true',
help="NOT YET IMPLEMENTED. Use flag if files should be downloaded directly from BaseSpace. \
Files will be downloaded into the directory provided with the '-i' flag, which should be empty.")
parser.add_argument('-d', '--debug', dest="debug", action='store_true', default=False,
help="If set, will write all failed/exception sequences to file and give more informative errors.")
parser.add_argument('-s', '--species', dest='species', default='human',
choices=['human', 'macaque', 'mouse'])
args = parser.parse_args()
class launch_thread(threading.Thread):
    """Thread that runs one igblastn process over a single input chunk.

    Bug fixed: the original built the command by concatenating four string
    literals and calling .format() on the LAST literal only (.format binds
    tighter than +), so the '{3}', '{0}', ... placeholders in the earlier
    segments were never substituted and the spawned command was invalid.
    Parenthesizing the whole template before formatting applies the
    substitution to every segment.
    """
    def __init__(self, in_file, out_file):
        threading.Thread.__init__(self)
        self.in_file = in_file
        self.out_file = out_file
        # platform-specific igblastn binary shipped alongside this script
        binary = './igblastn_' + platform.system().lower()
        self.cmd = ('{3} -germline_db_V database/{0}_gl_V -germline_db_J database/{0}_gl_J -germline_db_D database/{0}_gl_D '
                    '-organism {0} -domain_system imgt -auxiliary_data optional_file/{0}_gl.aux '
                    '-show_translation -outfmt 3 -num_alignments_V 1 -num_alignments_D 1 -num_alignments_J 1 '
                    '-query {1} -out {2}').format(args.species, self.in_file, self.out_file, binary)
    def run(self):
        # shell=True is required because self.cmd is a single command string
        p = Popen(self.cmd, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
#####################################################################
#
# FILES AND DIRECTORIES
#
#####################################################################
def build_temp_dirs():
    """Create (if needed) the temp dir and its temp_output subdir; return both paths."""
    temp_directory = args.temp_dir if args.temp_dir != '' else "./temp_files"
    temp_out_directory = temp_directory + "/temp_output"
    for directory in (temp_directory, temp_out_directory):
        if not os.path.exists(directory):
            os.mkdir(directory)
    return temp_directory, temp_out_directory
def build_output_dir():
    """Ensure the CLI output directory exists; return its path."""
    if not os.path.exists(args.output):
        os.mkdir(args.output)
    return args.output
def list_files(d):
    """Return sorted entry paths inside directory *d*, or [d] if d is not a directory."""
    if not os.path.isdir(d):
        return [d]
    expanded = os.path.expanduser(d)
    return sorted(glob.glob(expanded + '/*'))
def file_length(file):
    """Count FASTA records in *file* (lines whose first character is '>').

    Uses startswith, which is safe on empty strings (the original `l[0]`
    would raise IndexError on an empty line), and drops the unused
    enumerate index.
    """
    count = 0
    with open(file) as handle:
        for line in handle:
            if line.startswith('>'):
                count += 1
    return count
def num_procs():
    """Return the requested worker count, or every available CPU when unset (<= 0)."""
    return args.num_threads if args.num_threads > 0 else cpu_count()
def clean_up(temp_files, temp_out_files, temp_directory, temp_out_directory):
    """Delete all temp chunk/output files, then remove their (now empty) directories."""
    for path in temp_out_files:
        os.remove(path)
    os.rmdir(temp_out_directory)
    for path in temp_files:
        os.remove(path)
    os.rmdir(temp_directory)
def file_splitter(file, splitlen, num_seqs, temp_directory):
    """Split a FASTA file into chunks of up to *splitlen* records.

    Spaces are replaced with underscores, then the text is split on '>'.
    Chunks are written to ``<temp_directory>/tempfile_<n>`` (n from 1) and
    the list of chunk paths is returned.

    Fixes over the original: the input handle and every chunk file are now
    closed deterministically (with-blocks instead of bare open()), each chunk
    is written with a single open instead of a truncate-then-append pair, and
    the counter increment is no longer duplicated in both branches.

    Note: iteration is range(0, num_seqs + 1, splitlen) because split('>')
    yields a leading empty string that occupies the first slot of chunk 1.
    """
    files_list = []
    with open(file, 'r') as in_handle:
        lines = in_handle.read().replace(' ', '_').split('>')
    for counter, start in enumerate(range(0, num_seqs + 1, splitlen), start=1):
        output = lines[start:start + splitlen]
        temp_filename = temp_directory + "/tempfile_" + str(counter)
        files_list.append(temp_filename)
        with open(temp_filename, "w") as temp_file:
            if counter == 1:
                # leading '>' of the first record comes from joining with
                # the empty preamble element
                temp_file.write('>'.join(output))
            else:
                temp_file.write('>' + '>'.join(output))
    return files_list
#####################################################################
#
# PRINTING
#
#####################################################################
def print_input_info(i):
    # Print the run banner and whether the input (*i* is the list of input
    # file paths) is a single file or a directory of files.
    # NOTE: Python 2 print statements — this script targets Python 2.
    print ''
    print ''
    print '========================================'
    print 'Parallel IgBLAST'
    print '========================================'
    print ''
    if len(i) > 1:
        print 'Input is a directory of {} files.\n\n'.format(len(i))
    else:
        print 'Input is a single file.\n\n'
def print_infile(i):
    # Print the basename of the current input file, underlined above and below.
    b = os.path.basename(i)
    print '-'*len(b)
    print b
    print '-'*len(b)
def print_summary_output(g, e, f, blast_time, parse_time):
    # Summarize sequence counts (g=parsed OK, e=exceptions, f=failed sanity
    # checks) and throughput for the IgBLAST and parsing stages.
    # Fixed the garbled message: 'failed sanity checks are were not processed'
    # -> 'failed sanity checks and were not processed'.
    total_seqs = g+e+f
    print ''
    print 'Out of {} total sequences:'.format(total_seqs)
    print '{} sequences processed normally'.format(g)
    print '{} sequences passed sanity checks, but could not be processed'.format(e)
    print '{} sequences failed sanity checks and were not processed'.format(f)
    print ''
    # NOTE(review): on Python 2 these divisions truncate if blast_time /
    # parse_time are ints — presumably they are floats from time.time() deltas.
    print 'IgBLAST took {0} seconds ({1} sequences per second)'.format(blast_time, total_seqs/blast_time)
    print 'parsing took {0} seconds ({1} sequences per second)'.format(parse_time, total_seqs/parse_time)
    print ''
#####################################################################
#
# PARSING
#
#####################################################################
def line_generator(blast_file):
    """Lazily yield the lines of *blast_file*.

    Fix: the original opened the file and never closed it, leaving the
    handle to the garbage collector; the with-block guarantees it is closed
    when iteration finishes (or the generator is discarded).
    """
    with open(blast_file, 'r') as f:
        for line in f:
            yield line
def block_generator(blast_file):
    """Yield one IgBLAST result block per query from *blast_file*.

    A block begins at a 'Query= ' line (prefix stripped) and runs to the next
    'Query= ' line or EOF.

    Fix: the original ended with ``raise StopIteration`` AFTER the ``break``;
    that statement actually executed once the loop exited, and under PEP 479
    (Python 3.7+) a StopIteration raised inside a generator is converted to
    RuntimeError.  Simply returning ends the generator identically.
    """
    l = line_generator(blast_file)
    line = next(l)
    # skip any preamble before the first query
    while line.find('Query= ') == -1:
        line = next(l)
    block = line.replace('Query= ', '')
    while True:
        try:
            line = next(l)
            while line.find('Query= ') == -1:
                block += line
                line = next(l)
            yield block
            block = line.replace('Query= ', '')
        except StopIteration:
            # EOF: flush the final accumulated block, then finish normally.
            yield block
            break
def do_parse(blastout):
    """Parse an IgBLAST output file into JSON-ready records.

    In debug mode each block is parsed serially in-process; otherwise
    blocks are dispatched asynchronously to a multiprocessing pool.
    Returns the (good, exceptions, failed) counts from
    process_parse_data().
    """
    json_file = blastout + '.json'
    parsed = []
    worker_pool = Pool(processes=cpu_count())
    for block in block_generator(blastout):
        try:
            if args.debug:
                parsed.append(parser(block))
            else:
                parsed.append(worker_pool.apply_async(parser, (block,)))
        except StopIteration:
            break
    worker_pool.close()
    worker_pool.join()
    good, exceptions, failed = process_parse_data(parsed, json_file)
    parsed = []
    return good, exceptions, failed
def parser(i):
    """Parse a single IgBLAST output block with BlastParse.

    Returns the BlastParse output on success; when sanity checks fail,
    returns a 3-item list whose last element is the raw block.
    """
    blast_parse = BlastParse(i, species=args.species, tsv=args.tsv_out,
                             log=args.log, debug=args.debug, uaid=args.uaid)
    if blast_parse.sanity_checks() >= 1:
        return ['', '', i]
    return blast_parse.parse()
def process_parse_data(results, out_file):
    """Write parse results to disk and tally per-category counts.

    Each result is a 3-item sequence: (good, exception, failed) records,
    exactly one of which is non-empty. In debug mode results are plain
    lists and exception/failure records get their own output files; in
    normal mode results are AsyncResult objects and only good records
    are written.

    Returns a (good, exceptions, failed) count tuple.
    """
    counts = {'good': 0, 'exceptions': 0, 'failed': 0}
    r_handle = build_result_handle(out_file)
    if args.debug:
        e_handle = build_exception_handle(out_file)
        f_handle = build_failed_handle(out_file)
    for result in results:
        if args.debug:
            if result[0] != '':
                r_handle.write(result[0])
                counts['good'] += 1
            elif result[1] != '':
                e_handle.write(result[1])
                counts['exceptions'] += 1
            elif result[2] != '':
                f_handle.write(result[2])
                counts['failed'] += 1
        else:
            record = result.get()
            if record[0] != '':
                r_handle.write(record[0])
                counts['good'] += 1
            elif record[1] != '':
                counts['exceptions'] += 1
            elif record[2] != '':
                counts['failed'] += 1
    return counts['good'], counts['exceptions'], counts['failed']
def build_result_handle(out_file):
    """Return a writable handle to out_file, truncating existing content.

    Mode 'w' already truncates, so the original's separate
    truncate-then-reopen-for-append dance (which leaked the first
    handle) is unnecessary.
    """
    return open(out_file, 'w')
def build_exception_handle(out_file):
    """Return a writable handle for exception records.

    The file path is out_file with everything from the first '.' replaced
    by '_exceptions'. Mode 'w' truncates, replacing the original leaky
    truncate-then-append double open.
    """
    e_file = out_file.split('.')[0] + '_exceptions'
    return open(e_file, 'w')
def build_failed_handle(out_file):
    """Return a writable handle for failed-sequence records.

    The file path is out_file with everything from the first '.' replaced
    by '_failed'. Mode 'w' truncates, replacing the original leaky
    truncate-then-append double open.
    """
    f_file = out_file.split('.')[0] + '_failed'
    return open(f_file, 'w')
#####################################################################
#
# INPUT PROCESSING
#
#####################################################################
def check_input(input_list):
    """Return the inputs as FASTA files, converting from FASTQ when needed.

    Only the first file is sniffed; all inputs are assumed to share one
    format.
    """
    detected = format_check(input_list[0])
    if detected == 'fasta':
        return input_list
    return convert_to_fasta(input_list)
def format_check(in_file):
    """Return 'fasta' or 'fastq' based on the first non-blank line of in_file.

    :raises RuntimeError: when the first non-blank line starts with neither
        '>' (FASTA) nor '@' (FASTQ).

    Fixes vs. the original: uses next(f) instead of the Python-2-only
    f.next(), and skips blank lines correctly — lines from file iteration
    keep their trailing newline, so the old `line == ''` test never matched.
    """
    with open(in_file) as f:
        line = next(f)
        while line.strip() == '':
            line = next(f)
    if line.startswith('>'):
        return 'fasta'
    elif line.startswith('@'):
        return 'fastq'
    else:
        raise RuntimeError('Input files must be in either FASTA or FASTQ format.')
def convert_to_fasta(input_list):
    """Convert each FASTQ input file to FASTA under <args.input>fastas/.

    Output names are the input basenames truncated at the first '.'.
    Returns the list of files in the fastas directory.

    Fix: write through a context manager — the original truncated with one
    throwaway handle, appended with a second, and never closed either.
    """
    # NOTE(review): assumes args.input ends with a path separator — the
    # original concatenated the same way; confirm against callers.
    fasta_dir = args.input + 'fastas/'
    if not os.path.exists(fasta_dir):
        os.mkdir(fasta_dir)
    for f in input_list:
        out_file = os.path.join(fasta_dir, os.path.basename(f).split('.')[0])
        with open(out_file, 'w') as out_handle:
            for s in SeqIO.parse(f, 'fastq'):
                out_handle.write('>{0}\n{1}\n'.format(s.id, str(s.seq)))
    return list_files(fasta_dir)
def merge_reads():
    """Merge paired-end reads with pandaseq; return the merged read files."""
    import pandaseq
    out_dir = args.input + 'merged_reads/'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    pandaseq.run(args.input, out_dir, nextseq=args.next_seq)
    return list_files(out_dir)
def preprocess(files):
    """Run the pre_processing step over files; return the processed files."""
    import pre_processing
    out_dir = args.input + 'processed/'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    pre_processing.run(files, out_dir)
    return list_files(out_dir)
def download_files():
    """Fetch input files from BaseSpace, then force read merging on."""
    from basespace import BaseSpace
    basespace = BaseSpace()
    basespace.download(args.input)
    # downloaded data is paired-end, so merging is mandatory
    args.merge = True
#####################################################################
#
# IgBLAST
#
#####################################################################
def do_igblast(i, out_dir):
    """Run IgBLAST on one input file, parse its output, print a summary.

    :param i: input FASTA file path.
    :param out_dir: directory that receives the '<basename>_blastout' file.
    """
    out_prefix = os.path.basename(i).split('.')[0]
    blastout_path = os.path.join(out_dir, out_prefix + '_blastout')
    # time the parallel IgBLASTn run
    start = time.time()
    blastout = parallel_igblast(i, blastout_path)
    blast_elapsed = time.time() - start
    # time the parsing of the IgBLASTn output
    start = time.time()
    good, exceptions, failed = do_parse(blastout)
    parse_elapsed = time.time() - start
    print_summary_output(good, exceptions, failed, blast_elapsed, parse_elapsed)
def parallel_igblast(in_file, out_file):
    """Split in_file into per-thread chunks, run IgBLASTn on each chunk in
    parallel, and merge the per-chunk outputs into out_file.

    :param in_file: input FASTA file.
    :param out_file: path for the merged IgBLAST output.
    :returns: out_file.

    Fix: dropped the redundant `open(out_file, 'w').write('')` before the
    `with open(out_file, 'w')` — 'w' already truncates, and the extra open
    leaked a handle.
    """
    num_seqs = file_length(in_file)
    threads = num_procs()
    split_length = int(math.ceil(float(num_seqs) / threads))
    temp_directory, temp_out_directory = build_temp_dirs()
    split_files = file_splitter(in_file, split_length, num_seqs, temp_directory)
    thread_list = []
    blastout_list = []
    # run IgBLASTn in parallel, one thread per chunk
    for f in split_files:
        temp_out_file = os.path.join(temp_out_directory,
                                     os.path.basename(f).split('.')[0] + "_blastout")
        t = launch_thread(f, temp_out_file)
        t.start()
        thread_list.append(t)
        blastout_list.append(temp_out_file)
    for thread in thread_list:
        thread.join()
    # combine all blastout files into a single output file
    with open(out_file, 'w') as out_handle:
        for f in blastout_list:
            with open(f) as in_handle:
                for line in in_handle:
                    out_handle.write(line)
    clean_up(split_files, blastout_list, temp_directory, temp_out_directory)
    return out_file
def main():
    """Entry point: collect inputs, normalize them, run IgBLAST per file."""
    # if args.use_basespace:
    #     download_files()
    if args.merge:
        files = merge_reads()
    else:
        files = list_files(args.input)
    files = preprocess(check_input(files))
    print_input_info(files)
    output_dir = build_output_dir()
    for infile in files:
        if not os.path.isfile(infile):
            continue
        print_infile(infile)
        do_igblast(infile, output_dir)


if __name__ == '__main__':
    main()
| |
# This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gdb
import re
## IMPORTANT NOTE:
#
# This file is a Python GDB script that is highly dependent on
# symbol names, even the internal functions and parameters.
#
# Whenever depending on a symbol, mark them in the source file
# so people know they have to adapt this file on changes.
## LOADING:
#
# This file should be auto-loaded by gdb if it is installed in GDB's
# auto-load directory and matches the installed libsoletta.so,
# including the final so-version.
#
# If soletta is installed to custom directory, then make sure GDB knows
# about this location and that the directory is marked as safe-path:
#
# (gdb) add-auto-load-scripts-directory ${soletta_prefix}/share/gdb/auto-load
# (gdb) add-auto-load-safe-path ${soletta_prefix}/share/gdb/auto-load
#
# It may be included directly if not auto-loaded:
#
# (gdb) source ${soletta_prefix}/share/gdb/auto-load/libsoletta.so-gdb.py
#
## Usage:
# commands start with 'sol_' prefix, then you can use 'apropos ^sol_' to
# filter commands in our namespace, or use tab-completion.
# Use GDB's "help <command>" to get more information.
# Maps a node-option "data_type" string to the member name that holds its
# default value inside the C 'defvalue' union. Data types not listed here
# have no printable default.
defvalue_member_map = {
    "string": "s",
    "byte": "byte",
    "boolean": "b",
    "int": "i",
    "float": "f",
    "rgb": "rgb",
}
def get_type_description(type):
    """Return the dereferenced 'description' member of a sol_flow node
    type, or None when the member is missing or NULL."""
    try:
        description = type["description"]
        if description:
            return description.dereference()
    except KeyError:
        pass
    return None
def get_node_type_description(node):
    """Return the description of the node's type, or None."""
    return get_type_description(node["type"])
def _get_node_port_index_by_name(node, member, port_name):
    """Return the index of the port named port_name in the node's port
    description array ('ports_in' or 'ports_out'), or -1 when not found
    or when no type description is available."""
    tdesc = get_node_type_description(node)
    if not tdesc:
        return -1
    ports = tdesc[member]
    if not ports:
        return -1
    index = 0
    # the description array is NULL-terminated
    while ports[index]:
        name = ports[index]["name"]
        if name and name.string() == port_name:
            return index
        index += 1
    return -1
def get_node_port_out_index_by_name(node, port_name):
    """Index of the output port named port_name, or -1."""
    return _get_node_port_index_by_name(node, "ports_out", port_name)


def get_node_port_in_index_by_name(node, port_name):
    """Index of the input port named port_name, or -1."""
    return _get_node_port_index_by_name(node, "ports_in", port_name)
def _get_node_port_name_by_index(node, member, port_index):
    """Return the name of the port at port_index in the node's port
    description array ('ports_in' or 'ports_out'), or None when it
    cannot be resolved."""
    tdesc = get_node_type_description(node)
    if not tdesc:
        return None
    ports = tdesc[member]
    if not ports:
        return None
    index = 0
    # the description array is NULL-terminated
    while ports[index]:
        if index == port_index:
            name = ports[index]["name"]
            return name.string() if name else None
        if index > port_index:
            break
        index += 1
    return None
def get_node_port_out_name_by_index(node, port_index):
    """Name of the output port at port_index, or None."""
    return _get_node_port_name_by_index(node, "ports_out", port_index)


def get_node_port_in_name_by_index(node, port_index):
    """Name of the input port at port_index, or None."""
    return _get_node_port_name_by_index(node, "ports_in", port_index)
class FlowTypePrinter(object):
    "Print a 'struct sol_flow_node_type'"

    def __init__(self, val):
        # val is a gdb.Value for the struct sol_flow_node_type
        self.val = val
        # cached 'const struct sol_flow_port_type_in *' type, used to tell
        # input ports (which have process()) apart from output ports
        self.port_in_type = gdb.lookup_type("struct sol_flow_port_type_in").const().pointer()

    def display_hint(self):
        return 'sol_flow_node_type'

    def _port_description_to_string(self, index, port, port_type):
        # Render one port description plus its connect/disconnect/process
        # function pointers (process only for input ports).
        s = ("\n %d %s (%s)\n" \
             " description: %s\n") % (
            index,
            port["name"].string(),
            port["data_type"].string(),
            port["description"].string())
        if port_type["connect"]:
            s += " connect(): %s\n" % (port_type["connect"],)
        if port_type["disconnect"]:
            s += " disconnect(): %s\n" % (port_type["disconnect"],)
        if port_type.type == self.port_in_type and port_type["process"]:
            s += " process(): %s\n" % (port_type["process"],)
        return s

    def _option_description_to_string(self, option):
        # Render one option member as: name(type) "description" (default=...)
        data_type = option["data_type"].string()
        defvalue_member = defvalue_member_map.get(data_type)
        if not defvalue_member:
            defvalue = ""
        else:
            # pick the default out of the defvalue union member for this type
            defvalue = option["defvalue"][defvalue_member]
            if data_type == "string":
                if defvalue:
                    defvalue = defvalue.string()
                else:
                    defvalue = "NULL"
            defvalue = " (default=%s)" % (defvalue,)
        return "\n %s(%s) \"%s\"%s," % (
            option["name"].string(),
            data_type,
            option["description"].string(),
            defvalue)

    def _ports_description_to_string(self, array, get_port_type):
        # array is the NULL-terminated port description array;
        # get_port_type maps an index to the runtime port type value.
        if not array:
            return ""
        i = 0
        r = []
        while array[i]:
            port_type = get_port_type(i)
            r.append(self._port_description_to_string(i, array[i], port_type))
            i += 1
        if i > 0:
            r.append("\n ")
        return "".join(r)

    def _options_description_to_string(self, opts):
        # Render all option members; opts is a pointer to the options
        # description (may be NULL).
        if not opts:
            return ""
        opts = opts.dereference()
        array = opts["members"]
        if not array:
            return ""
        i = 0
        r = []
        # the members array is terminated by an entry with a NULL name
        while array[i]["name"]:
            r.append(self._option_description_to_string(array[i]))
            i += 1
        if i > 0:
            r.append("\n ")
        return "".join(r)

    def to_string(self):
        type = self.val
        tdesc = get_type_description(type)
        if tdesc:
            # depends on exported symbols sol_flow_node_type_get_port_in/out
            get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
            get_port_out = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
            p_type = type.address
            ports_in = self._ports_description_to_string(tdesc["ports_in"], lambda idx: get_port_in(p_type, idx))
            ports_out = self._ports_description_to_string(tdesc["ports_out"], lambda idx: get_port_out(p_type, idx))
            options = self._options_description_to_string(tdesc["options"])
            return "%s=%s" \
                "\n name=\"%s\"," \
                "\n category=\"%s\"," \
                "\n description=\"%s\"," \
                "\n ports_in={%s}," \
                "\n ports_out={%s}," \
                "\n options={%s})" % (
                tdesc["symbol"].string(),
                type.address,
                tdesc["name"].string(),
                tdesc["category"].string(),
                tdesc["description"].string(),
                ports_in,
                ports_out,
                options)
        return "(struct sol_flow_node_type)%s (no node type description)" % (type.address,)
class FlowPrinter(object):
    "Print a 'struct sol_flow_node'"

    def __init__(self, val):
        # val is a gdb.Value for the struct sol_flow_node
        self.val = val

    def display_hint(self):
        return 'sol_flow_node'

    def to_string(self):
        id = self.val["id"]
        type = self.val["type"]
        # a NULL type means sol_flow_node_init() has not finished yet
        if not type:
            return "sol_flow_node(%s) is under construction." % (
                self.val.address,)
        tname = "%#x (no node type description)" % (type.address,)
        tdesc = get_type_description(type)
        if tdesc:
            # prefer the human-readable name + symbol when available
            tname = "%s(%s=%s)" % (
                tdesc["name"].string(),
                tdesc["symbol"].string(),
                type.address)
        return "sol_flow_node(%s, id=\"%s\", type=%s)" % (
            self.val.address, id.string(), tname)
def sol_flow_pretty_printers(val):
    """gdb pretty-printer lookup hook: dispatch on the value's type tag."""
    printers = {
        "sol_flow_node": FlowPrinter,
        "sol_flow_node_type": FlowTypePrinter,
    }
    printer_class = printers.get(val.type.tag)
    if printer_class is None:
        return None
    return printer_class(val)
def register_pretty_printers(objfile):
    # NOTE(review): registers globally on gdb.pretty_printers; the objfile
    # argument is currently unused.
    gdb.pretty_printers.append(sol_flow_pretty_printers)
def get_type_options_string(type, options):
    # Render the options struct used to open a node of the given type as a
    # human-readable multi-line string. Returns "" when options is NULL and
    # a one-line fallback when no type description is available.
    if not options:
        return ""
    tdesc = get_type_description(type)
    if not tdesc or not tdesc["options"] or not tdesc["options"]["members"]:
        return "OPTIONS: %s (no node type description)\n" % (options,)
    string = ""
    opts_desc = tdesc["options"]
    array = opts_desc["members"]
    i = 0
    string += "OPTIONS: (struct %s*)%s\n" % (tdesc["options_symbol"].string(), options)
    # cast the opaque options pointer to its concrete struct type so
    # members can be read by name below
    opt_type = gdb.lookup_type("struct %s" % (tdesc["options_symbol"].string(),))
    options = options.cast(opt_type.pointer())
    # the members array is terminated by an entry with a NULL name
    while array[i]["name"]:
        m = array[i]
        name = m["name"].string()
        data_type = m["data_type"].string()
        description = m["description"].string()
        value = options[name]
        if data_type == "string":
            if value:
                value = value.string()
            else:
                value = "NULL"
        # look up which defvalue union member holds this type's default
        defvalue_member = defvalue_member_map.get(data_type)
        if not defvalue_member:
            defvalue = ""
        else:
            defvalue = m["defvalue"][defvalue_member]
            if data_type == "string":
                if defvalue:
                    defvalue = defvalue.string()
                else:
                    defvalue = "NULL"
            defvalue = " (default=%s)" % (defvalue,)
        string += " %s (%s) = %s // %s%s\n" % (name, data_type, value, description, defvalue)
        i += 1
    string += "\n"
    return string
class InspectAndBreakIfMatches(gdb.Breakpoint):
    """Breakpoint on a caller function that, when every user-supplied
    filter matches, plants a one-shot internal breakpoint on the callee
    (e.g. a node's open()/close()/process() implementation).

    Subclasses implement get_values() to read the caller's frame and
    return a dict with at least the filter keys, plus 'method' (where to
    break next) and optionally 'banner' (text or callable printed when
    the internal breakpoint fires).

    Fix vs. original: get_values() raised NotImplemented() — calling the
    NotImplemented singleton is a TypeError; NotImplementedError is the
    proper exception (both are swallowed by stop()'s except Exception,
    so callers see the same behavior with a clearer message).
    """

    class InternalBreak(gdb.Breakpoint):
        """Temporary, internal breakpoint at the resolved callee address."""

        def __init__(self, method, banner=None, matches=None, values=None):
            # break at the function's entry address ("*<addr>")
            addr = "*%s" % (method.cast(gdb.lookup_type("long")),)
            self.method = method
            self.banner = banner
            self.matches = matches or {}
            self.values = values or {}
            gdb.Breakpoint.__init__(self, addr, gdb.BP_BREAKPOINT, internal=True, temporary=True)

        def stop(self):
            # print the banner (callable or plain string), then stop
            if self.banner:
                if callable(self.banner):
                    self.banner(self.matches, self.values)
                else:
                    gdb.write(self.banner)
            return True

    def __init__(self, spec, matches):
        gdb.Breakpoint.__init__(self, spec, gdb.BP_BREAKPOINT, internal=False)
        # compile each filter into a string/regexp predicate
        self.matches = {}
        for k, v in matches.items():
            self.matches[k] = get_str_or_regexp_match(v)

    def print_matches(self, values=None):
        """Dump the configured filters and, when given, the frame values
        they were tested against."""
        gdb.write("%s matches:\n" % (self.__class__.__name__,), gdb.STDERR)
        if not values:
            values = {}
        for k, func in self.matches.items():
            v = values.get(k)
            if v is None:
                gdb.write(" %s = %s (no value provided)\n" % (k, func.__doc__), gdb.STDERR)
            else:
                try:
                    res = func(v)
                except Exception as e:
                    res = "Exception executing match: %s" % (e,)
                gdb.write(" %s = %s (value: '%s', match: %s)\n" %
                          (k, func.__doc__, v, res), gdb.STDERR)
        gdb.write("\n", gdb.STDERR)

    def get_values(self):
        """Subclass hook: read the current frame and return the values dict."""
        raise NotImplementedError()

    def stop(self):
        try:
            values = self.get_values()
        except Exception as e:
            gdb.write("Exception at %s.get_values(): %s\n" % (
                self.__class__.__name__, e), gdb.STDERR)
            return False
        if not values:
            gdb.write("%s.get_values() did not return values.\n" % (
                self.__class__.__name__,), gdb.STDERR)
            return False

        def print_values():
            gdb.write("Values:\n", gdb.STDERR)
            for k, v in values.items():
                gdb.write(" %s: %s\n" % (k, v), gdb.STDERR)
            gdb.write("\n", gdb.STDERR)

        # every configured filter must match its frame value
        for k, match_func in self.matches.items():
            try:
                v = values[k]
            except KeyError:
                gdb.write("%s.get_values() did not provide key '%s'.\n" % (
                    self.__class__.__name__, k), gdb.STDERR)
                self.print_matches(values)
                print_values()
                return False
            try:
                if not match_func(v):
                    return False
            except Exception as e:
                gdb.write("Exception at %s.stop() while matching %s %s (%s): %s\n" % (
                    self.__class__.__name__, k, v, match_func.__doc__, e,), gdb.STDERR)
                self.print_matches(values)
                return False
        method = values.get("method")
        banner = values.get("banner")
        if not method:
            # no callee to break at: fall back to stopping here, at the caller
            node = values.get("node")
            if node:
                gdb.write("NODE: %s\n" % (node,), gdb.STDERR)
            gdb.write("%s did not return the internal method to break at.\n" % (
                self.__class__.__name__,), gdb.STDERR)
            self.print_matches(values)
            gdb.write("Breaking at the caller function %s\n" % (self.location,),
                      gdb.STDERR)
            return True

        def add_breakpoint():
            try:
                self.InternalBreak(method, banner, self.matches, values)
            except Exception as e:
                gdb.write("Could not add internal breakpoint: %s\n" % (e,), gdb.STDERR)
                self.print_matches(values)
        # breakpoints cannot be created from within stop(); defer to gdb
        gdb.post_event(add_breakpoint)
        return False
def get_str_or_regexp_match(string):
    """Build a text predicate from a filter string.

    A falsy filter means "match anything". A filter longer than two
    characters that is enclosed in slashes ("/.../") is treated as a
    regular expression matched with re.match(); anything else is an
    exact string comparison. The predicate's __doc__ carries the
    original filter text for diagnostics.
    """
    pattern = string or "/.*/"
    if len(pattern) > 2 and pattern.startswith("/") and pattern.endswith("/"):
        compiled = re.compile(pattern[1:-1])

        def match(text):
            return bool(compiled.match(text))
    else:
        def match(text):
            return pattern == text
    match.__doc__ = pattern
    return match
class FlowBreakOpen(InspectAndBreakIfMatches):
    # Breaks in sol_flow_node_init(), then plants an internal breakpoint on
    # the node type's open() method, filtered by node id.
    def __init__(self, matches):
        InspectAndBreakIfMatches.__init__(self, "sol_flow_node_init", matches)

    def get_values(self):
        # reads sol_flow_node_init()'s parameters: name, type, node, options
        node_id = gdb.parse_and_eval("name")
        if node_id:
            node_id = node_id.string()
        type = gdb.parse_and_eval("type")
        method = type["open"]
        node = gdb.parse_and_eval("*node")
        options = gdb.parse_and_eval("options")

        def banner(matches, values):
            gdb.write("""\
Break before opening node:
FUNCTION: %s
NODE....: %s (filter: %s)
%s""" % (method, node,
         matches["node_id"].__doc__,
         get_type_options_string(node["type"], options)))
        return {
            "node": node,
            "node_id": node_id,
            "method": method,
            "banner": banner,
        }
class FlowBreakClose(InspectAndBreakIfMatches):
    # Breaks in sol_flow_node_fini(), then plants an internal breakpoint on
    # the node type's close() method, filtered by node id.
    def __init__(self, matches):
        InspectAndBreakIfMatches.__init__(self, "sol_flow_node_fini", matches)

    def get_values(self):
        # reads sol_flow_node_fini()'s 'node' parameter
        node = gdb.parse_and_eval("*node")
        node_id = node["id"]
        if node_id:
            node_id = node_id.string()
        type = node["type"]
        method = type["close"]

        def banner(matches, values):
            gdb.write("""\
Break before closing node:
FUNCTION: %s
NODE....: %s (filter: %s)
""" % (method, node,
       matches["node_id"].__doc__))
        return {
            "node": node,
            "node_id": node_id,
            "method": method,
            "banner": banner,
        }
class FlowBreakSend(InspectAndBreakIfMatches):
    # Breaks in inspector_will_send_packet(), then plants an internal
    # breakpoint on the parent container type's send() method, filtered by
    # node id, output port name and packet type.
    def __init__(self, matches):
        InspectAndBreakIfMatches.__init__(self, "inspector_will_send_packet", matches)

    def get_values(self):
        # reads inspector_will_send_packet()'s src_node/src_port/packet
        node = gdb.parse_and_eval("*src_node")
        port = gdb.parse_and_eval("src_port")
        packet = gdb.parse_and_eval("*packet")
        node_id = node["id"]
        if node_id:
            node_id = node_id.string()
        port_name = get_node_port_out_name_by_index(node, port)
        packet_type = packet["type"]["name"].string()
        # the send() implementation lives on the parent's container type
        type = gdb.parse_and_eval("(struct sol_flow_node_container_type *)src_node->parent->type")
        method = type["send"]

        def banner(matches, values):
            gdb.write("""\
Break before sending packet:
FUNCTION: %s
NODE....: %s (filter: %s)
PORT....: %s (index: %s, filter: %s)
PACKET..: %s (filter: %s)
""" % (
                method,
                node,
                matches["node_id"].__doc__,
                port_name,
                port,
                matches["port_name"].__doc__,
                packet,
                matches["packet_type"].__doc__))
        return {
            "node": node,
            "node_id": node_id,
            "port_name": port_name,
            "packet_type": packet_type,
            "method": method,
            "banner": banner,
        }
class FlowBreakProcess(InspectAndBreakIfMatches):
    # Breaks in inspector_will_deliver_packet(), then plants an internal
    # breakpoint on the destination input port's process() method, filtered
    # by node id, input port name and packet type.
    def __init__(self, matches):
        InspectAndBreakIfMatches.__init__(self, "inspector_will_deliver_packet", matches)

    def get_values(self):
        # reads inspector_will_deliver_packet()'s dst_node/dst_port/packet
        node = gdb.parse_and_eval("*dst_node")
        port = gdb.parse_and_eval("dst_port")
        packet = gdb.parse_and_eval("*packet")
        node_id = node["id"]
        if node_id:
            node_id = node_id.string()
        port_name = get_node_port_in_name_by_index(node, port)
        packet_type = packet["type"]["name"].string()
        # resolve the runtime input-port type to find its process() pointer
        get_port_in = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
        type = node["type"]
        port_type = get_port_in(type, port)
        if not port_type:
            method = None
        else:
            method = port_type["process"]

        def banner(matches, values):
            gdb.write("""\
Break before processing packet:
FUNCTION: %s
NODE....: %s (filter: %s)
PORT....: %s (index: %s, filter: %s)
PACKET..: %s (filter: %s)
""" % (
                method,
                node,
                matches["node_id"].__doc__,
                port_name,
                port,
                matches["port_name"].__doc__,
                packet,
                matches["packet_type"].__doc__))
        return {
            "node": node,
            "node_id": node_id,
            "port_name": port_name,
            "packet_type": packet_type,
            "method": method,
            "banner": banner,
        }
class FlowCommand(gdb.Command):
    "Commands to operate with 'sol_flow'"
    # Prefix command only; the registered sub-commands do the actual work.
    def __init__(self):
        gdb.Command.__init__(self, "sol_flow", gdb.COMMAND_USER, gdb.COMPLETE_COMMAND, True)

    def invoke(self, arg, from_tty):
        # reached only when no sub-command was given
        raise gdb.GdbError("missing sub-command: break or print")
class FlowBreakCommand(gdb.Command):
    "Add an execution break when sol_flow events happen."
    # Prefix command for 'sol_flow break'; see the open/close/send/process
    # sub-commands for the real behavior.
    def __init__(self):
        gdb.Command.__init__(self, "sol_flow break", gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_SYMBOL, True)

    def invoke(self, arg, from_tty):
        # reached only when no sub-command was given
        raise gdb.GdbError("missing sub-command: open, close, send or process")
class FlowBreakFilterBaseCommand(gdb.Command):
    """Base command for 'sol_flow break' subcommands.

    The subcommand will be registered and will take matches as list of
    optional arguments. If not available then None is assumed. These
    parameters will be sent to breakpoint in order.
    """

    def __init__(self, subcommand, matches, breakpoint):
        gdb.Command.__init__(self, "sol_flow break " + subcommand,
                             gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_SYMBOL, True)
        self.matches = matches
        self.breakpoint = breakpoint

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        # map positional arguments onto the declared match names,
        # padding missing trailing arguments with None
        params = {}
        for position, name in enumerate(self.matches):
            params[name] = argv[position] if position < len(argv) else None
        self.breakpoint(params)
        self.dont_repeat()
class FlowBreakOpenCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node is created (type->open).

    Arguments: node_id

    node_id may be an exact string or a regular expression if enclosed
    in "//". Examples:

    sol_flow break open timer
        will break on nodes with id "timer" (exact match)

    sol_flow break open /^timer.*$/
        will break on nodes with id that matches regular expression
        "^timer.*$" (starts with "timer")
    """
    # registers "sol_flow break open", backed by the FlowBreakOpen breakpoint
    def __init__(self):
        matches = ["node_id"]
        FlowBreakFilterBaseCommand.__init__(self, "open", matches, FlowBreakOpen)
class FlowBreakCloseCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node is destroyed (type->close).

    Arguments: node_id

    node_id may be an exact string or a regular expression if enclosed
    in "//". Examples:

    sol_flow break close timer
        will break on nodes with id "timer" (exact match)

    sol_flow break close /^timer.*$/
        will break on nodes with id that matches regular expression
        "^timer.*$" (starts with "timer")
    """
    # registers "sol_flow break close", backed by the FlowBreakClose breakpoint
    def __init__(self):
        matches = ["node_id"]
        FlowBreakFilterBaseCommand.__init__(self, "close", matches, FlowBreakClose)
class FlowBreakSendCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node sends a packet on its output port.

    Arguments: node_id port_name packet_type

    Each argument is optional and may be a string or a regular
    expression if enclosed in "//". If omitted the regular expression
    /.*/ is assumed, matching all patterns.
    """
    # registers "sol_flow break send", backed by the FlowBreakSend breakpoint
    def __init__(self):
        matches = ["node_id", "port_name", "packet_type"]
        FlowBreakFilterBaseCommand.__init__(self, "send", matches, FlowBreakSend)
class FlowBreakProcessCommand(FlowBreakFilterBaseCommand):
    """Add an execution break when sol_flow_node will receive a packet on its input port (port's process()).

    Arguments: node_id port_name packet_type

    Each argument is optional and may be a string or a regular
    expression if enclosed in "//". If omitted the regular expression
    /.*/ is assumed, matching all patterns.
    """
    # registers "sol_flow break process", backed by the FlowBreakProcess breakpoint
    def __init__(self):
        matches = ["node_id", "port_name", "packet_type"]
        FlowBreakFilterBaseCommand.__init__(self, "process", matches, FlowBreakProcess)
class FlowPrintCommand(gdb.Command):
    "Print sol_flow types"
    # Prefix command for 'sol_flow print'; see the type/port/options
    # sub-commands for the real behavior.
    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print", gdb.COMMAND_BREAKPOINTS, gdb.COMPLETE_COMMAND, True)

    def invoke(self, arg, from_tty):
        # reached only when no sub-command was given
        raise gdb.GdbError("missing sub-command: type, port or options")
def get_node_type_from_exp(arg):
    """Evaluate arg and return its 'struct sol_flow_node_type' value.

    arg may be an expression yielding either a sol_flow_node (possibly a
    pointer/const pointer; its type member is returned) or a
    sol_flow_node_type (returned as-is). Anything else raises
    gdb.GdbError.
    """
    node = gdb.parse_and_eval(arg)
    if not node:
        raise gdb.GdbError("invalid node: %s" % (arg,))
    gt = node.type.unqualified()
    node_struct = gdb.lookup_type("struct sol_flow_node")
    type_struct = gdb.lookup_type("struct sol_flow_node_type")
    if gt in (node_struct, node_struct.pointer(), node_struct.const().pointer()):
        return node["type"]
    if gt in (type_struct, type_struct.pointer(), type_struct.const().pointer()):
        return node
    raise gdb.GdbError("invalid node: %s" % (arg,))
class FlowPrintTypeCommand(gdb.Command):
    """Prints the type information for the given 'struct sol_flow_node'.

    Arguments: node
    """
    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print type", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)

    def invoke(self, arg, from_tty):
        arg = gdb.string_to_argv(arg)
        if len(arg) < 1:
            raise gdb.GdbError("missing pointer to struct sol_flow_node")
        # dereference so the FlowTypePrinter pretty-printer formats it
        type = get_node_type_from_exp(arg[0])
        gdb.write("%s\n" % (type.dereference(),))
class FlowPrintPortCommand(gdb.Command):
    """Prints the port information for the given node.

    Arguments: node [direction] [filter_type] [filter_specifier]

    node is the pointer to node where to find the port.

    direction may be 'in', 'out' or 'both'. If omitted, both will be
    assumed. May be omitted and 'both' is used.

    filter_type may be 'all', 'number' or 'name'. If omitted, all
    will be assumed.

    If filter_type is 'number', then filter_specifier must be an integer.

    If filter_type is 'name', then filter_specifier must be a string
    or a regular expression enclosed in "//".

    If filter_type is omitted, then it's guessed from filter_specifier.
    """
    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print port", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)

    def _print_ports(self, type, tdesc, member, filter):
        # Print every port in tdesc[member] ('ports_in'/'ports_out') that
        # passes the filter dict ({"type": "all"|"number"|"name", ...}).
        array = tdesc[member]
        if not array:
            return
        did = 0
        i = 0
        if member == "ports_in":
            get_port_type = gdb.parse_and_eval("sol_flow_node_type_get_port_in")
        else:
            get_port_type = gdb.parse_and_eval("sol_flow_node_type_get_port_out")
        # the description array is NULL-terminated
        while array[i]:
            port = array[i]
            if filter["type"] == "all" or \
               (filter["type"] == "number" and filter["number"] == i) or \
               (filter["type"] == "name" and filter["name"](port["name"].string())):
                # print the section header once, before the first match
                if did == 0:
                    gdb.write("%s:\n" % member)
                did += 1
                gdb.write(" %d: %s (%s)\n description: %s\n" % (
                    i,
                    port["name"].string(),
                    port["data_type"].string(),
                    port["description"].string(),
                    ))
                port_type = get_port_type(type, i)
                if port_type["connect"]:
                    gdb.write(" connect(): %s\n" % (port_type["connect"],))
                if port_type["disconnect"]:
                    gdb.write(" disconnect(): %s\n" % (port_type["disconnect"],))
                # only input ports have a process() handler
                if member == "ports_in" and port_type["process"]:
                    gdb.write(" process(): %s\n" % (port_type["process"],))
                gdb.write("\n")
            i += 1

    def invoke(self, arg, from_tty):
        arg = gdb.string_to_argv(arg)
        if len(arg) < 1:
            raise gdb.GdbError("missing pointer to struct sol_flow_node")
        direction = "both"
        filter = {"type": "all"}
        if len(arg) > 1:
            direction = arg[1]
            # a second argument that is not a direction is treated as a
            # filter specifier: integer -> number filter, else name filter
            if direction not in ("both", "in", "out"):
                direction = "both"
                try:
                    filter["number"] = int(arg[1])
                    filter["type"] = "number"
                except ValueError:
                    filter["name"] = get_str_or_regexp_match(arg[1])
                    filter["type"] = "name"
        if len(arg) > 2:
            filter["type"] = arg[2]
            if filter["type"] not in ("all", "number", "name"):
                # guess the filter type from the specifier itself
                try:
                    filter["number"] = int(arg[2])
                    filter["type"] = "number"
                except ValueError:
                    filter["name"] = get_str_or_regexp_match(arg[2])
                    filter["type"] = "name"
            elif filter["type"] == 'number':
                if len(arg) < 4:
                    raise gdb.GdbError("missing port number to filter")
                filter["number"] = int(arg[3])
            elif filter["type"] == 'name':
                if len(arg) < 4:
                    raise gdb.GdbError("missing port name to filter")
                filter["name"] = get_str_or_regexp_match(arg[3])
        type = get_node_type_from_exp(arg[0])
        tdesc = get_type_description(type)
        if not tdesc:
            gdb.write("no node type description\n")
            return
        if direction == "both" or direction == "in":
            self._print_ports(type, tdesc, "ports_in", filter)
        if direction == "both" or direction == "out":
            self._print_ports(type, tdesc, "ports_out", filter)
class FlowPrintOptionsCommand(gdb.Command):
    """Prints the options used to open the given node.

    Arguments: node options

    node is the pointer to node where to find the port.
    options is the pointer to options to open to given node.
    """
    def __init__(self):
        gdb.Command.__init__(self, "sol_flow print options", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, True)

    def invoke(self, arg, from_tty):
        arg = gdb.string_to_argv(arg)
        if len(arg) != 2:
            raise gdb.GdbError("Usage: sol_flow print options <node> <options>")
        type = get_node_type_from_exp(arg[0])
        options = gdb.parse_and_eval(arg[1])
        # get_type_options_string handles NULL options and missing descriptions
        gdb.write(get_type_options_string(type, options))
# Instantiate each command class once so GDB registers the 'sol_flow'
# command tree, then hook the pretty-printers into the current objfile.
FlowCommand()
FlowBreakCommand()
FlowBreakOpenCommand()
FlowBreakCloseCommand()
FlowBreakSendCommand()
FlowBreakProcessCommand()
FlowPrintCommand()
FlowPrintTypeCommand()
FlowPrintPortCommand()
FlowPrintOptionsCommand()
register_pretty_printers(gdb.current_objfile())
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class RoleList(ListResource):
""" """
def __init__(self, version, service_sid):
"""
Initialize the RoleList
:param Version version: Version that contains the resource
:param service_sid: The SID of the Service that the resource is associated with
:returns: twilio.rest.chat.v2.service.role.RoleList
:rtype: twilio.rest.chat.v2.service.role.RoleList
"""
super(RoleList, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, }
self._uri = '/Services/{service_sid}/Roles'.format(**self._solution)
def create(self, friendly_name, type, permission):
"""
Create a new RoleInstance
:param unicode friendly_name: A string to describe the new resource
:param RoleInstance.RoleType type: The type of role
:param unicode permission: A permission the role should have
:returns: Newly created RoleInstance
:rtype: twilio.rest.chat.v2.service.role.RoleInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'Type': type,
'Permission': serialize.map(permission, lambda e: e),
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return RoleInstance(self._version, payload, service_sid=self._solution['service_sid'], )
def stream(self, limit=None, page_size=None):
"""
Streams RoleInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.chat.v2.service.role.RoleInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists RoleInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.chat.v2.service.role.RoleInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
         page_size=values.unset):
    """
    Retrieve a single page of RoleInstance records from the API.

    The request is executed immediately.

    :param str page_token: PageToken provided by the API
    :param int page_number: Page number, kept purely for client state
    :param int page_size: Number of records to return, defaults to 50
    :returns: Page of RoleInstance
    :rtype: twilio.rest.chat.v2.service.role.RolePage
    """
    query = values.of({
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page('GET', self._uri, params=query)
    return RolePage(self._version, response, self._solution)
def get_page(self, target_url):
    """
    Retrieve a specific page of RoleInstance records from the API.

    The request is executed immediately against the given absolute URL.

    :param str target_url: API-generated URL for the requested results page
    :returns: Page of RoleInstance
    :rtype: twilio.rest.chat.v2.service.role.RolePage
    """
    # Absolute URLs bypass this list's URI and go through the domain client.
    response = self._version.domain.twilio.request('GET', target_url)
    return RolePage(self._version, response, self._solution)
def get(self, sid):
    """
    Construct a RoleContext for a single Role.

    :param sid: The SID of the Role resource to fetch
    :returns: twilio.rest.chat.v2.service.role.RoleContext
    :rtype: twilio.rest.chat.v2.service.role.RoleContext
    """
    return RoleContext(
        self._version,
        service_sid=self._solution['service_sid'],
        sid=sid,
    )
def __call__(self, sid):
    """
    Construct a RoleContext for a single Role (shorthand for get()).

    :param sid: The SID of the Role resource to fetch
    :returns: twilio.rest.chat.v2.service.role.RoleContext
    :rtype: twilio.rest.chat.v2.service.role.RoleContext
    """
    return RoleContext(
        self._version,
        service_sid=self._solution['service_sid'],
        sid=sid,
    )
def __repr__(self):
    """
    Machine-friendly representation of this list resource.

    :returns: Machine friendly representation
    :rtype: str
    """
    # The list carries no per-instance state worth showing.
    return '<Twilio.Chat.V2.RoleList>'
class RolePage(Page):
    """A single page of Role records."""

    def __init__(self, version, response, solution):
        """
        Initialize the RolePage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: Path solution dict carrying the Service SID
        :returns: twilio.rest.chat.v2.service.role.RolePage
        :rtype: twilio.rest.chat.v2.service.role.RolePage
        """
        super(RolePage, self).__init__(version, response)
        # Path solution, forwarded to each marshaled instance.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build a RoleInstance from an API payload dict.

        :param dict payload: Payload response from the API
        :returns: twilio.rest.chat.v2.service.role.RoleInstance
        :rtype: twilio.rest.chat.v2.service.role.RoleInstance
        """
        return RoleInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
        )

    def __repr__(self):
        """
        Machine-friendly representation of this page.

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Chat.V2.RolePage>'
class RoleContext(InstanceContext):
    """Context for fetching, updating and deleting a single Role."""

    def __init__(self, version, service_sid, sid):
        """
        Initialize the RoleContext.

        :param Version version: Version that contains the resource
        :param service_sid: The SID of the Service to fetch the resource from
        :param sid: The SID of the Role resource to fetch
        :returns: twilio.rest.chat.v2.service.role.RoleContext
        :rtype: twilio.rest.chat.v2.service.role.RoleContext
        """
        super(RoleContext, self).__init__(version)
        # Path solution used to build the resource URI.
        self._solution = {'service_sid': service_sid, 'sid': sid}
        self._uri = '/Services/{service_sid}/Roles/{sid}'.format(**self._solution)

    def fetch(self):
        """
        Fetch a RoleInstance.

        :returns: Fetched RoleInstance
        :rtype: twilio.rest.chat.v2.service.role.RoleInstance
        """
        payload = self._version.fetch('GET', self._uri, params=values.of({}))
        return RoleInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
            sid=self._solution['sid'],
        )

    def delete(self):
        """
        Delete the RoleInstance.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete('delete', self._uri)

    def update(self, permission):
        """
        Update the RoleInstance.

        :param unicode permission: A permission the role should have
        :returns: Updated RoleInstance
        :rtype: twilio.rest.chat.v2.service.role.RoleInstance
        """
        data = values.of({'Permission': serialize.map(permission, lambda e: e)})
        payload = self._version.update('POST', self._uri, data=data)
        return RoleInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
            sid=self._solution['sid'],
        )

    def __repr__(self):
        """
        Machine-friendly representation including the path solution.

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Chat.V2.RoleContext {}>'.format(details)
class RoleInstance(InstanceResource):
    """A single Role resource marshaled from an API payload."""

    class RoleType(object):
        # Closed set of role types the API reports.
        CHANNEL = "channel"
        DEPLOYMENT = "deployment"

    def __init__(self, version, payload, service_sid, sid=None):
        """
        Initialize the RoleInstance.

        :param Version version: Version that contains the resource
        :param dict payload: Raw API payload to marshal
        :param service_sid: SID of the Service owning this role
        :param sid: Role SID; taken from the payload when omitted
        :returns: twilio.rest.chat.v2.service.role.RoleInstance
        :rtype: twilio.rest.chat.v2.service.role.RoleInstance
        """
        super(RoleInstance, self).__init__(version)
        # Marshaled properties from the raw payload.
        self._properties = {
            'sid': payload.get('sid'),
            'account_sid': payload.get('account_sid'),
            'service_sid': payload.get('service_sid'),
            'friendly_name': payload.get('friendly_name'),
            'type': payload.get('type'),
            'permissions': payload.get('permissions'),
            'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
            'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
            'url': payload.get('url'),
        }
        # Context is built lazily by the _proxy property.
        self._context = None
        self._solution = {
            'service_sid': service_sid,
            'sid': sid or self._properties['sid'],
        }

    @property
    def _proxy(self):
        """
        Lazily build (and cache) a RoleContext for this instance; all
        instance actions are forwarded to it.

        :returns: RoleContext for this RoleInstance
        :rtype: twilio.rest.chat.v2.service.role.RoleContext
        """
        if self._context is None:
            self._context = RoleContext(
                self._version,
                service_sid=self._solution['service_sid'],
                sid=self._solution['sid'],
            )
        return self._context

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def service_sid(self):
        """
        :returns: The SID of the Service that the resource is associated with
        :rtype: unicode
        """
        return self._properties['service_sid']

    @property
    def friendly_name(self):
        """
        :returns: The string that you assigned to describe the resource
        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def type(self):
        """
        :returns: The type of role
        :rtype: RoleInstance.RoleType
        """
        return self._properties['type']

    @property
    def permissions(self):
        """
        :returns: An array of the permissions the role has been granted
        :rtype: unicode
        """
        return self._properties['permissions']

    @property
    def date_created(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was created
        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def date_updated(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was last updated
        :rtype: datetime
        """
        return self._properties['date_updated']

    @property
    def url(self):
        """
        :returns: The absolute URL of the Role resource
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """
        Fetch a RoleInstance.

        :returns: Fetched RoleInstance
        :rtype: twilio.rest.chat.v2.service.role.RoleInstance
        """
        return self._proxy.fetch()

    def delete(self):
        """
        Delete the RoleInstance.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()

    def update(self, permission):
        """
        Update the RoleInstance.

        :param unicode permission: A permission the role should have
        :returns: Updated RoleInstance
        :rtype: twilio.rest.chat.v2.service.role.RoleInstance
        """
        return self._proxy.update(permission)

    def __repr__(self):
        """
        Machine-friendly representation including the path solution.

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Chat.V2.RoleInstance {}>'.format(details)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""library to handle helix commands"""
import json
from restkit import Resource
from helixexceptions import HelixException
from helixexceptions import HelixAlreadyExistsException
from helixexceptions import HelixDoesNotExistException
class RestHelixFunctions:
    """Thin wrapper around the Apache Helix admin REST API for one controller host.

    All public methods translate to GET/POST/DELETE requests against the
    controller's ``/clusters/...`` endpoints and raise HelixException
    subclasses on API-reported errors.
    """

    def __init__(self, host):
        """Remember the controller host, normalizing it to include a scheme."""
        if "http://" not in host:
            self.host = "http://{0}".format(host)
        else:
            self.host = host

    def _post_payload(self, path, data, **kwargs):
        """generic function to handle posting data

        :rtype : return body of page
        :param path: path to interact with
        :param data: data to send
        :param kwargs: additional keyword args, each serialized to JSON and
                       appended as an extra form field
        """
        res = Resource(self.host)
        # Helix expects a form-encoded "jsonParameters" field, not a JSON body.
        payload = "jsonParameters={0}".format(json.dumps(data))
        for key, value in kwargs.items():
            payload += '&{0}={1}'.format(key, json.dumps(value))
        headers = {"Content-Type": "application/json"}
        page = res.post(path=path, payload=payload, headers=headers)
        body = page.body_string()
        if body:
            body = json.loads(body)
        # Surface API-level failures embedded in an otherwise-OK response.
        if isinstance(body, dict) and "ERROR" in body:
            raise HelixException(body["ERROR"])
        return body

    def _get_page(self, path):
        """GET *path* and return the decoded JSON body.

        :raises HelixException: on an empty body or an API-reported error.
        """
        res = Resource(self.host)
        page = res.get(path=path)
        data = page.body_string()
        body = None
        try:
            body = json.loads(data)
        except ValueError:
            # Some controller responses carry trailing bytes that break the
            # JSON parser; retry without them.
            body = json.loads(data[:-3])
        if not body:
            raise HelixException("body for path {0} is empty".format(path))
        if isinstance(body, dict) and "ERROR" in body:
            raise HelixException(body["ERROR"])
        return body

    def _delete_page(self, path):
        """DELETE *path*; return the decoded JSON body if any, else None."""
        retval = None
        res = Resource(self.host)
        page = res.delete(path)
        data = page.body_string()
        if data:
            retval = json.loads(data)
        return retval

    def get_clusters(self):
        """Query the controller for the names of all clusters."""
        return self._get_page("/clusters")["listFields"]["clusters"]

    def get_resource_groups(self, cluster):
        """Query the controller for the resource groups of *cluster*."""
        return self._get_page("/clusters/{0}/resourceGroups".format(cluster))[
            "listFields"]["ResourceGroups"]

    def get_resource_tags(self, cluster):
        """Return a dict of resource tags for *cluster*."""
        return self._get_page("/clusters/{0}/resourceGroups".format(cluster))[
            "mapFields"]["ResourceTags"]

    def get_resource_group(self, cluster, resource):
        """Return the full record of *resource* within *cluster*.

        :raises HelixException: if the resource group does not exist.
        """
        if resource not in self.get_resource_groups(cluster):
            raise HelixException(
                "{0} is not a resource group of {1}".format(resource, cluster))
        return self._get_page("/clusters/{0}/resourceGroups/{1}".format(cluster,
                                                                        resource))

    def get_ideal_state(self, cluster, resource):
        """Return the ideal state map of *resource* within *cluster*.

        :raises HelixException: if the resource group does not exist.
        """
        if resource not in self.get_resource_groups(cluster):
            raise HelixException(
                "{0} is not a resource group of {1}".format(resource, cluster))
        return self._get_page("/clusters/{0}/resourceGroups/{1}/idealState".
                              format(cluster, resource))["mapFields"]

    def get_external_view(self, cluster, resource):
        """Return the external view map for *resource* within *cluster*.

        :raises HelixException: if the resource group does not exist.
        """
        if resource not in self.get_resource_groups(cluster):
            raise HelixException(
                "{0} is not a resource group of {1}".format(resource, cluster))
        return self._get_page("/clusters/{0}/resourceGroups/{1}/externalView".format(
            cluster, resource))["mapFields"]

    def get_instances(self, cluster):
        """Return the list of instances registered to *cluster*."""
        if not cluster:
            raise HelixException("Cluster must be set before "
                                 "calling this function")
        return self._get_page("/clusters/{0}/instances".format(cluster))[
            "instanceInfo"]

    def get_instance_detail(self, cluster, name):
        """Return the details of instance *name* in *cluster*."""
        return self._get_page("/clusters/{0}/instances/{1}".format(cluster, name))

    def get_config(self, cluster, config):
        """Return the requested config scope for *cluster*."""
        return self._get_page("/clusters/{0}/configs/{1}".format(cluster, config))

    def add_cluster(self, cluster):
        """Add *cluster* to Helix.

        :raises HelixAlreadyExistsException: if the cluster already exists.
        """
        if cluster in self.get_clusters():
            raise HelixAlreadyExistsException(
                "Cluster {0} already exists".format(cluster))
        data = {"command": "addCluster",
                "clusterName": cluster}
        page = self._post_payload("/clusters", data)
        return page

    def add_instance(self, cluster, instances, port):
        """Add a list of instances (host names, each on *port*) to *cluster*.

        Instances already registered are silently skipped.

        :raises HelixDoesNotExistException: if the cluster does not exist.
        :raises HelixAlreadyExistsException: if every instance already exists.
        """
        if cluster not in self.get_clusters():
            raise HelixDoesNotExistException(
                "Cluster {0} does not exist".format(cluster))
        if not isinstance(instances, list):
            instances = [instances]
        instances = ["{0}:{1}".format(instance, port) for instance in instances]
        try:
            newinstances = set(instances)
            # Helix stores instance ids as host_port; normalize back to host:port.
            oldinstances = {x["id"].replace('_', ':')
                            for x in self.get_instances(cluster)}
            instances = list(newinstances - oldinstances)
        except HelixException:
            # Thrown when the cluster has no instances yet, which is expected
            # when we are just populating it.
            pass
        if instances:
            data = {"command": "addInstance",
                    "instanceNames": ";".join(instances)}
            instance_path = "/clusters/{0}/instances".format(cluster)
            page = self._post_payload(instance_path, data)
            return page
        else:
            raise HelixAlreadyExistsException(
                "All instances given already exist in cluster")

    def rebalance(self, cluster, resource, replicas, key=""):
        """Rebalance *resource* in *cluster* to the given replica count.

        :param key: optional key passed through to the rebalance command.
        :raises HelixException: if the resource group does not exist.
        """
        if resource not in self.get_resource_groups(cluster):
            raise HelixException(
                "{0} is not a resource group of {1}".format(resource, cluster))
        data = {"command": "rebalance",
                "replicas": replicas}
        if key:
            data["key"] = key
        page = self._post_payload("/clusters/{0}/resourceGroups/{1}/idealState".format(
            cluster, resource), data)
        return page

    def activate_cluster(self, cluster, grand_cluster, enabled=True):
        """Activate (or deactivate) *cluster* within *grand_cluster*.

        :raises HelixException: if the grand cluster does not exist.
        """
        if grand_cluster not in self.get_clusters():
            raise HelixException(
                "grand cluster {0} does not exist".format(grand_cluster))
        data = {'command': 'activateCluster',
                'grandCluster': grand_cluster}
        # The API expects the string literals "true"/"false", not booleans.
        data["enabled"] = "true" if enabled else "false"
        page = self._post_payload("/clusters/{0}".format(cluster), data)
        return page

    def deactivate_cluster(self, cluster, grand_cluster):
        """Deactivate *cluster* within *grand_cluster*."""
        # BUG FIX: was calling the bare name activate_cluster (NameError);
        # must delegate through self.
        return self.activate_cluster(cluster, grand_cluster, enabled=False)

    def add_resource(self, cluster, resource, partitions, state_model_def, mode=""):
        """Add resource group *resource* to *cluster*.

        :raises HelixAlreadyExistsException: if it already exists.
        """
        if resource in self.get_resource_groups(cluster):
            raise HelixAlreadyExistsException(
                "ResourceGroup {0} already exists".format(resource))
        data = {"command": "addResource",
                "resourceGroupName": resource,
                "partitions": partitions,
                "stateModelDefRef": state_model_def}
        if mode:
            data["mode"] = mode
        return self._post_payload("/clusters/{0}/resourceGroups".format(cluster),
                                  data)

    def enable_resource(self, cluster, resource, enabled=True):
        """Enable or disable *resource* in *cluster*."""
        data = {"command": "enableResource"}
        data["enabled"] = "true" if enabled else "false"
        return self._post_payload("/clusters/{0}/resourceGroups/{1}".format(
            cluster, resource), data)

    def disable_resource(self, cluster, resource):
        """Disable *resource* in *cluster*."""
        # BUG FIX: was calling the bare name enable_resource (NameError).
        return self.enable_resource(cluster, resource, enabled=False)

    def alter_ideal_state(self, cluster, resource, newstate):
        """Replace the ideal state of *resource* with *newstate*."""
        data = {"command": "alterIdealState"}
        return self._post_payload("/clusters/{0}/resourceGroups/{1}/idealState".format(
            cluster, resource), data,
            newIdealState=newstate)

    def enable_instance(self, cluster, instance, enabled=True):
        """Enable or disable *instance* within *cluster*."""
        data = {"command": "enableInstance"}
        data["enabled"] = "true" if enabled else "false"
        return self._post_payload("/clusters/{0}/instances/{1}".format(cluster,
                                                                       instance),
                                  data)

    def disable_instance(self, cluster, instance):
        """Disable *instance* within *cluster*."""
        # BUG FIX: was calling the bare name enable_instance (NameError).
        return self.enable_instance(cluster, instance, enabled=False)

    def swap_instance(self, cluster, old, new):
        """Swap instance *old* for *new* in *cluster*."""
        data = {"command": "swapInstance",
                "oldInstance": old,
                "newInstance": new}
        return self._post_payload("/cluster/{0}/instances".format(cluster), data)

    def enable_partition(self, cluster, resource, partition, instance,
                         enabled=True):
        """Enable or disable a partition of *resource* on *instance*.

        :raises HelixDoesNotExistException: if the resource group is unknown.
        """
        if resource not in self.get_resource_groups(cluster):
            raise HelixDoesNotExistException(
                "ResourceGroup {0} does not exist".format(resource))
        data = {"command": "enablePartition",
                "resource": resource,
                "partition": partition,
                "enabled": enabled}
        return self._post_payload("/clusters/{0}/instances/{1}".format(cluster,
                                                                       instance),
                                  data)

    def disable_partition(self, cluster, resource, partitions, instance):
        """Disable the given partition(s) of *resource* on *instance*."""
        # BUG FIX: was calling the bare name enable_partition (NameError).
        return self.enable_partition(cluster, resource, partitions, instance,
                                     enabled=False)

    def reset_partition(self, cluster, resource, partitions, instance):
        """Reset the given partitions of *resource* on *instance*.

        :raises HelixDoesNotExistException: if the resource group is unknown.
        """
        if resource not in self.get_resource_groups(cluster):
            raise HelixDoesNotExistException(
                "ResourceGroup {0} does not exist".format(resource))
        data = {"command": "resetPartition",
                "resource": resource,
                "partition": " ".join(partitions)}
        return self._post_payload("/clusters/{0}/instances/{1}".format(cluster,
                                                                       instance),
                                  data)

    def reset_resource(self, cluster, resource):
        """Reset *resource* in *cluster*.

        :raises HelixDoesNotExistException: if the resource group is unknown.
        """
        if resource not in self.get_resource_groups(cluster):
            raise HelixDoesNotExistException(
                "ResourceGroup {0} does not exist".format(resource))
        data = {"command": "resetResource"}
        return self._post_payload("/clusters/{0}/resourceGroups/{1}".format(cluster,
                                                                            resource),
                                  data)

    def reset_instance(self, cluster, instance):
        """Reset *instance* in *cluster*.

        :raises HelixDoesNotExistException: if the instance is unknown.
        """
        if instance not in self.get_instances(cluster):
            raise HelixDoesNotExistException(
                "Instance {0} does not exist".format(instance))
        data = {"command": "resetInstance"}
        return self._post_payload("/clusters/{0}/instances/{1}".format(cluster,
                                                                       instance),
                                  data)

    def add_instance_tag(self, cluster, instance, tag):
        """Add *tag* to *instance*."""
        data = {"command": "addInstanceTag",
                "instanceGroupTag": tag}
        return self._post_payload("/clusters/{0}/instances/{1}".format(
            cluster, instance), data)

    def del_instance_tag(self, cluster, instance, tag):
        """Remove *tag* from *instance*."""
        data = {"command": "removeInstanceTag",
                "instanceGroupTag": tag}
        return self._post_payload("/clusters/{0}/instances/{1}".format(
            cluster, instance), data)

    def add_resource_tag(self, cluster, resource, tag):
        """Add *tag* to resource group *resource*.

        :raises HelixDoesNotExistException: if the resource group is unknown.
        """
        if resource not in self.get_resource_groups(cluster):
            raise HelixDoesNotExistException(
                "ResourceGroup {0} does not exist".format(resource))
        data = {"command": "addResourceProperty",
                "INSTANCE_GROUP_TAG": tag}
        return self._post_payload("/clusters/{0}/resourceGroups/{1}/idealState".format(
            cluster, resource), data)

    # NOTE: del_resource_tag is intentionally absent — the Helix REST API does
    # not currently expose a "removeResourceProperty" endpoint.

    def get_instance_taginfo(self, cluster):
        """Return the per-tag instance info map for *cluster*."""
        return self._get_page("/clusters/{0}/instances".format(
            cluster))["tagInfo"]

    def expand_cluster(self, cluster):
        """Expand *cluster*."""
        data = {"command": "expandCluster"}
        return self._post_payload("/clusters/{0}/".format(cluster), data)

    def expand_resource(self, cluster, resource):
        """Expand *resource* within *cluster*."""
        data = {"command": "expandResource"}
        return self._post_payload("/clusters/{0}/resourceGroup/{1}/idealState".format(
            cluster, resource), data)

    def add_resource_property(self, cluster, resource, properties):
        """Add resource properties; *properties* must be a dict of properties."""
        properties["command"] = "addResourceProperty"
        return self._post_payload("/clusters/{0}/resourceGroup/{1}/idealState".format(
            cluster, resource), properties)

    def _handle_config(self, cluster, configs, command, participant=None,
                       resource=None):
        """Helper to set or delete configs in Helix.

        :param configs: dict of config key/value pairs
        :param command: "set" or "remove"
        :param participant: scope to a participant, if given
        :param resource: scope to a resource, if given (participant wins)
        """
        data = {"command": "{0}Config".format(command),
                "configs": ",".join(
                    ["{0}={1}".format(x, y) for x, y in configs.items()])}
        address = "/clusters/{0}/configs/".format(cluster)
        if participant:
            address += "participant/{0}".format(participant)
        elif resource:
            address += "resource/{0}".format(resource)
        else:
            address += "cluster"
        return self._post_payload(address, data)

    def set_config(self, cluster, configs, participant=None, resource=None):
        """Set configs in Helix."""
        return self._handle_config(cluster, configs, "set", participant, resource)

    def remove_config(self, cluster, configs, participant=None, resource=None):
        """Remove configs in Helix."""
        # BUG FIX: previously passed an undefined name ``host`` and the
        # remaining arguments in the wrong order for _handle_config.
        return self._handle_config(cluster, configs, "remove", participant,
                                   resource)

    def get_zk_path(self, path):
        """Get the content at a ZooKeeper path."""
        return self._get_page("zkPath/{0}".format(path))

    def del_zk_path(self, path):
        """Delete a ZooKeeper path."""
        return self._delete_page("zkPath/{0}".format(path))

    def get_zk_child(self, path):
        """Get a ZooKeeper child node."""
        return self._get_page("zkChild/{0}".format(path))

    def del_zk_child(self, path):
        """Delete a ZooKeeper child node."""
        return self._delete_page("zkChild/{0}".format(path))

    def add_state_model(self, cluster, newstate):
        """Add state model *newstate* to *cluster*."""
        data = {"command": "addStateModel"}
        return self._post_payload("/clusters/{0}/StateModelDefs".format(cluster),
                                  data, newStateModelDef=newstate)

    def del_instance(self, cluster, instance):
        """Delete *instance* from *cluster*.

        :raises HelixDoesNotExistException: if the instance is unknown.
        """
        if instance not in [x["id"] for x in self.get_instances(cluster)]:
            raise HelixDoesNotExistException(
                "Instance {0} does not exist.".format(instance))
        page = self._delete_page("/clusters/{0}/instances/{1}".format(cluster,
                                                                      instance))
        return page

    def del_resource(self, cluster, resource):
        """Delete *resource* from *cluster*.

        :raises HelixDoesNotExistException: if the resource group is unknown.
        """
        if resource not in self.get_resource_groups(cluster):
            raise HelixDoesNotExistException(
                "ResourceGroup {0} does not exist".format(resource))
        page = self._delete_page("/clusters/{0}/resourceGroups/{1}".format(
            cluster, resource))
        return page

    def del_cluster(self, cluster):
        """Delete *cluster*."""
        page = self._delete_page("/clusters/{0}".format(cluster))
        return page

    def send_message(self, cluster, path, **kwargs):
        """Stub: sending Helix messages is not implemented yet."""
        pass
| |
import sys
import time
import unittest
from nose.tools import *
compat_24 = sys.version_info >= (2, 4)
class TestTools(unittest.TestCase):
    """Tests for the helpers exported by nose.tools (ok_, eq_, raises,
    timed, with_setup, make_decorator).

    NOTE(review): this file uses Python 2-only syntax (``except E, e``,
    ``print`` statement, ``func_globals``) and must run under Python 2.
    """

    def test_ok(self):
        # ok_ asserts truthiness; the optional message becomes the
        # AssertionError text.
        ok_(True)
        try:
            ok_(False, "message")
        except AssertionError, e:
            assert str(e) == "message"
        else:
            self.fail("ok_(False) did not raise assertion error")

    def test_eq(self):
        # eq_ asserts equality; without a message it reports "a != b".
        eq_(1, 1)
        try:
            eq_(1, 0, "message")
        except AssertionError, e:
            assert str(e) == "message"
        else:
            self.fail("eq_(1, 0) did not raise assertion error")
        try:
            eq_(1, 0)
        except AssertionError, e:
            assert str(e) == "1 != 0"
        else:
            self.fail("eq_(1, 0) did not raise assertion error")

    def test_eq_unittest_flag(self):
        """Make sure eq_() is in a namespace that has __unittest = 1.
        This lets tracebacks refrain from descending into the eq_ frame.
        """
        assert '__unittest' in eq_.func_globals

    def test_istest_unittest_flag(self):
        """Make sure istest() is not in a namespace that has __unittest = 1.
        That is, make sure our __unittest labeling didn't get overzealous.
        """
        assert '__unittest' not in istest.func_globals

    def test_raises(self):
        # raises(E) wraps a function so it passes only when E is raised.
        from nose.case import FunctionTestCase

        def raise_typeerror():
            raise TypeError("foo")

        def noraise():
            pass

        raise_good = raises(TypeError)(raise_typeerror)
        raise_other = raises(ValueError)(raise_typeerror)
        no_raise = raises(TypeError)(noraise)

        # The wrapper must keep the wrapped function's identity for test names.
        tc = FunctionTestCase(raise_good)
        self.assertEqual(str(tc), "%s.%s" % (__name__, 'raise_typeerror'))

        raise_good()
        try:
            raise_other()
        except TypeError, e:
            # An unexpected exception type must propagate unchanged.
            pass
        else:
            self.fail("raises did pass through unwanted exception")

        try:
            no_raise()
        except AssertionError, e:
            # No exception at all must turn into an AssertionError.
            pass
        else:
            self.fail("raises did not raise assertion error on no exception")

    def test_timed(self):
        # timed(limit) fails the wrapped callable when it exceeds the limit,
        # and must pass through the callable's return value otherwise.

        def too_slow():
            time.sleep(.3)
        too_slow = timed(.2)(too_slow)

        def quick():
            time.sleep(.1)
        quick = timed(.2)(quick)

        def check_result():
            return 42
        check_result = timed(.2)(check_result)

        assert 42 == check_result()
        quick()
        try:
            too_slow()
        except TimeExpired:
            pass
        else:
            self.fail("Slow test did not throw TimeExpired")

    def test_make_decorator(self):
        # make_decorator must copy fixture attributes onto the wrapper.
        def func():
            pass
        func.setup = 'setup'
        func.teardown = 'teardown'

        def f1():
            pass

        f2 = make_decorator(func)(f1)
        assert f2.setup == 'setup'
        assert f2.teardown == 'teardown'

    def test_nested_decorators(self):
        # Stacked decorators must preserve the innermost setup/teardown.
        from nose.tools import raises, timed, with_setup

        def test():
            pass

        def foo():
            pass

        test = with_setup(foo, foo)(test)
        test = timed(1.0)(test)
        test = raises(TypeError)(test)
        assert test.setup == foo
        assert test.teardown == foo

    def test_decorator_func_sorting(self):
        # Decorating must not change the line number nose uses to sort tests.
        from nose.tools import raises, timed, with_setup
        from nose.util import func_lineno

        def test1():
            pass

        def test2():
            pass

        def test3():
            pass

        def foo():
            pass

        test1_pos = func_lineno(test1)
        test2_pos = func_lineno(test2)
        test3_pos = func_lineno(test3)

        test1 = raises(TypeError)(test1)
        test2 = timed(1.0)(test2)
        test3 = with_setup(foo)(test3)

        self.assertEqual(func_lineno(test1), test1_pos)
        self.assertEqual(func_lineno(test2), test2_pos)
        self.assertEqual(func_lineno(test3), test3_pos)

    def test_testcase_funcs(self):
        # nose.tools must re-export unittest-style assert_* helpers.
        import nose.tools
        tc_asserts = [ at for at in dir(nose.tools)
                       if at.startswith('assert_') ]
        print tc_asserts
        # FIXME: not sure which of these are in all supported
        # versions of python
        assert 'assert_raises' in tc_asserts
        if compat_24:
            assert 'assert_true' in tc_asserts

    def test_multiple_with_setup(self):
        # Nested with_setup layers must run setups outside-in and
        # teardowns inside-out.
        from nose.tools import with_setup
        from nose.case import FunctionTestCase
        from unittest import TestResult

        called = []

        def test():
            called.append('test')

        def test2():
            called.append('test2')

        def test3():
            called.append('test3')

        def s1():
            called.append('s1')

        def s2():
            called.append('s2')

        def s3():
            called.append('s3')

        def t1():
            called.append('t1')

        def t2():
            called.append('t2')

        def t3():
            called.append('t3')

        ws1 = with_setup(s1, t1)(test)
        case1 = FunctionTestCase(ws1)
        case1(TestResult())
        self.assertEqual(called, ['s1', 'test', 't1'])

        called[:] = []
        ws2 = with_setup(s2, t2)(test2)
        ws2 = with_setup(s1, t1)(ws2)
        case2 = FunctionTestCase(ws2)
        case2(TestResult())
        self.assertEqual(called, ['s1', 's2', 'test2', 't2', 't1'])

        called[:] = []
        ws3 = with_setup(s3, t3)(test3)
        ws3 = with_setup(s2, t2)(ws3)
        ws3 = with_setup(s1, t1)(ws3)
        case3 = FunctionTestCase(ws3)
        case3(TestResult())
        self.assertEqual(called, ['s1', 's2', 's3',
                                  'test3', 't3', 't2', 't1'])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
import os, sys
import pytest
from _pytest.monkeypatch import monkeypatch as MonkeyPatch
def pytest_funcarg__mp(request):
    """Fixture: a fresh MonkeyPatch whose finalizer restores sys.path and cwd."""
    saved_cwd = os.getcwd()
    saved_sys_path = list(sys.path)

    def restore():
        # Undo any syspath_prepend/chdir the test performed.
        sys.path[:] = saved_sys_path
        os.chdir(saved_cwd)

    request.addfinalizer(restore)
    return MonkeyPatch()
def test_setattr():
    # setattr records the prior value; undo() restores it exactly once.
    # NOTE: the string forms of pytest.raises evaluate in this frame, so the
    # local name must remain ``monkeypatch``.
    class A:
        x = 1
    monkeypatch = MonkeyPatch()
    # Setting a missing attribute raises unless raising=False is passed.
    pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
    monkeypatch.setattr(A, 'y', 2, raising=False)
    assert A.y == 2
    monkeypatch.undo()
    assert not hasattr(A, 'y')
    monkeypatch = MonkeyPatch()
    monkeypatch.setattr(A, 'x', 2)
    assert A.x == 2
    monkeypatch.setattr(A, 'x', 3)
    assert A.x == 3
    monkeypatch.undo()
    assert A.x == 1
    A.x = 5
    monkeypatch.undo() # double-undo makes no modification
    assert A.x == 5
class TestSetattrWithImportPath:
    """setattr/delattr addressed by dotted import-path strings."""

    def test_string_expression(self, monkeypatch):
        monkeypatch.setattr("os.path.abspath", lambda x: "hello2")
        result = os.path.abspath("123")
        assert result == "hello2"

    def test_string_expression_class(self, monkeypatch):
        monkeypatch.setattr("_pytest.config.Config", 42)
        import _pytest
        assert _pytest.config.Config == 42

    def test_unicode_string(self, monkeypatch):
        monkeypatch.setattr("_pytest.config.Config", 42)
        import _pytest
        assert _pytest.config.Config == 42
        monkeypatch.delattr("_pytest.config.Config")

    def test_wrong_target(self, monkeypatch):
        # A non-string, non-object target is rejected.
        def patch_bad_target():
            monkeypatch.setattr(None, None)
        pytest.raises(TypeError, patch_bad_target)

    def test_unknown_import(self, monkeypatch):
        # An unimportable module fails the test rather than erroring.
        def patch_unknown_module():
            monkeypatch.setattr("unkn123.classx", None)
        pytest.raises(pytest.fail.Exception, patch_unknown_module)

    def test_unknown_attr(self, monkeypatch):
        # A missing attribute on a real module also fails.
        def patch_unknown_attr():
            monkeypatch.setattr("os.path.qweqwe", None)
        pytest.raises(pytest.fail.Exception, patch_unknown_attr)

    def test_unknown_attr_non_raising(self, monkeypatch):
        # https://github.com/pytest-dev/pytest/issues/746
        monkeypatch.setattr('os.path.qweqwe', 42, raising=False)
        assert os.path.qweqwe == 42

    def test_delattr(self, monkeypatch):
        monkeypatch.delattr("os.path.abspath")
        assert not hasattr(os.path, "abspath")
        monkeypatch.undo()
        assert os.path.abspath
def test_delattr():
    # delattr removes the attribute; undo() restores the original value.
    # NOTE: the string form of pytest.raises evaluates in this frame, so the
    # local name must remain ``monkeypatch``.
    class A:
        x = 1
    monkeypatch = MonkeyPatch()
    monkeypatch.delattr(A, 'x')
    assert not hasattr(A, 'x')
    monkeypatch.undo()
    assert A.x == 1
    monkeypatch = MonkeyPatch()
    monkeypatch.delattr(A, 'x')
    # Deleting a missing attribute raises unless raising=False is passed.
    pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
    monkeypatch.delattr(A, 'y', raising=False)
    monkeypatch.setattr(A, 'x', 5, raising=False)
    assert A.x == 5
    monkeypatch.undo()
    assert A.x == 1
def test_setitem():
    """setitem remembers each key's first original; undo() restores it once."""
    mapping = {'x': 1}
    mp = MonkeyPatch()
    mp.setitem(mapping, 'x', 2)
    mp.setitem(mapping, 'y', 1700)
    mp.setitem(mapping, 'y', 1700)  # a repeated set must not clobber the original
    assert mapping['x'] == 2
    assert mapping['y'] == 1700
    mp.setitem(mapping, 'x', 3)
    assert mapping['x'] == 3
    mp.undo()
    assert mapping['x'] == 1
    assert 'y' not in mapping
    mapping['x'] = 5
    mp.undo()  # a second undo must not re-apply anything
    assert mapping['x'] == 5
def test_setitem_deleted_meanwhile():
    """undo() copes with a key the test itself deleted after setitem."""
    mapping = {}
    mp = MonkeyPatch()
    mp.setitem(mapping, 'x', 2)
    del mapping['x']
    mp.undo()
    assert not mapping
@pytest.mark.parametrize("before", [True, False])
def test_setenv_deleted_meanwhile(before):
    """undo() copes with an env var the test deleted after setenv."""
    key = "qwpeoip123"
    if before:
        os.environ[key] = "world"
    mp = MonkeyPatch()
    mp.setenv(key, 'hello')
    del os.environ[key]
    mp.undo()
    if before:
        # The pre-existing value must come back; clean it up afterwards.
        assert os.environ[key] == "world"
        del os.environ[key]
    else:
        assert key not in os.environ
def test_delitem():
    # delitem removes the key; undo() restores the original mapping while
    # keeping keys added outside the monkeypatch.
    # NOTE: the string form of pytest.raises evaluates in this frame, so the
    # local name must remain ``monkeypatch``.
    d = {'x': 1}
    monkeypatch = MonkeyPatch()
    monkeypatch.delitem(d, 'x')
    assert 'x' not in d
    monkeypatch.delitem(d, 'y', raising=False)
    pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')")
    assert not d
    monkeypatch.setitem(d, 'y', 1700)
    assert d['y'] == 1700
    d['hello'] = 'world'
    monkeypatch.setitem(d, 'x', 1500)
    assert d['x'] == 1500
    monkeypatch.undo()
    assert d == {'hello': 'world', 'x': 1}
def test_setenv():
    """setenv stringifies the value; undo() removes the variable again."""
    mp = MonkeyPatch()
    mp.setenv('XYZ123', 2)
    import os
    # The int value is exposed as the string "2" in the environment.
    assert os.environ['XYZ123'] == "2"
    mp.undo()
    assert 'XYZ123' not in os.environ
def test_delenv():
    # delenv removes an env var; undo() restores the pre-existing value.
    # NOTE: the string form of pytest.raises evaluates in this frame, so the
    # local name must remain ``monkeypatch``.
    name = 'xyz1234'
    assert name not in os.environ
    monkeypatch = MonkeyPatch()
    # Deleting a missing variable raises unless raising=False is passed.
    pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
    monkeypatch.delenv(name, raising=False)
    monkeypatch.undo()
    os.environ[name] = "1"
    try:
        monkeypatch = MonkeyPatch()
        monkeypatch.delenv(name)
        assert name not in os.environ
        monkeypatch.setenv(name, "3")
        assert os.environ[name] == "3"
        monkeypatch.undo()
        assert os.environ[name] == "1"
    finally:
        # Never leak the test variable into the surrounding environment.
        if name in os.environ:
            del os.environ[name]
def test_setenv_prepend():
    """setenv(..., prepend=sep) joins the new value in front of the old one."""
    import os
    monkeypatch = MonkeyPatch()
    monkeypatch.setenv('XYZ123', 2, prepend="-")
    assert os.environ['XYZ123'] == "2"
    monkeypatch.setenv('XYZ123', 3, prepend="-")
    assert os.environ['XYZ123'] == "3-2"
    monkeypatch.undo()
    assert 'XYZ123' not in os.environ
def test_monkeypatch_plugin(testdir):
    """The `monkeypatch` fixture is provided to tests run via the plugin."""
    reprec = testdir.inline_runsource("""
        def test_method(monkeypatch):
            assert monkeypatch.__class__.__name__ == "monkeypatch"
    """)
    res = reprec.countoutcomes()
    # countoutcomes() order is (passed, skipped, failed)
    assert tuple(res) == (1, 0, 0), res
def test_syspath_prepend(mp):
    """syspath_prepend() pushes entries onto sys.path; undo() restores it."""
    old = list(sys.path)
    mp.syspath_prepend('world')
    mp.syspath_prepend('hello')
    assert sys.path[0] == "hello"  # most recent prepend wins
    assert sys.path[1] == "world"
    mp.undo()
    assert sys.path == old
    mp.undo()  # a second undo() is a no-op
    assert sys.path == old
def test_syspath_prepend_double_undo(mp):
    """A second undo() must not revert changes made after the first undo()."""
    mp.syspath_prepend('hello world')
    mp.undo()
    sys.path.append('more hello world')
    mp.undo()
    assert sys.path[-1] == 'more hello world'
def test_chdir_with_path_local(mp, tmpdir):
    """chdir() accepts a py.path.local object directly."""
    mp.chdir(tmpdir)
    expected = tmpdir.strpath
    assert os.getcwd() == expected
def test_chdir_with_str(mp, tmpdir):
    """chdir() accepts a plain string path."""
    target = tmpdir.strpath
    mp.chdir(target)
    assert os.getcwd() == target
def test_chdir_undo(mp, tmpdir):
    """undo() restores the working directory active before chdir()."""
    starting_dir = os.getcwd()
    mp.chdir(tmpdir)
    mp.undo()
    assert os.getcwd() == starting_dir
def test_chdir_double_undo(mp, tmpdir):
    """A second undo() must not revert a chdir done after the first undo()."""
    mp.chdir(tmpdir.strpath)
    mp.undo()
    tmpdir.chdir()
    mp.undo()
    assert os.getcwd() == tmpdir.strpath
def test_issue185_time_breaks(testdir):
    """Patching time.time must not break pytest's own machinery (issue #185)."""
    testdir.makepyfile("""
        import time
        def test_m(monkeypatch):
            def f():
                raise Exception
            monkeypatch.setattr(time, "time", f)
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("""
        *1 passed*
    """)
class SampleNew(object):
    # New-style class with a staticmethod, used below to exercise
    # undo() of a patched staticmethod (issue #156).
    @staticmethod
    def hello():
        return True
class SampleNewInherit(SampleNew):
    # Inherits the staticmethod; patching/undo must work through inheritance.
    pass
class SampleOld:
    # Old-style class on Python 2 (no explicit `object` base).
    @staticmethod
    def hello():
        return True
class SampleOldInherit(SampleOld):
    # Old-style inheritance variant of the staticmethod fixture.
    pass
@pytest.mark.parametrize('Sample', [
    SampleNew, SampleNewInherit,
    SampleOld, SampleOldInherit,
], ids=['new', 'new-inherit', 'old', 'old-inherit'])
def test_issue156_undo_staticmethod(Sample):
    """undo() must restore a patched staticmethod as callable (issue #156)."""
    monkeypatch = MonkeyPatch()
    monkeypatch.setattr(Sample, 'hello', None)
    assert Sample.hello is None
    monkeypatch.undo()
    assert Sample.hello()
| |
import theano
import theano.tensor as T
import pickle
from collections import OrderedDict
import time
from .function_module import Function, WorkerFunction
from .collectives import shareds_registry
from . import exct
from .util import struct, PKL_FILE
# import ipdb
# Synchronization handle; starts unset and is assigned from outside this
# module — NOTE(review): confirm which module populates it.
sync = None
# Every Function built so far; a function's ID is its index in this list
# (see the ID=len(synk_functions) argument in function()).
synk_functions = list()
###############################################################################
# #
# API for building Functions #
# #
###############################################################################
def function(inputs, outputs=None, bcast_inputs=None, updates=None,
             givens=None, sliceable_shareds=None, **kwargs):
    """Replacement for ``theano.function()``, with a similar interface.  Builds
    underlying Theano functions, including support for function slicing.

    Args:
        inputs: as in Theano, to be scattered among workers
        outputs: as in Theano, with option to specify reduce operation (see
            notes below)
        bcast_inputs: as inputs in Theano, to be broadcast to all workers
        updates: as in Theano, with option to specify reduce operation (see
            notes below)
        givens: as in Theano
        sliceable_shareds: any implicit inputs (Theano shared variables) acting
            as data-parallel data (i.e. to be subjected to the kwarg ``batch_s``
            and/or to function slicing) must be listed here
        **kwargs: passed on to all internal calls to ``theano.function()``

    Reduce Operations:

    Outputs: May be specified simply as Theano tensor variables, as in normal
    Theano, or as two-tuples, as in (var, reduce-op), where reduce-op can be:
    "avg", "sum", "max", "min", "prod", or None.  Default is "avg".

    Updates: May be specified as a list of two-tuples, as in normal Theano, or
    may include triples, as in (var, update, reduce-op).  Unlike for outputs,
    the reduce-op here applies only when using function slicing.  Every slice
    is computed using the original values, and the update is accumulated over
    the slices.  (This may impose some limits on the form of the update
    expression.)  At the end of the function call, all updates are applied only
    locally, within each worker.  This provides clear control to user over
    when to communicate.

    Returns:
        synkhronos.function_module.Function: callable object, replacing a
            theano.Function

    Raises:
        RuntimeError: If Synkhronos not yet forked, or if already distributed
        TypeError: If incorrect format for arguments.
        ValueError: If entry in ``sliceable_shareds`` is not used in function,
            or for invalid reduce operation requested.
    """
    if not exct.state.forked:
        raise RuntimeError("Must fork before making functions for GPU.")
    if exct.state.distributed:
        raise RuntimeError("Cannot make new functions after distributing (for now).")
    if not isinstance(inputs, list):
        raise TypeError("Input 'inputs' must be list.")
    bcast_inputs = [] if bcast_inputs is None else bcast_inputs
    if not isinstance(bcast_inputs, list):
        raise TypeError("Input 'bcast_inputs' must be list if not None.")
    # Normalize outputs/updates into parallel lists of variables and modes.
    reg_outputs, gpu_outputs, to_cpu, output_modes = process_outputs(outputs)
    updates, update_vars, sliced_update_outs, update_modes = process_updates(updates)
    # Reference function: outputs on their original (possibly CPU) targets.
    theano_function = theano.function(
        inputs=inputs + bcast_inputs,
        outputs=reg_outputs,
        updates=updates,
        givens=givens,
        **kwargs
    )
    functions = struct(theano_function=theano_function)
    # "f": same computation, but all outputs kept on the GPU.
    functions["f"] = theano.function(
        inputs=inputs + bcast_inputs,
        outputs=gpu_outputs,
        updates=updates,
        givens=givens,
        **kwargs
    )
    # NOTE(review): "sliced" is built only when there are NO updates and NO
    # sliceable shareds; confirm that the updates-without-sliceable_shareds
    # case is intentionally excluded (the docstring implies slicing should
    # accumulate updates over slices).
    if not updates and not sliceable_shareds:
        functions["sliced"] = theano.function(
            inputs=inputs + bcast_inputs,
            outputs=gpu_outputs + sliced_update_outs,
            givens=givens,
            **kwargs
        )
    if sliceable_shareds:
        # Variants whose givens index the sliceable shareds either by a
        # (start, stop) range ("slc") or by an explicit index list ("lst").
        slc_givens, lst_givens, slc_inputs, lst_input = \
            process_givens(givens, sliceable_shareds, theano_function.get_shared())
        functions["slc_in"] = theano.function(
            inputs=inputs + bcast_inputs + slc_inputs,
            outputs=gpu_outputs,
            updates=updates,
            givens=slc_givens,
            **kwargs
        )
        functions["lst_in"] = theano.function(
            inputs=inputs + bcast_inputs + [lst_input],
            outputs=gpu_outputs,
            updates=updates,
            givens=lst_givens,
            **kwargs
        )
        # "sliced_*" variants return the would-be update values as extra
        # outputs instead of applying them, so the caller can accumulate
        # across slices.
        functions["sliced_slc_in"] = theano.function(
            inputs=inputs + bcast_inputs + slc_inputs,
            outputs=gpu_outputs + sliced_update_outs,
            givens=slc_givens,
            **kwargs
        )
        functions["sliced_lst_in"] = theano.function(
            inputs=inputs + bcast_inputs + [lst_input],
            outputs=gpu_outputs + sliced_update_outs,
            givens=lst_givens,
            **kwargs
        )
    synk_function = Function(ID=len(synk_functions),
                             functions=functions,
                             inputs=inputs,
                             bcast_inputs=bcast_inputs,
                             slc_shareds=sliceable_shareds,
                             update_vars=update_vars,
                             to_cpu=to_cpu,
                             collect_modes=output_modes + update_modes,
                             return_list=isinstance(outputs, list),
                             )
    synk_functions.append(synk_function)
    shareds_registry.register_func(theano_function)
    return synk_function
def distribute():
    """Replicates all Synkhronos functions and their Theano shared variables in
    worker processes / GPUs.  It must be called after building the last
    Synkhronos function and before calling any Synkhronos function.

    It pickles all underlying Theano functions into one file, which workers
    unpickle.  All Theano shared variable data is included, and correspondences
    between variables across functions is preserved.  The pickle file is
    automatically deleted by a worker.  The default file location is in the
    directory synkhronos/pkl/, but this can be changed by modifying ``PKL_PATH``
    in synkhronos/util.py.

    Raises:
        RuntimeError: If not yet forked or if already distributed.
    """
    # NOTE(review): the docstring mentions raising when already distributed,
    # but only the forked state is checked here — confirm intent.
    if not exct.state.forked:
        raise RuntimeError("Need to fork before distributing functions.")
    print("Synkhronos distributing functions...")
    t_start = time.time()
    # One pickle containing every function keeps shared-variable identity
    # consistent across functions when workers unpickle.
    distribution = [sf._get_distro_info() for sf in synk_functions]
    with open(PKL_FILE, "wb") as f:
        pickle.dump(distribution, f, pickle.HIGHEST_PROTOCOL)
    exct.launch(exct.DISTRIBUTE)
    exct.join()  # wait for all workers to finish loading
    print("...distribution complete ({:.0f} s).".format(time.time() - t_start))
    exct.state.distributed = True
###############################################################################
# #
# Helpers #
# #
###############################################################################
# All reduce/collect modes accepted for outputs and updates; None disables
# collection.  The "c_" prefix presumably selects a CPU-side collective
# implementation — TODO confirm against function_module.
COLLECT_MODES = ["avg", "sum", "prod", "min", "max", "gather",
                 "c_avg", "c_sum", "c_prod", "c_min", "c_max", "c_gather", None]
def process_outputs(outputs):
    """Normalize the ``outputs`` argument into parallel lists.

    Accepts None, a single variable, a (var, mode) tuple, or a list mixing
    bare variables and (var, mode) tuples.  Returns a 4-tuple
    (output_vars, gpu_vars, to_cpu, output_modes).
    """
    if outputs is None:
        return None, [], [], []
    from theano.gpuarray.type import GpuArrayVariable
    len_err = TypeError("Output tuples must be length 2: (var, collect_mode).")
    variables = list()
    modes = list()

    def add(entry):
        # A tuple carries an explicit collect mode; default is "avg".
        if isinstance(entry, tuple):
            if len(entry) != 2:
                raise len_err
            variables.append(entry[0])
            modes.append(entry[1])
        else:
            variables.append(entry)
            modes.append("avg")

    if isinstance(outputs, list):
        for entry in outputs:
            add(entry)
    else:
        add(outputs)
    check_collect_modes(modes)
    # Outputs not already on the GPU are moved back to the CPU after collect.
    needs_cpu = [not isinstance(v, GpuArrayVariable) for v in variables]
    on_gpu = [v.transfer(None) for v in variables]
    return variables, on_gpu, needs_cpu, modes
def process_updates(updates):
    """Normalize ``updates`` into parallel lists.

    Accepts None, an OrderedDict (legacy), or a list of (var, new_value) or
    (var, new_value, slice_mode) tuples.  Returns a 4-tuple
    (reg_updates, update_vars, update_gpu_outs, update_modes).
    """
    if updates is None:
        return None, [], [], []
    in_err = TypeError("Input 'updates' should be a list of tuples: "
                       "(var, new_value [, slice_mode])")
    if isinstance(updates, OrderedDict):  # (legacy only)
        triples = [(k, v, "avg") for k, v in updates.items()]
    else:
        if not isinstance(updates, list):
            raise in_err
        triples = list()
        for entry in updates:
            if not isinstance(entry, tuple) or len(entry) not in (2, 3):
                raise in_err
            mode = entry[2] if len(entry) == 3 else "avg"
            triples.append((entry[0], entry[1], mode))
    pairs = list()
    variables = list()
    gpu_outs = list()
    modes = list()
    for var, new_value, mode in triples:
        pairs.append((var, new_value))
        variables.append(var)
        gpu_outs.append(new_value.transfer(None))  # GPU copy of the update value
        modes.append(mode)
    check_collect_modes(modes)
    return pairs, variables, gpu_outs, modes
def process_givens(givens, sliceable_shareds, f_shareds):
    """Build the givens dicts and index inputs for the sliced/list variants.

    Args:
        givens: user-supplied givens (None, list/tuple of pairs, or dict)
        sliceable_shareds: shared variables to be indexed per call
        f_shareds: shared variables actually used by the compiled function

    Returns:
        (slc_givens, lst_givens, slc_inputs, lst_input): givens mapping each
        sliceable shared to a (start:stop) slice or fancy-index view, plus
        the corresponding new index input variables.

    Raises:
        TypeError: if ``sliceable_shareds`` is not a list.
        ValueError: if an entry of ``sliceable_shareds`` is not among the
            function's shared variables.
    """
    if givens is None:
        givens = list()
    if isinstance(givens, (list, tuple)):
        givens = {g[0]: g[1] for g in givens}
    if not isinstance(sliceable_shareds, list):
        raise TypeError("Optional param `sliceable_shareds` must be list.")
    for var in sliceable_shareds:
        if var not in f_shareds:
            raise ValueError("At least one of sliceable_shareds not in "
                "function's shareds: sliceable: {}, function's: {}".format(
                sliceable_shareds, f_shareds))
    start_input = T.lscalar('start')
    stop_input = T.lscalar('stop')
    slc_inputs = [start_input, stop_input]
    lst_input = T.lvector('lst')
    slc_givens = dict()
    lst_givens = dict()
    remaining_ss = list(sliceable_shareds)
    for k, v in givens.items():
        if v in sliceable_shareds:
            # The given's replacement value becomes an indexed view of the shared.
            slc_givens[k] = v[start_input:stop_input].transfer(None)
            lst_givens[k] = v[lst_input].transfer(None)  # NOTE: needed on gpu subtensor, probably just theano bug
            if v in remaining_ss:
                remaining_ss.pop(remaining_ss.index(v))
        else:
            slc_givens[k] = v
            lst_givens[k] = v
    # Sliceable shareds not mentioned in givens are substituted directly.
    for var in remaining_ss:
        slc_givens[var] = var[start_input:stop_input].transfer(None)
        lst_givens[var] = var[lst_input].transfer(None)
    # FIXME: might not replace everywhere var is used, for instance if it is
    # used in a given but is already an ancestor to another part of the graph,
    # only the given will have the var replaced.
    return slc_givens, lst_givens, slc_inputs, lst_input
# def process_givens_old(givens, sliced_shareds):
# if sliced_shareds is None:
# return givens, None, [], []
# import theano.tensor as T
# giv_err = TypeError("If using 'sliced_shareds', givens must be list of 2-tuples.")
# s_err = TypeError("Input 'sliced_shareds' must be list, elements are "
# "individual shared variables or 2-tuples: (var, given_var)")
# if givens is None: givens = list()
# if not isinstance(givens, list):
# raise giv_err
# for g in givens:
# if not isinstance(g, tuple) or len(g) != 2:
# raise giv_err
# if not isinstance(sliced_shareds, list): raise s_err
# start = T.lscalar()
# end = T.lscalar()
# slc_givens = list()
# slc_shareds = list()
# for ss in sliced_shareds:
# if isinstance(ss, tuple):
# if len(ss) != 2: raise s_err
# givens.append(ss)
# slc_givens.append((ss[0], ss[1][start:end]))
# slc_shareds.append(ss[1])
# else:
# slc_givens.append((ss, ss[start:end]))
# slc_shareds.append(ss)
# if len(givens) == 0: givens = None
# if len(slc_givens) == 0: slc_givens = None
# slc_idx_inputs = [] if slc_shareds is None else [start, end]
# return givens, slc_givens, slc_idx_inputs, slc_shareds
def check_collect_modes(collect_modes):
    """Raise ValueError if any entry of ``collect_modes`` is not a valid mode.

    Valid modes are listed in the module-level ``COLLECT_MODES``.
    """
    # Generator instead of a throwaway list inside any() (flake8-comprehensions
    # C419); short-circuits on the first invalid mode.
    if any(mode not in COLLECT_MODES for mode in collect_modes):
        raise ValueError("Had an invalid collect mode in: \n{}"
            "\n\tpossible modes are: \n{}".format(collect_modes, COLLECT_MODES))
###############################################################################
# #
# Worker Tasks #
# #
###############################################################################
def receive_distribution():
    """Worker-side counterpart to ``distribute()``: load the pickled functions.

    Rebuilds WorkerFunction objects from the master's pickle file and
    re-registers their shared variables; exactly one worker then deletes
    the pickle file.
    """
    with open(PKL_FILE, "rb") as f:
        distribution = pickle.load(f)
    if sync.barrier.wait() == 0:  # (only one worker does it)
        import os
        os.remove(PKL_FILE)  # (leave no trace)
    synk_funcs = list()
    shareds_registry.reset()
    for i, f_info in enumerate(distribution):
        # IDs were assigned as list indices on the master; order must match.
        assert f_info["ID"] == i
        synk_funcs.append(WorkerFunction(**f_info))
        shareds_registry.register_func(f_info["functions"]["theano_function"])
    return synk_funcs
| |
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
import sys
from keystoneclient import auth
from keystoneclient import session as ks_session
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log as logging
import oslo_messaging
from paste import deploy
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.i18n import _LI
from neutron import policy
from neutron import version
LOG = logging.getLogger(__name__)
# Core server options, registered on the DEFAULT configuration section.
core_opts = [
    # --- API binding / paste / auth ---
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_("The host IP to bind to")),
    cfg.IntOpt('bind_port', default=9696,
               help=_("The port to bind to")),
    cfg.StrOpt('api_paste_config', default="api-paste.ini",
               help=_("The API paste config file to use")),
    cfg.StrOpt('api_extensions_path', default="",
               help=_("The path for API extensions")),
    cfg.StrOpt('auth_strategy', default='keystone',
               help=_("The type of authentication to use")),
    # --- plugins ---
    cfg.StrOpt('core_plugin',
               help=_("The core plugin Neutron will use")),
    cfg.ListOpt('service_plugins', default=[],
                help=_("The service plugins Neutron will use")),
    # --- addressing / API behavior ---
    cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
               help=_("The base MAC address Neutron will use for VIFs")),
    cfg.IntOpt('mac_generation_retries', default=16,
               help=_("How many times Neutron will retry MAC generation")),
    cfg.BoolOpt('allow_bulk', default=True,
                help=_("Allow the usage of the bulk API")),
    cfg.BoolOpt('allow_pagination', default=False,
                help=_("Allow the usage of the pagination")),
    cfg.BoolOpt('allow_sorting', default=False,
                help=_("Allow the usage of the sorting")),
    cfg.StrOpt('pagination_max_limit', default="-1",
               help=_("The maximum number of items returned in a single "
                      "response, value was 'infinite' or negative integer "
                      "means no limit")),
    cfg.IntOpt('max_dns_nameservers', default=5,
               help=_("Maximum number of DNS nameservers")),
    cfg.IntOpt('max_subnet_host_routes', default=20,
               help=_("Maximum number of host routes per subnet")),
    cfg.IntOpt('max_fixed_ips_per_port', default=5,
               help=_("Maximum number of fixed ips per port")),
    cfg.StrOpt('default_ipv4_subnet_pool', default=None,
               help=_("Default IPv4 subnet-pool to be used for automatic "
                      "subnet CIDR allocation")),
    cfg.StrOpt('default_ipv6_subnet_pool', default=None,
               help=_("Default IPv6 subnet-pool to be used for automatic "
                      "subnet CIDR allocation")),
    # --- DHCP / DNS ---
    cfg.IntOpt('dhcp_lease_duration', default=86400,
               deprecated_name='dhcp_lease_time',
               help=_("DHCP lease duration (in seconds). Use -1 to tell "
                      "dnsmasq to use infinite lease times.")),
    cfg.StrOpt('dns_domain',
               default='openstacklocal',
               help=_('Domain to use for building the hostnames')),
    cfg.BoolOpt('dhcp_agent_notification', default=True,
                help=_("Allow sending resource operation"
                       " notification to DHCP agent")),
    cfg.BoolOpt('allow_overlapping_ips', default=False,
                help=_("Allow overlapping IP support in Neutron")),
    cfg.StrOpt('host', default=utils.get_hostname(),
               help=_("Hostname to be used by the neutron server, agents and "
                      "services running on this machine. All the agents and "
                      "services running on this machine must use the same "
                      "host value.")),
    cfg.BoolOpt('force_gateway_on_subnet', default=True,
                help=_("Ensure that configured gateway is on subnet. "
                       "For IPv6, validate only if gateway is not a link "
                       "local address. Deprecated, to be removed during the "
                       "K release, at which point the check will be "
                       "mandatory.")),
    # --- nova interaction (mostly deprecated in favour of the [nova] section) ---
    cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
                help=_("Send notification to nova when port status changes")),
    cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
                help=_("Send notification to nova when port data (fixed_ips/"
                       "floatingip) changes so nova can update its cache.")),
    cfg.StrOpt('nova_url',
               default='http://127.0.0.1:8774/v2',
               help=_('URL for connection to nova. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.StrOpt('nova_admin_username',
               help=_('Username for connecting to nova in admin context. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.StrOpt('nova_admin_password',
               help=_('Password for connection to nova in admin context. '
                      'Deprecated in favour of an auth plugin in [nova].'),
               secret=True),
    cfg.StrOpt('nova_admin_tenant_id',
               help=_('The uuid of the admin nova tenant. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.StrOpt('nova_admin_tenant_name',
               help=_('The name of the admin nova tenant. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.StrOpt('nova_admin_auth_url',
               default='http://localhost:5000/v2.0',
               help=_('Authorization URL for connecting to nova in admin '
                      'context. '
                      'Deprecated in favour of an auth plugin in [nova].')),
    cfg.IntOpt('send_events_interval', default=2,
               help=_('Number of seconds between sending events to nova if '
                      'there are any events to send.')),
    # --- misc networking features ---
    cfg.BoolOpt('advertise_mtu', default=False,
                help=_('If True, effort is made to advertise MTU settings '
                       'to VMs via network methods (DHCP and RA MTU options) '
                       'when the network\'s preferred MTU is known.')),
    cfg.StrOpt('ipam_driver', default=None,
               help=_('IPAM driver to use.')),
    cfg.BoolOpt('vlan_transparent', default=False,
                help=_('If True, then allow plugins that support it to '
                       'create VLAN transparent networks.')),
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
oslo_messaging.set_transport_defaults(control_exchange='neutron')
def set_db_defaults():
    """Install Neutron's oslo.db defaults (connection URL, QueuePool sizing)."""
    # Update the default QueuePool parameters. These can be tweaked by the
    # conf variables - max_pool_size, max_overflow and pool_timeout
    db_options.set_defaults(
        cfg.CONF,
        connection='sqlite://',
        sqlite_db='', max_pool_size=10,
        max_overflow=20, pool_timeout=10)
set_db_defaults()  # applied at import time, before config files are parsed
# Options for talking to nova live in their own [nova] config section.
NOVA_CONF_SECTION = 'nova'
# Old DEFAULT-section option names kept as deprecated aliases.
nova_deprecated_opts = {
    'cafile': [cfg.DeprecatedOpt('nova_ca_certificates_file', 'DEFAULT')],
    'insecure': [cfg.DeprecatedOpt('nova_api_insecure', 'DEFAULT')],
}
# Register keystone session and auth-plugin options under [nova].
ks_session.Session.register_conf_options(cfg.CONF, NOVA_CONF_SECTION,
                                         deprecated_opts=nova_deprecated_opts)
auth.register_conf_options(cfg.CONF, NOVA_CONF_SECTION)
nova_opts = [
    cfg.StrOpt('region_name',
               deprecated_name='nova_region_name',
               deprecated_group='DEFAULT',
               help=_('Name of nova region to use. Useful if keystone manages'
                      ' more than one region.')),
]
cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION)
logging.register_options(cfg.CONF)
def init(args, **kwargs):
    """Parse the Neutron configuration and initialize the RPC layer.

    :param args: command-line arguments forwarded to ``cfg.CONF``
    :raises Exception: when the configured ``base_mac`` is malformed
    """
    cfg.CONF(args=args, project='neutron',
             version='%%(prog)s %s' % version.version_info.release_string(),
             **kwargs)
    # FIXME(ihrachys): if import is put in global, circular import
    # failure occurs
    from neutron.common import rpc as n_rpc
    n_rpc.init(cfg.CONF)
    # Validate that the base_mac is of the correct format
    msg = attributes._validate_regex(cfg.CONF.base_mac,
                                     attributes.MAC_PATTERN)
    if msg:
        msg = _("Base MAC: %s") % msg
        raise Exception(msg)
def setup_logging():
    """Sets up the logging options for a log with supplied name."""
    product_name = "neutron"
    logging.setup(cfg.CONF, product_name)
    # Record startup context so operators can identify the running build.
    LOG.info(_LI("Logging enabled!"))
    LOG.info(_LI("%(prog)s version %(version)s"),
             {'prog': sys.argv[0],
              'version': version.version_info.release_string()})
    LOG.debug("command line: %s", " ".join(sys.argv))
def reset_service():
    """Re-initialize mutable service state; invoked on SIGHUP in daemon mode."""
    # Reset worker in case SIGHUP is called.
    # Note that this is called only in case a service is running in
    # daemon mode.
    setup_logging()
    policy.refresh()
def load_paste_app(app_name):
    """Builds and returns a WSGI app from a paste config file.

    :param app_name: Name of the application to load
    :raises ConfigFilesNotFoundError when config file cannot be located
    :raises RuntimeError when application cannot be loaded from config file
    """
    config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
    if not config_path:
        raise cfg.ConfigFilesNotFoundError(
            config_files=[cfg.CONF.api_paste_config])
    config_path = os.path.abspath(config_path)
    LOG.info(_LI("Config paste file: %s"), config_path)
    try:
        app = deploy.loadapp("config:%s" % config_path, name=app_name)
    except (LookupError, ImportError):
        # Re-raise as RuntimeError so callers need not know paste internals.
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(config_path)s.") %
               {'app_name': app_name,
                'config_path': config_path})
        LOG.exception(msg)
        raise RuntimeError(msg)
    return app
| |
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import emits_warning
import pickle
from sqlalchemy import Integer, String, UniqueConstraint, \
CheckConstraint, ForeignKey, MetaData, Sequence, \
ForeignKeyConstraint, ColumnDefault, Index, event,\
events, Unicode, types as sqltypes
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy import schema, exc
import sqlalchemy as tsa
from sqlalchemy.testing import fixtures
from sqlalchemy import testing
from sqlalchemy.testing import ComparesTables, AssertsCompiledSQL
from sqlalchemy.testing import eq_, is_
class MetaDataTest(fixtures.TestBase, ComparesTables):
    def test_metadata_connect(self):
        """Binding a MetaData to an engine enables bound-table operations."""
        metadata = MetaData()
        t1 = Table('table1', metadata,
                   Column('col1', Integer, primary_key=True),
                   Column('col2', String(20)))
        metadata.bind = testing.db
        metadata.create_all()
        try:
            assert t1.count().scalar() == 0
        finally:
            metadata.drop_all()
    def test_metadata_contains(self):
        """Membership tests on MetaData respect schema and object identity."""
        metadata = MetaData()
        t1 = Table('t1', metadata, Column('x', Integer))
        t2 = Table('t2', metadata, Column('x', Integer), schema='foo')
        t3 = Table('t2', MetaData(), Column('x', Integer))
        t4 = Table('t1', MetaData(), Column('x', Integer), schema='foo')
        assert "t1" in metadata
        assert "foo.t2" in metadata  # schema-qualified key
        assert "t2" not in metadata
        assert "foo.t1" not in metadata
        assert t1 in metadata
        assert t2 in metadata
        assert t3 not in metadata  # same name, different MetaData
        assert t4 not in metadata
    def test_uninitialized_column_copy(self):
        """Column.copy() preserves attributes even before table attachment."""
        for col in [
            Column('foo', String(), nullable=False),
            Column('baz', String(), unique=True),
            Column(Integer(), primary_key=True),
            Column('bar', Integer(), Sequence('foo_seq'), primary_key=True,
                   key='bar'),
            Column(Integer(), ForeignKey('bat.blah'), doc="this is a col"),
            Column('bar', Integer(), ForeignKey('bat.blah'), primary_key=True,
                   key='bar'),
            Column('bar', Integer(), info={'foo': 'bar'}),
        ]:
            c2 = col.copy()
            for attr in ('name', 'type', 'nullable',
                         'primary_key', 'key', 'unique', 'info',
                         'doc'):
                eq_(getattr(col, attr), getattr(c2, attr))
            eq_(len(col.foreign_keys), len(c2.foreign_keys))
            if col.default:
                eq_(c2.default.name, 'foo_seq')
            # foreign key objects are copied, not shared
            for a1, a2 in zip(col.foreign_keys, c2.foreign_keys):
                assert a1 is not a2
                eq_(a2._colspec, 'bat.blah')
    def test_col_subclass_copy(self):
        """A Column subclass that overrides copy() keeps its extra state."""
        class MyColumn(schema.Column):
            def __init__(self, *args, **kw):
                # custom attribute unknown to the base Column
                self.widget = kw.pop('widget', None)
                super(MyColumn, self).__init__(*args, **kw)
            def copy(self, *arg, **kw):
                c = super(MyColumn, self).copy(*arg, **kw)
                c.widget = self.widget
                return c
        c1 = MyColumn('foo', Integer, widget='x')
        c2 = c1.copy()
        assert isinstance(c2, MyColumn)
        eq_(c2.widget, 'x')
def test_uninitialized_column_copy_events(self):
msgs = []
def write(c, t):
msgs.append("attach %s.%s" % (t.name, c.name))
c1 = Column('foo', String())
m = MetaData()
for i in xrange(3):
cx = c1.copy()
# as of 0.7, these events no longer copy. its expected
# that listeners will be re-established from the
# natural construction of things.
cx._on_table_attach(write)
Table('foo%d' % i, m, cx)
eq_(msgs, ['attach foo0.foo', 'attach foo1.foo', 'attach foo2.foo'])
    def test_schema_collection_add(self):
        """MetaData._schemas tracks each distinct schema used by its tables."""
        metadata = MetaData()
        Table('t1', metadata, Column('x', Integer), schema='foo')
        Table('t2', metadata, Column('x', Integer), schema='bar')
        Table('t3', metadata, Column('x', Integer))  # default schema: not tracked
        eq_(metadata._schemas, set(['foo', 'bar']))
        eq_(len(metadata.tables), 3)
    def test_schema_collection_remove(self):
        """Removing a table drops its schema only when no table still uses it."""
        metadata = MetaData()
        t1 = Table('t1', metadata, Column('x', Integer), schema='foo')
        Table('t2', metadata, Column('x', Integer), schema='bar')
        t3 = Table('t3', metadata, Column('x', Integer), schema='bar')
        metadata.remove(t3)
        eq_(metadata._schemas, set(['foo', 'bar']))  # 'bar' still used by t2
        eq_(len(metadata.tables), 2)
        metadata.remove(t1)
        eq_(metadata._schemas, set(['bar']))
        eq_(len(metadata.tables), 1)
    def test_schema_collection_remove_all(self):
        """clear() empties both the table collection and the schema set."""
        metadata = MetaData()
        Table('t1', metadata, Column('x', Integer), schema='foo')
        Table('t2', metadata, Column('x', Integer), schema='bar')
        metadata.clear()
        eq_(metadata._schemas, set())
        eq_(len(metadata.tables), 0)
    def test_metadata_tables_immutable(self):
        """MetaData.tables is an immutable mapping; mutators raise TypeError."""
        metadata = MetaData()
        Table('t1', metadata, Column('x', Integer))
        assert 't1' in metadata.tables
        assert_raises(
            TypeError,
            lambda: metadata.tables.pop('t1')
        )
    @testing.provide_metadata
    def test_dupe_tables(self):
        """Redefining an existing Table without extend_existing raises."""
        metadata = self.metadata
        Table('table1', metadata,
              Column('col1', Integer, primary_key=True),
              Column('col2', String(20)))
        metadata.create_all()
        Table('table1', metadata, autoload=True)  # reflection alone is allowed
        def go():
            Table('table1', metadata,
                  Column('col1', Integer, primary_key=True),
                  Column('col2', String(20)))
        assert_raises_message(
            tsa.exc.InvalidRequestError,
            "Table 'table1' is already defined for this "
            "MetaData instance. Specify 'extend_existing=True' "
            "to redefine options and columns on an existing "
            "Table object.",
            go
        )
    def test_fk_copy(self):
        """copy() of ForeignKey / ForeignKeyConstraint preserves all options."""
        c1 = Column('foo', Integer)
        c2 = Column('bar', Integer)
        m = MetaData()
        t1 = Table('t', m, c1, c2)
        kw = dict(onupdate="X",
                  ondelete="Y", use_alter=True, name='f1',
                  deferrable="Z", initially="Q", link_to_name=True)
        fk1 = ForeignKey(c1, **kw)
        fk2 = ForeignKeyConstraint((c1,), (c2,), **kw)
        t1.append_constraint(fk2)
        fk1c = fk1.copy()
        fk2c = fk2.copy()
        # every keyword option must survive the copy on both forms
        for k in kw:
            eq_(getattr(fk1c, k), kw[k])
            eq_(getattr(fk2c, k), kw[k])
    def test_check_constraint_copy(self):
        """copy() of a CheckConstraint carries text, flags and _create_rule."""
        r = lambda x: x
        c = CheckConstraint("foo bar",
                            name='name',
                            initially=True,
                            deferrable=True,
                            _create_rule=r)
        c2 = c.copy()
        eq_(c2.name, 'name')
        eq_(str(c2.sqltext), "foo bar")
        eq_(c2.initially, True)
        eq_(c2.deferrable, True)
        assert c2._create_rule is r  # the rule callable is shared, not copied
    def test_col_replace_w_constraint(self):
        """Re-appending a column already in the table keeps its FK intact."""
        m = MetaData()
        a = Table('a', m, Column('id', Integer, primary_key=True))
        aid = Column('a_id', ForeignKey('a.id'))
        b = Table('b', m, aid)
        b.append_column(aid)  # replaces the column with itself
        assert b.c.a_id.references(a.c.id)
        # the FK constraint must not be duplicated by the second append
        eq_(len(b.constraints), 2)
    def test_fk_construct(self):
        """ForeignKeyConstraint(table=...) attaches itself to the table."""
        c1 = Column('foo', Integer)
        c2 = Column('bar', Integer)
        m = MetaData()
        t1 = Table('t', m, c1, c2)
        fk1 = ForeignKeyConstraint(('foo', ), ('bar', ), table=t1)
        assert fk1 in t1.constraints
    def test_fk_no_such_parent_col_error(self):
        """Appending an FK constraint naming a missing local column raises."""
        meta = MetaData()
        a = Table('a', meta, Column('a', Integer))
        Table('b', meta, Column('b', Integer))
        def go():
            a.append_constraint(
                ForeignKeyConstraint(['x'], ['b.b'])
            )
        assert_raises_message(
            exc.ArgumentError,
            "Can't create ForeignKeyConstraint on "
            "table 'a': no column named 'x' is present.",
            go
        )
    def test_fk_no_such_target_col_error(self):
        """An FK to a missing remote column fails lazily, on resolution."""
        meta = MetaData()
        a = Table('a', meta, Column('a', Integer))
        Table('b', meta, Column('b', Integer))
        # appending succeeds; the error surfaces only when .column is resolved
        a.append_constraint(
            ForeignKeyConstraint(['a'], ['b.x'])
        )
        def go():
            list(a.c.a.foreign_keys)[0].column
        assert_raises_message(
            exc.NoReferencedColumnError,
            "Could not create ForeignKey 'b.x' on "
            "table 'a': table 'b' has no column named 'x'",
            go
        )
    @testing.exclude('mysql', '<', (4, 1, 1), 'early types are squirrely')
    def test_to_metadata(self):
        """Copying tables via tometadata() and via pickling (direct and
        post-reflection) preserves columns, constraints and FK linkage."""
        meta = MetaData()
        table = Table('mytable', meta,
            Column('myid', Integer, Sequence('foo_id_seq'), primary_key=True),
            Column('name', String(40), nullable=True),
            Column('foo', String(40), nullable=False, server_default='x',
                   server_onupdate='q'),
            Column('bar', String(40), nullable=False, default='y',
                   onupdate='z'),
            Column('description', String(30),
                   CheckConstraint("description='hi'")),
            UniqueConstraint('name'),
            test_needs_fk=True,
        )
        table2 = Table('othertable', meta,
            Column('id', Integer, Sequence('foo_seq'), primary_key=True),
            Column('myid', Integer,
                   ForeignKey('mytable.myid'),
                   ),
            test_needs_fk=True,
        )
        # three alternative ways of producing (table_c, table2_c) copies:
        def test_to_metadata():
            meta2 = MetaData()
            table_c = table.tometadata(meta2)
            table2_c = table2.tometadata(meta2)
            return (table_c, table2_c)
        def test_pickle():
            meta.bind = testing.db
            meta2 = pickle.loads(pickle.dumps(meta))
            assert meta2.bind is None  # engine binding never survives pickling
            pickle.loads(pickle.dumps(meta2))
            return (meta2.tables['mytable'], meta2.tables['othertable'])
        def test_pickle_via_reflect():
            # this is the most common use case, pickling the results of a
            # database reflection
            meta2 = MetaData(bind=testing.db)
            t1 = Table('mytable', meta2, autoload=True)
            Table('othertable', meta2, autoload=True)
            meta3 = pickle.loads(pickle.dumps(meta2))
            assert meta3.bind is None
            assert meta3.tables['mytable'] is not t1
            return (meta3.tables['mytable'], meta3.tables['othertable'])
        meta.create_all(testing.db)
        try:
            for test, has_constraints, reflect in \
                    (test_to_metadata, True, False), \
                    (test_pickle, True, False), \
                    (test_pickle_via_reflect, False, True):
                table_c, table2_c = test()
                self.assert_tables_equal(table, table_c)
                self.assert_tables_equal(table2, table2_c)
                # copies are distinct objects, with FKs pointing into the copy set
                assert table is not table_c
                assert table.primary_key is not table_c.primary_key
                assert list(table2_c.c.myid.foreign_keys)[0].column \
                    is table_c.c.myid
                assert list(table2_c.c.myid.foreign_keys)[0].column \
                    is not table.c.myid
                assert 'x' in str(table_c.c.foo.server_default.arg)
                if not reflect:
                    # client-side defaults/onupdates are checked only for the
                    # non-reflected copies
                    assert isinstance(table_c.c.myid.default, Sequence)
                    assert str(table_c.c.foo.server_onupdate.arg) == 'q'
                    assert str(table_c.c.bar.default.arg) == 'y'
                    assert getattr(table_c.c.bar.onupdate.arg, 'arg',
                                   table_c.c.bar.onupdate.arg) == 'z'
                    assert isinstance(table2_c.c.id.default, Sequence)
                # constraints dont get reflected for any dialect right
                # now
                if has_constraints:
                    for c in table_c.c.description.constraints:
                        if isinstance(c, CheckConstraint):
                            break
                    else:
                        assert False
                    assert str(c.sqltext) == "description='hi'"
                    for c in table_c.constraints:
                        if isinstance(c, UniqueConstraint):
                            break
                    else:
                        assert False
                    assert c.columns.contains_column(table_c.c.name)
                    assert not c.columns.contains_column(table.c.name)
        finally:
            meta.drop_all(testing.db)
def test_col_key_fk_parent_tometadata(self):
# test #2643
m1 = MetaData()
a = Table('a', m1, Column('x', Integer))
b = Table('b', m1, Column('x', Integer, ForeignKey('a.x'), key='y'))
assert b.c.y.references(a.c.x)
m2 = MetaData()
b2 = b.tometadata(m2)
a2 = a.tometadata(m2)
assert b2.c.y.references(a2.c.x)
def test_pickle_metadata_sequence_restated(self):
m1 = MetaData()
Table('a', m1,
Column('id', Integer, primary_key=True),
Column('x', Integer, Sequence("x_seq")))
m2 = pickle.loads(pickle.dumps(m1))
s2 = Sequence("x_seq")
t2 = Table('a', m2,
Column('id', Integer, primary_key=True),
Column('x', Integer, s2),
extend_existing=True)
assert m2._sequences['x_seq'] is t2.c.x.default
assert m2._sequences['x_seq'] is s2
def test_sequence_restated_replaced(self):
"""Test restatement of Sequence replaces."""
m1 = MetaData()
s1 = Sequence("x_seq")
t = Table('a', m1,
Column('x', Integer, s1)
)
assert m1._sequences['x_seq'] is s1
s2 = Sequence('x_seq')
Table('a', m1,
Column('x', Integer, s2),
extend_existing=True
)
assert t.c.x.default is s2
assert m1._sequences['x_seq'] is s2
def test_pickle_metadata_sequence_implicit(self):
m1 = MetaData()
Table('a', m1,
Column('id', Integer, primary_key=True),
Column('x', Integer, Sequence("x_seq")))
m2 = pickle.loads(pickle.dumps(m1))
t2 = Table('a', m2, extend_existing=True)
eq_(m2._sequences, {'x_seq': t2.c.x.default})
def test_pickle_metadata_schema(self):
m1 = MetaData()
Table('a', m1,
Column('id', Integer, primary_key=True),
Column('x', Integer, Sequence("x_seq")),
schema='y')
m2 = pickle.loads(pickle.dumps(m1))
Table('a', m2, schema='y',
extend_existing=True)
eq_(m2._schemas, m1._schemas)
    def test_tometadata_with_schema(self):
        """Passing ``schema=`` to tometadata() applies that schema to the
        copied tables, and foreign keys re-target the copies so that a
        join renders fully schema-qualified."""
        meta = MetaData()
        table = Table('mytable', meta,
                      Column('myid', Integer, primary_key=True),
                      Column('name', String(40), nullable=True),
                      Column('description', String(30),
                             CheckConstraint("description='hi'")),
                      UniqueConstraint('name'),
                      test_needs_fk=True,
                      )
        table2 = Table('othertable', meta,
                       Column('id', Integer, primary_key=True),
                       Column('myid', Integer, ForeignKey('mytable.myid')),
                       test_needs_fk=True,
                       )
        meta2 = MetaData()
        table_c = table.tometadata(meta2, schema='someschema')
        table2_c = table2.tometadata(meta2, schema='someschema')
        # the join onclause points at the *copies*, not the originals
        eq_(str(table_c.join(table2_c).onclause), str(table_c.c.myid
            == table2_c.c.myid))
        eq_(str(table_c.join(table2_c).onclause),
            'someschema.mytable.myid = someschema.othertable.myid')
    def test_tometadata_with_default_schema(self):
        """Tables that already carry an explicit schema keep it when
        copied via tometadata() without a ``schema=`` override."""
        meta = MetaData()
        table = Table('mytable', meta,
                      Column('myid', Integer, primary_key=True),
                      Column('name', String(40), nullable=True),
                      Column('description', String(30),
                             CheckConstraint("description='hi'")),
                      UniqueConstraint('name'),
                      test_needs_fk=True,
                      schema='myschema',
                      )
        table2 = Table('othertable', meta,
                       Column('id', Integer, primary_key=True),
                       Column('myid', Integer, ForeignKey('myschema.mytable.myid')),
                       test_needs_fk=True,
                       schema='myschema',
                       )
        meta2 = MetaData()
        table_c = table.tometadata(meta2)
        table2_c = table2.tometadata(meta2)
        # FK resolves to the copied table and the schema is preserved
        eq_(str(table_c.join(table2_c).onclause), str(table_c.c.myid
            == table2_c.c.myid))
        eq_(str(table_c.join(table2_c).onclause),
            'myschema.mytable.myid = myschema.othertable.myid')
def test_tometadata_kwargs(self):
meta = MetaData()
table = Table('mytable', meta,
Column('myid', Integer, primary_key=True),
mysql_engine='InnoDB',
)
meta2 = MetaData()
table_c = table.tometadata(meta2)
eq_(table.kwargs, table_c.kwargs)
def test_tometadata_indexes(self):
meta = MetaData()
table = Table('mytable', meta,
Column('id', Integer, primary_key=True),
Column('data1', Integer, index=True),
Column('data2', Integer),
)
Index('multi', table.c.data1, table.c.data2),
meta2 = MetaData()
table_c = table.tometadata(meta2)
def _get_key(i):
return [i.name, i.unique] + \
sorted(i.kwargs.items()) + \
i.columns.keys()
eq_(
sorted([_get_key(i) for i in table.indexes]),
sorted([_get_key(i) for i in table_c.indexes])
)
@emits_warning("Table '.+' already exists within the given MetaData")
def test_tometadata_already_there(self):
meta1 = MetaData()
table1 = Table('mytable', meta1,
Column('myid', Integer, primary_key=True),
)
meta2 = MetaData()
table2 = Table('mytable', meta2,
Column('yourid', Integer, primary_key=True),
)
table_c = table1.tometadata(meta2)
table_d = table2.tometadata(meta2)
# d'oh!
assert table_c is table_d
    def test_metadata_schema_arg(self):
        """Table/Sequence ``schema`` and ``quote_schema`` arguments:
        a MetaData-level schema acts as the default, an explicit argument
        overrides it, and ``quote_schema`` is only inherited when the
        schema itself is inherited from the MetaData."""
        m1 = MetaData(schema='sch1')
        m2 = MetaData(schema='sch1', quote_schema=True)
        m3 = MetaData(schema='sch1', quote_schema=False)
        m4 = MetaData()
        # each row: (table name, metadata, schema arg, quote_schema arg,
        #            expected .schema, expected .quote_schema)
        for i, (name, metadata, schema, quote_schema,
                        exp_schema, exp_quote_schema) in enumerate([
                ('t1', m1, None, None, 'sch1', None),
                ('t2', m1, 'sch2', None, 'sch2', None),
                ('t3', m1, 'sch2', True, 'sch2', True),
                ('t4', m1, 'sch1', None, 'sch1', None),
                ('t1', m2, None, None, 'sch1', True),
                ('t2', m2, 'sch2', None, 'sch2', None),
                ('t3', m2, 'sch2', True, 'sch2', True),
                ('t4', m2, 'sch1', None, 'sch1', None),
                ('t1', m3, None, None, 'sch1', False),
                ('t2', m3, 'sch2', None, 'sch2', None),
                ('t3', m3, 'sch2', True, 'sch2', True),
                ('t4', m3, 'sch1', None, 'sch1', None),
                ('t1', m4, None, None, None, None),
                ('t2', m4, 'sch2', None, 'sch2', None),
                ('t3', m4, 'sch2', True, 'sch2', True),
                ('t4', m4, 'sch1', None, 'sch1', None),
            ]):
            kw = {}
            if schema is not None:
                kw['schema'] = schema
            if quote_schema is not None:
                kw['quote_schema'] = quote_schema
            t = Table(name, metadata, **kw)
            eq_(t.schema, exp_schema, "test %d, table schema" % i)
            eq_(t.quote_schema, exp_quote_schema,
                            "test %d, table quote_schema" % i)
            # Sequence accepts the same pair of arguments as Table
            seq = Sequence(name, metadata=metadata, **kw)
            eq_(seq.schema, exp_schema, "test %d, seq schema" % i)
            eq_(seq.quote_schema, exp_quote_schema,
                            "test %d, seq quote_schema" % i)
def test_manual_dependencies(self):
meta = MetaData()
a = Table('a', meta, Column('foo', Integer))
b = Table('b', meta, Column('foo', Integer))
c = Table('c', meta, Column('foo', Integer))
d = Table('d', meta, Column('foo', Integer))
e = Table('e', meta, Column('foo', Integer))
e.add_is_dependent_on(c)
a.add_is_dependent_on(b)
b.add_is_dependent_on(d)
e.add_is_dependent_on(b)
c.add_is_dependent_on(a)
eq_(
meta.sorted_tables,
[d, b, a, c, e]
)
    def test_tometadata_default_schema_metadata(self):
        """``schema=None`` in tometadata() means "use the target
        MetaData's default schema", not "strip the schema"."""
        meta = MetaData(schema='myschema')
        table = Table('mytable', meta,
                      Column('myid', Integer, primary_key=True),
                      Column('name', String(40), nullable=True),
                      Column('description', String(30), CheckConstraint("description='hi'")),
                      UniqueConstraint('name'),
                      test_needs_fk=True
                      )
        table2 = Table('othertable', meta,
                       Column('id', Integer, primary_key=True),
                       Column('myid', Integer, ForeignKey('myschema.mytable.myid')),
                       test_needs_fk=True
                       )
        meta2 = MetaData(schema='someschema')
        # schema=None -> copies adopt meta2's default 'someschema'
        table_c = table.tometadata(meta2, schema=None)
        table2_c = table2.tometadata(meta2, schema=None)
        eq_(str(table_c.join(table2_c).onclause),
            str(table_c.c.myid == table2_c.c.myid))
        eq_(str(table_c.join(table2_c).onclause),
            "someschema.mytable.myid = someschema.othertable.myid")
    def test_tometadata_strip_schema(self):
        """With a target MetaData that has no default schema,
        ``schema=None`` yields unqualified copied tables."""
        meta = MetaData()
        table = Table('mytable', meta,
                      Column('myid', Integer, primary_key=True),
                      Column('name', String(40), nullable=True),
                      Column('description', String(30),
                             CheckConstraint("description='hi'")),
                      UniqueConstraint('name'),
                      test_needs_fk=True,
                      )
        table2 = Table('othertable', meta,
                       Column('id', Integer, primary_key=True),
                       Column('myid', Integer, ForeignKey('mytable.myid')),
                       test_needs_fk=True,
                       )
        meta2 = MetaData()
        table_c = table.tometadata(meta2, schema=None)
        table2_c = table2.tometadata(meta2, schema=None)
        # no schema prefix appears in the rendered join
        eq_(str(table_c.join(table2_c).onclause), str(table_c.c.myid
            == table2_c.c.myid))
        eq_(str(table_c.join(table2_c).onclause),
            'mytable.myid = othertable.myid')
def test_nonexistent(self):
assert_raises(tsa.exc.NoSuchTableError, Table,
'fake_table',
MetaData(testing.db), autoload=True)
    def test_assorted_repr(self):
        """Spot-check the exact repr() of assorted schema constructs:
        Sequence, Column, Table, DefaultGenerator, Index, FetchedValue
        and CheckConstraint."""
        t1 = Table("foo", MetaData(), Column("x", Integer))
        i1 = Index("bar", t1.c.x)
        ck = schema.CheckConstraint("x > y", name="someconstraint")
        # pairs of (construct, expected repr string)
        for const, exp in (
            (Sequence("my_seq"),
             "Sequence('my_seq')"),
            (Sequence("my_seq", start=5),
             "Sequence('my_seq', start=5)"),
            (Column("foo", Integer),
             "Column('foo', Integer(), table=None)"),
            (Table("bar", MetaData(), Column("x", String)),
             "Table('bar', MetaData(bind=None), "
             "Column('x', String(), table=<bar>), schema=None)"),
            (schema.DefaultGenerator(for_update=True),
             "DefaultGenerator(for_update=True)"),
            (schema.Index("bar", "c"), "Index('bar')"),
            (i1, "Index('bar', Column('x', Integer(), table=<foo>))"),
            (schema.FetchedValue(), "FetchedValue()"),
            (ck,
             "CheckConstraint("
             "%s"
             ", name='someconstraint')" % repr(ck.sqltext)),
        ):
            eq_(
                repr(const),
                exp
            )
class TableTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for assorted Table construction arguments: ``prefixes``,
    ``info``, the immutability of ``.c``, and autoincrement-column
    tracking under ``extend_existing``."""
    @testing.skip_if('mssql', 'different col format')
    def test_prefixes(self):
        """``prefixes`` render between CREATE and TABLE."""
        from sqlalchemy import Table
        table1 = Table("temporary_table_1", MetaData(),
                      Column("col1", Integer),
                      prefixes=["TEMPORARY"])
        self.assert_compile(
            schema.CreateTable(table1),
            "CREATE TEMPORARY TABLE temporary_table_1 (col1 INTEGER)"
        )
        table2 = Table("temporary_table_2", MetaData(),
                      Column("col1", Integer),
                      prefixes=["VIRTUAL"])
        self.assert_compile(
            schema.CreateTable(table2),
            "CREATE VIRTUAL TABLE temporary_table_2 (col1 INTEGER)"
        )
    def test_table_info(self):
        """``info`` defaults to a fresh mutable dict per table."""
        metadata = MetaData()
        t1 = Table('foo', metadata, info={'x': 'y'})
        t2 = Table('bar', metadata, info={})
        t3 = Table('bat', metadata)
        assert t1.info == {'x': 'y'}
        assert t2.info == {}
        assert t3.info == {}
        for t in (t1, t2, t3):
            t.info['bar'] = 'zip'
            assert t.info['bar'] == 'zip'
    def test_c_immutable(self):
        """The ``.c`` collection rejects extend, item and attribute
        assignment with TypeError."""
        m = MetaData()
        t1 = Table('t', m, Column('x', Integer), Column('y', Integer))
        assert_raises(
            TypeError,
            t1.c.extend, [Column('z', Integer)]
        )
        def assign():
            t1.c['z'] = Column('z', Integer)
        assert_raises(
            TypeError,
            assign
        )
        def assign2():
            t1.c.z = Column('z', Integer)
        assert_raises(
            TypeError,
            assign2
        )
    def test_autoincrement_replace(self):
        """Re-declaring a table with ``extend_existing`` keeps the
        autoincrement column pointing at the (new) pk column."""
        m = MetaData()
        t = Table('t', m,
                  Column('id', Integer, primary_key=True)
                  )
        is_(t._autoincrement_column, t.c.id)
        t = Table('t', m,
                  Column('id', Integer, primary_key=True),
                  extend_existing=True
                  )
        is_(t._autoincrement_column, t.c.id)
class SchemaTypeTest(fixtures.TestBase):
    """Tests for SchemaType: schema inheritance via ``inherit_schema``
    and DDL-event behavior across ``tometadata()`` copies."""
    class MyType(sqltypes.SchemaType, sqltypes.TypeEngine):
        # Minimal SchemaType subclass that records the column/table it
        # gets attached to and the targets of table-create events.
        column = None
        table = None
        evt_targets = ()
        def _set_table(self, column, table):
            super(SchemaTypeTest.MyType, self)._set_table(column, table)
            self.column = column
            self.table = table
        def _on_table_create(self, target, bind, **kw):
            # accumulate every table this event fires for
            self.evt_targets += (target,)
    def test_independent_schema(self):
        """An explicit type schema is not overridden by the table's."""
        m = MetaData()
        type_ = self.MyType(schema="q")
        t1 = Table('x', m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "q")
    def test_inherit_schema(self):
        """``inherit_schema=True`` adopts the owning table's schema."""
        m = MetaData()
        type_ = self.MyType(schema="q", inherit_schema=True)
        t1 = Table('x', m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "z")
    def test_independent_schema_enum(self):
        """Same independent-schema behavior for the built-in Enum."""
        m = MetaData()
        type_ = sqltypes.Enum("a", schema="q")
        t1 = Table('x', m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "q")
    def test_inherit_schema_enum(self):
        """Same schema inheritance for the built-in Enum."""
        m = MetaData()
        type_ = sqltypes.Enum("a", "b", "c", schema="q", inherit_schema=True)
        t1 = Table('x', m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "z")
    def test_tometadata_copy_type(self):
        """tometadata() hands the copied type the new table but no
        MetaData reference."""
        m1 = MetaData()
        type_ = self.MyType()
        t1 = Table('x', m1, Column("y", type_))
        m2 = MetaData()
        t2 = t1.tometadata(m2)
        # metadata isn't set
        is_(t2.c.y.type.metadata, None)
        # our test type sets table, though
        is_(t2.c.y.type.table, t2)
    def test_tometadata_independent_schema(self):
        """Copying with a new schema leaves the type's schema alone."""
        m1 = MetaData()
        type_ = self.MyType()
        t1 = Table('x', m1, Column("y", type_))
        m2 = MetaData()
        t2 = t1.tometadata(m2, schema="bar")
        eq_(t2.c.y.type.schema, None)
    def test_tometadata_inherit_schema(self):
        """``inherit_schema=True`` picks up the copy's schema without
        touching the original."""
        m1 = MetaData()
        type_ = self.MyType(inherit_schema=True)
        t1 = Table('x', m1, Column("y", type_))
        m2 = MetaData()
        t2 = t1.tometadata(m2, schema="bar")
        eq_(t1.c.y.type.schema, None)
        eq_(t2.c.y.type.schema, "bar")
    def test_tometadata_independent_events(self):
        """Each copy's type receives only its own table's DDL events."""
        m1 = MetaData()
        type_ = self.MyType()
        t1 = Table('x', m1, Column("y", type_))
        m2 = MetaData()
        t2 = t1.tometadata(m2)
        t1.dispatch.before_create(t1, testing.db)
        eq_(t1.c.y.type.evt_targets, (t1,))
        eq_(t2.c.y.type.evt_targets, ())
        # firing twice on the copy accumulates two targets
        t2.dispatch.before_create(t2, testing.db)
        t2.dispatch.before_create(t2, testing.db)
        eq_(t1.c.y.type.evt_targets, (t1,))
        eq_(t2.c.y.type.evt_targets, (t2, t2))
class SchemaTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for schema-qualified foreign key resolution with a
    MetaData-level default schema, and CREATE/DROP SCHEMA DDL."""
    def test_default_schema_metadata_fk(self):
        """An unqualified FK resolves within the MetaData default schema."""
        m = MetaData(schema="foo")
        t1 = Table('t1', m, Column('x', Integer))
        t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x')))
        assert t2.c.x.references(t1.c.x)
    def test_ad_hoc_schema_equiv_fk(self):
        """Per-table schemas do NOT make an unqualified FK resolve,
        even when both tables share the same schema."""
        m = MetaData()
        t1 = Table('t1', m, Column('x', Integer), schema="foo")
        t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x')), schema="foo")
        assert_raises(
            exc.NoReferencedTableError,
            lambda: t2.c.x.references(t1.c.x)
        )
    def test_default_schema_metadata_fk_alt_remote(self):
        """A table in a different schema can still reference the
        default-schema table by its unqualified name."""
        m = MetaData(schema="foo")
        t1 = Table('t1', m, Column('x', Integer))
        t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x')),
                   schema="bar")
        assert t2.c.x.references(t1.c.x)
    def test_default_schema_metadata_fk_alt_local_raises(self):
        """An unqualified FK does not find a target that lives outside
        the default schema."""
        m = MetaData(schema="foo")
        t1 = Table('t1', m, Column('x', Integer), schema="bar")
        t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x')))
        assert_raises(
            exc.NoReferencedTableError,
            lambda: t2.c.x.references(t1.c.x)
        )
    def test_default_schema_metadata_fk_alt_local(self):
        """Qualifying the FK target with its schema resolves it."""
        m = MetaData(schema="foo")
        t1 = Table('t1', m, Column('x', Integer), schema="bar")
        t2 = Table('t2', m, Column('x', Integer, ForeignKey('bar.t1.x')))
        assert t2.c.x.references(t1.c.x)
    def test_create_drop_schema(self):
        """CreateSchema / DropSchema render the expected DDL,
        including CASCADE."""
        self.assert_compile(
            schema.CreateSchema("sa_schema"),
            "CREATE SCHEMA sa_schema"
        )
        self.assert_compile(
            schema.DropSchema("sa_schema"),
            "DROP SCHEMA sa_schema"
        )
        self.assert_compile(
            schema.DropSchema("sa_schema", cascade=True),
            "DROP SCHEMA sa_schema CASCADE"
        )
    def test_iteration(self):
        """CREATE TABLE renders the schema prefix unless the dialect's
        preparer omits schemas."""
        metadata = MetaData()
        table1 = Table('table1', metadata, Column('col1', Integer,
                       primary_key=True), schema='someschema')
        table2 = Table('table2', metadata, Column('col1', Integer,
                       primary_key=True), Column('col2', Integer,
                       ForeignKey('someschema.table1.col1')),
                       schema='someschema')
        t1 = str(schema.CreateTable(table1).compile(bind=testing.db))
        t2 = str(schema.CreateTable(table2).compile(bind=testing.db))
        if testing.db.dialect.preparer(testing.db.dialect).omit_schema:
            assert t1.index("CREATE TABLE table1") > -1
            assert t2.index("CREATE TABLE table2") > -1
        else:
            assert t1.index("CREATE TABLE someschema.table1") > -1
            assert t2.index("CREATE TABLE someschema.table2") > -1
class UseExistingTest(fixtures.TablesTest):
    """Tests for re-declaring an already-defined Table: the deprecated
    ``useexisting`` flag, and the ``keep_existing`` / ``extend_existing``
    matrix — each exercised with reflection against an existing
    definition, with reflection but no prior definition, and with no
    reflection at all."""
    @classmethod
    def define_tables(cls, metadata):
        # the real database table the reflection-based tests autoload
        Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(30)))
    def _useexisting_fixture(self):
        # MetaData that ALREADY contains a 'users' definition
        meta2 = MetaData(testing.db)
        Table('users', meta2, autoload=True)
        return meta2
    def _notexisting_fixture(self):
        # MetaData with no prior 'users' definition
        return MetaData(testing.db)
    def test_exception_no_flags(self):
        """Redefining without any flag is an InvalidRequestError."""
        meta2 = self._useexisting_fixture()
        def go():
            Table('users', meta2, Column('name',
                  Unicode), autoload=True)
        assert_raises_message(
            exc.InvalidRequestError,
            "Table 'users' is already defined for this "\
            "MetaData instance.",
            go
        )
    @testing.uses_deprecated
    def test_deprecated_useexisting(self):
        """The deprecated ``useexisting`` behaves like extend_existing."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2, Column('name', Unicode),
                      autoload=True, useexisting=True)
        assert isinstance(users.c.name.type, Unicode)
        assert not users.quote
        users = Table('users', meta2, quote=True, autoload=True,
                      useexisting=True)
        assert users.quote
    def test_keep_plus_existing_raises(self):
        """keep_existing and extend_existing are mutually exclusive."""
        meta2 = self._useexisting_fixture()
        assert_raises(
            exc.ArgumentError,
            Table, 'users', meta2, keep_existing=True,
            extend_existing=True
        )
    @testing.uses_deprecated
    def test_existing_plus_useexisting_raises(self):
        """useexisting and extend_existing are mutually exclusive."""
        meta2 = self._useexisting_fixture()
        assert_raises(
            exc.ArgumentError,
            Table, 'users', meta2, useexisting=True,
            extend_existing=True
        )
    def test_keep_existing_no_dupe_constraints(self):
        """keep_existing does not duplicate constraints on redefinition."""
        meta2 = self._notexisting_fixture()
        users = Table('users', meta2,
                      Column('id', Integer),
                      Column('name', Unicode),
                      UniqueConstraint('name'),
                      keep_existing=True
                      )
        assert 'name' in users.c
        assert 'id' in users.c
        # PrimaryKeyConstraint + UniqueConstraint
        eq_(len(users.constraints), 2)
        u2 = Table('users', meta2,
                   Column('id', Integer),
                   Column('name', Unicode),
                   UniqueConstraint('name'),
                   keep_existing=True
                   )
        eq_(len(u2.constraints), 2)
    def test_extend_existing_dupes_constraints(self):
        """extend_existing re-applies constraints, duplicating them."""
        meta2 = self._notexisting_fixture()
        users = Table('users', meta2,
                      Column('id', Integer),
                      Column('name', Unicode),
                      UniqueConstraint('name'),
                      extend_existing=True
                      )
        assert 'name' in users.c
        assert 'id' in users.c
        eq_(len(users.constraints), 2)
        u2 = Table('users', meta2,
                   Column('id', Integer),
                   Column('name', Unicode),
                   UniqueConstraint('name'),
                   extend_existing=True
                   )
        # constraint got duped
        eq_(len(u2.constraints), 3)
    def test_keep_existing_coltype(self):
        """keep_existing ignores a new column type when a prior
        definition exists."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2, Column('name', Unicode),
                      autoload=True, keep_existing=True)
        assert not isinstance(users.c.name.type, Unicode)
    def test_keep_existing_quote(self):
        """keep_existing ignores a new quote flag."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2, quote=True, autoload=True,
                      keep_existing=True)
        assert not users.quote
    def test_keep_existing_add_column(self):
        """keep_existing ignores newly declared columns."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2,
                      Column('foo', Integer),
                      autoload=True,
                      keep_existing=True)
        assert "foo" not in users.c
    def test_keep_existing_coltype_no_orig(self):
        """With no prior definition, keep_existing applies the new type."""
        meta2 = self._notexisting_fixture()
        users = Table('users', meta2, Column('name', Unicode),
                      autoload=True, keep_existing=True)
        assert isinstance(users.c.name.type, Unicode)
    def test_keep_existing_quote_no_orig(self):
        """With no prior definition, keep_existing applies quote."""
        meta2 = self._notexisting_fixture()
        users = Table('users', meta2, quote=True,
                      autoload=True,
                      keep_existing=True)
        assert users.quote
    def test_keep_existing_add_column_no_orig(self):
        """With no prior definition, keep_existing keeps new columns."""
        meta2 = self._notexisting_fixture()
        users = Table('users', meta2,
                      Column('foo', Integer),
                      autoload=True,
                      keep_existing=True)
        assert "foo" in users.c
    def test_keep_existing_coltype_no_reflection(self):
        """Without autoload, keep_existing still ignores the new type."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2, Column('name', Unicode),
                      keep_existing=True)
        assert not isinstance(users.c.name.type, Unicode)
    def test_keep_existing_quote_no_reflection(self):
        """Without autoload, keep_existing still ignores quote."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2, quote=True,
                      keep_existing=True)
        assert not users.quote
    def test_keep_existing_add_column_no_reflection(self):
        """Without autoload, keep_existing still ignores new columns."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2,
                      Column('foo', Integer),
                      keep_existing=True)
        assert "foo" not in users.c
    def test_extend_existing_coltype(self):
        """extend_existing overrides the reflected column type."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2, Column('name', Unicode),
                      autoload=True, extend_existing=True)
        assert isinstance(users.c.name.type, Unicode)
    def test_extend_existing_quote(self):
        """extend_existing applies the new quote flag."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2, quote=True, autoload=True,
                      extend_existing=True)
        assert users.quote
    def test_extend_existing_add_column(self):
        """extend_existing adds newly declared columns."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2,
                      Column('foo', Integer),
                      autoload=True,
                      extend_existing=True)
        assert "foo" in users.c
    def test_extend_existing_coltype_no_orig(self):
        """With no prior definition, extend_existing applies the type."""
        meta2 = self._notexisting_fixture()
        users = Table('users', meta2, Column('name', Unicode),
                      autoload=True, extend_existing=True)
        assert isinstance(users.c.name.type, Unicode)
    def test_extend_existing_quote_no_orig(self):
        """With no prior definition, extend_existing applies quote."""
        meta2 = self._notexisting_fixture()
        users = Table('users', meta2, quote=True,
                      autoload=True,
                      extend_existing=True)
        assert users.quote
    def test_extend_existing_add_column_no_orig(self):
        """With no prior definition, extend_existing keeps new columns."""
        meta2 = self._notexisting_fixture()
        users = Table('users', meta2,
                      Column('foo', Integer),
                      autoload=True,
                      extend_existing=True)
        assert "foo" in users.c
    def test_extend_existing_coltype_no_reflection(self):
        """Without autoload, extend_existing still applies the type."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2, Column('name', Unicode),
                      extend_existing=True)
        assert isinstance(users.c.name.type, Unicode)
    def test_extend_existing_quote_no_reflection(self):
        """Without autoload, extend_existing still applies quote."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2, quote=True,
                      extend_existing=True)
        assert users.quote
    def test_extend_existing_add_column_no_reflection(self):
        """Without autoload, extend_existing still adds new columns."""
        meta2 = self._useexisting_fixture()
        users = Table('users', meta2,
                      Column('foo', Integer),
                      extend_existing=True)
        assert "foo" in users.c
class ConstraintTest(fixtures.TestBase):
    """Tests for ForeignKey/ForeignKeyConstraint reference resolution,
    including derived selectables and invalid composite FKs."""
    def _single_fixture(self):
        # t2.a -> t1.a; t3 is unrelated
        m = MetaData()
        t1 = Table('t1', m,
                   Column('a', Integer),
                   Column('b', Integer)
                   )
        t2 = Table('t2', m,
                   Column('a', Integer, ForeignKey('t1.a'))
                   )
        t3 = Table('t3', m,
                   Column('a', Integer)
                   )
        return t1, t2, t3
    def test_table_references(self):
        """ForeignKey.references(table) matches only the target table."""
        t1, t2, t3 = self._single_fixture()
        assert list(t2.c.a.foreign_keys)[0].references(t1)
        assert not list(t2.c.a.foreign_keys)[0].references(t3)
    def test_column_references(self):
        """Column.references(col) matches only the exact target column."""
        t1, t2, t3 = self._single_fixture()
        assert t2.c.a.references(t1.c.a)
        assert not t2.c.a.references(t3.c.a)
        assert not t2.c.a.references(t1.c.b)
    def test_column_references_derived(self):
        """references() follows proxies through nested select aliases."""
        t1, t2, t3 = self._single_fixture()
        s1 = tsa.select([tsa.select([t1]).alias()])
        assert t2.c.a.references(s1.c.a)
        assert not t2.c.a.references(s1.c.b)
    def test_copy_doesnt_reference(self):
        """A detached Column.copy() loses its foreign key linkage."""
        t1, t2, t3 = self._single_fixture()
        a2 = t2.c.a.copy()
        assert not a2.references(t1.c.a)
        assert not a2.references(t1.c.b)
    def test_derived_column_references(self):
        """A proxied FK column still references the original target."""
        t1, t2, t3 = self._single_fixture()
        s1 = tsa.select([tsa.select([t2]).alias()])
        assert s1.c.a.references(t1.c.a)
        assert not s1.c.a.references(t1.c.b)
    def test_invalid_composite_fk_check(self):
        """A composite FK spanning two remote tables raises consistently
        from join() and from DDL compilation."""
        m = MetaData()
        t1 = Table('t1', m, Column('x', Integer), Column('y', Integer),
                   ForeignKeyConstraint(['x', 'y'], ['t2.x', 't3.y'])
                   )
        t2 = Table('t2', m, Column('x', Integer))
        t3 = Table('t3', m, Column('y', Integer))
        assert_raises_message(
            exc.ArgumentError,
            r"ForeignKeyConstraint on t1\(x, y\) refers to "
            "multiple remote tables: t2 and t3",
            t1.join, t2
        )
        assert_raises_message(
            exc.ArgumentError,
            r"ForeignKeyConstraint on t1\(x, y\) refers to "
            "multiple remote tables: t2 and t3",
            t1.join, t3
        )
        assert_raises_message(
            exc.ArgumentError,
            r"ForeignKeyConstraint on t1\(x, y\) refers to "
            "multiple remote tables: t2 and t3",
            schema.CreateTable(t1).compile
        )
class ColumnDefinitionTest(AssertsCompiledSQL, fixtures.TestBase):
    """Test Column() construction.

    Covers name/key assignment, duplicate-assignment errors, Column
    subclass proxying, and custom CreateColumn compilation.
    """
    __dialect__ = 'default'
    def columns(self):
        # a mix of anonymous, positional-name, keyword-name and
        # instance-vs-class type arguments
        return [Column(Integer),
                Column('b', Integer),
                Column(Integer),
                Column('d', Integer),
                Column(Integer, name='e'),
                Column(type_=Integer),
                Column(Integer()),
                Column('h', Integer()),
                Column(type_=Integer())]
    def test_basic(self):
        """Names assigned after construction are honored by Table."""
        c = self.columns()
        for i, v in ((0, 'a'), (2, 'c'), (5, 'f'), (6, 'g'), (8, 'i')):
            c[i].name = v
            c[i].key = v
        del i, v
        tbl = Table('table', MetaData(), *c)
        for i, col in enumerate(tbl.c):
            assert col.name == c[i].name
    def test_name_none(self):
        """A nameless Column cannot be attached to a Table."""
        c = Column(Integer)
        assert_raises_message(
            exc.ArgumentError,
            "Column must be constructed with a non-blank name or assign a "
            "non-blank .name ",
            Table, 't', MetaData(), c)
    def test_name_blank(self):
        """An empty-string name is rejected the same as no name."""
        c = Column('', Integer)
        assert_raises_message(
            exc.ArgumentError,
            "Column must be constructed with a non-blank name or assign a "
            "non-blank .name ",
            Table, 't', MetaData(), c)
    def test_dupe_column(self):
        """A Column instance belongs to at most one Table."""
        c = Column('x', Integer)
        Table('t', MetaData(), c)
        assert_raises_message(
            exc.ArgumentError,
            "Column object already assigned to Table 't'",
            Table, 'q', MetaData(), c)
    def test_incomplete_key(self):
        """Setting .name late also populates .key."""
        c = Column(Integer)
        assert c.name is None
        assert c.key is None
        c.name = 'named'
        Table('t', MetaData(), c)
        assert c.name == 'named'
        assert c.name == c.key
    def test_bogus(self):
        """Conflicting name/type arguments raise ArgumentError."""
        assert_raises(exc.ArgumentError, Column, 'foo', name='bar')
        assert_raises(exc.ArgumentError, Column, 'foo', Integer,
                      type_=Integer())
    def test_custom_subclass_proxy(self):
        """test proxy generation of a Column subclass, can be compiled."""
        from sqlalchemy.schema import Column
        from sqlalchemy.ext.compiler import compiles
        from sqlalchemy.sql import select
        class MyColumn(Column):
            # _constructor is what select()/alias() proxying uses to
            # build copies of this column
            def _constructor(self, name, type, **kw):
                kw['name'] = name
                return MyColumn(type, **kw)
            def __init__(self, type, **kw):
                Column.__init__(self, type, **kw)
            def my_goofy_thing(self):
                return "hi"
        @compiles(MyColumn)
        def goofy(element, compiler, **kw):
            s = compiler.visit_column(element, **kw)
            return s + "-"
        id = MyColumn(Integer, primary_key=True)
        id.name = 'id'
        name = MyColumn(String)
        name.name = 'name'
        t1 = Table('foo', MetaData(),
                   id,
                   name
                   )
        # goofy thing
        eq_(t1.c.name.my_goofy_thing(), "hi")
        # create proxy
        s = select([t1.select().alias()])
        # proxy has goofy thing
        eq_(s.c.name.my_goofy_thing(), "hi")
        # compile works
        self.assert_compile(
            select([t1.select().alias()]),
            "SELECT anon_1.id-, anon_1.name- FROM "
            "(SELECT foo.id- AS id, foo.name- AS name "
            "FROM foo) AS anon_1",
        )
    def test_custom_subclass_proxy_typeerror(self):
        """A Column subclass without _constructor() fails proxying with
        an informative TypeError."""
        from sqlalchemy.schema import Column
        from sqlalchemy.sql import select
        class MyColumn(Column):
            def __init__(self, type, **kw):
                Column.__init__(self, type, **kw)
        id = MyColumn(Integer, primary_key=True)
        id.name = 'id'
        name = MyColumn(String)
        name.name = 'name'
        t1 = Table('foo', MetaData(),
                   id,
                   name
                   )
        assert_raises_message(
            TypeError,
            "Could not create a copy of this <class "
            "'test.sql.test_metadata..*MyColumn'> "
            "object. Ensure the class includes a _constructor()",
            getattr, select([t1.select().alias()]), 'c'
        )
    def test_custom_create(self):
        """A @compiles hook on CreateColumn can fully replace the DDL
        for columns it selects via ``info``."""
        from sqlalchemy.ext.compiler import compiles, deregister
        @compiles(schema.CreateColumn)
        def compile(element, compiler, **kw):
            column = element.element
            # fall through to the default rendering for normal columns
            if "special" not in column.info:
                return compiler.visit_create_column(element, **kw)
            text = "%s SPECIAL DIRECTIVE %s" % (
                column.name,
                compiler.type_compiler.process(column.type)
            )
            default = compiler.get_column_default_string(column)
            if default is not None:
                text += " DEFAULT " + default
            if not column.nullable:
                text += " NOT NULL"
            if column.constraints:
                text += " ".join(
                    compiler.process(const)
                    for const in column.constraints)
            return text
        t = Table('mytable', MetaData(),
                  Column('x', Integer, info={"special": True}, primary_key=True),
                  Column('y', String(50)),
                  Column('z', String(20), info={"special": True})
                  )
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE TABLE mytable (x SPECIAL DIRECTIVE INTEGER "
            "NOT NULL, y VARCHAR(50), "
            "z SPECIAL DIRECTIVE VARCHAR(20), PRIMARY KEY (x))"
        )
        # restore default CreateColumn compilation for other tests
        deregister(schema.CreateColumn)
class ColumnDefaultsTest(fixtures.TestBase):
    """Test assignment of default fixtures to columns.

    Covers how positional and keyword DefaultClause / ColumnDefault
    arguments map onto ``default``, ``onupdate``, ``server_default``
    and ``server_onupdate`` depending on ``for_update``.
    """
    def _fixture(self, *arg, **kw):
        # a fresh Column with the given default-related arguments
        return Column('x', Integer, *arg, **kw)
    def test_server_default_positional(self):
        """A positional DefaultClause becomes server_default."""
        target = schema.DefaultClause('y')
        c = self._fixture(target)
        assert c.server_default is target
        assert target.column is c
    def test_onupdate_default_not_server_default_one(self):
        """server_default and server_onupdate stay independent."""
        target1 = schema.DefaultClause('y')
        target2 = schema.DefaultClause('z')
        c = self._fixture(server_default=target1, server_onupdate=target2)
        eq_(c.server_default.arg, 'y')
        eq_(c.server_onupdate.arg, 'z')
    def test_onupdate_default_not_server_default_two(self):
        """Explicit keywords win even with for_update=True on both."""
        target1 = schema.DefaultClause('y', for_update=True)
        target2 = schema.DefaultClause('z', for_update=True)
        c = self._fixture(server_default=target1, server_onupdate=target2)
        eq_(c.server_default.arg, 'y')
        eq_(c.server_onupdate.arg, 'z')
    def test_onupdate_default_not_server_default_three(self):
        """Positional clauses route by their for_update flag."""
        target1 = schema.DefaultClause('y', for_update=False)
        target2 = schema.DefaultClause('z', for_update=True)
        c = self._fixture(target1, target2)
        eq_(c.server_default.arg, 'y')
        eq_(c.server_onupdate.arg, 'z')
    def test_onupdate_default_not_server_default_four(self):
        """server_onupdate alone leaves server_default unset."""
        target1 = schema.DefaultClause('y', for_update=False)
        c = self._fixture(server_onupdate=target1)
        is_(c.server_default, None)
        eq_(c.server_onupdate.arg, 'y')
    def test_server_default_keyword_as_schemaitem(self):
        """server_default= accepts a DefaultClause instance."""
        target = schema.DefaultClause('y')
        c = self._fixture(server_default=target)
        assert c.server_default is target
        assert target.column is c
    def test_server_default_keyword_as_clause(self):
        """server_default= accepts a plain value, wrapped for us."""
        target = 'y'
        c = self._fixture(server_default=target)
        assert c.server_default.arg == target
        assert c.server_default.column is c
    def test_server_default_onupdate_positional(self):
        """A positional for_update DefaultClause becomes server_onupdate."""
        target = schema.DefaultClause('y', for_update=True)
        c = self._fixture(target)
        assert c.server_onupdate is target
        assert target.column is c
    def test_server_default_onupdate_keyword_as_schemaitem(self):
        """server_onupdate= accepts a DefaultClause instance."""
        target = schema.DefaultClause('y', for_update=True)
        c = self._fixture(server_onupdate=target)
        assert c.server_onupdate is target
        assert target.column is c
    def test_server_default_onupdate_keyword_as_clause(self):
        """server_onupdate= accepts a plain value, wrapped for us."""
        target = 'y'
        c = self._fixture(server_onupdate=target)
        assert c.server_onupdate.arg == target
        assert c.server_onupdate.column is c
    def test_column_default_positional(self):
        """A positional ColumnDefault becomes .default."""
        target = schema.ColumnDefault('y')
        c = self._fixture(target)
        assert c.default is target
        assert target.column is c
    def test_column_default_keyword_as_schemaitem(self):
        """default= accepts a ColumnDefault instance."""
        target = schema.ColumnDefault('y')
        c = self._fixture(default=target)
        assert c.default is target
        assert target.column is c
    def test_column_default_keyword_as_clause(self):
        """default= accepts a plain value, wrapped for us."""
        target = 'y'
        c = self._fixture(default=target)
        assert c.default.arg == target
        assert c.default.column is c
    def test_column_default_onupdate_positional(self):
        """A positional for_update ColumnDefault becomes .onupdate."""
        target = schema.ColumnDefault('y', for_update=True)
        c = self._fixture(target)
        assert c.onupdate is target
        assert target.column is c
    def test_column_default_onupdate_keyword_as_schemaitem(self):
        """onupdate= accepts a ColumnDefault instance."""
        target = schema.ColumnDefault('y', for_update=True)
        c = self._fixture(onupdate=target)
        assert c.onupdate is target
        assert target.column is c
    def test_column_default_onupdate_keyword_as_clause(self):
        """onupdate= accepts a plain value, wrapped for us."""
        target = 'y'
        c = self._fixture(onupdate=target)
        assert c.onupdate.arg == target
        assert c.onupdate.column is c
class ColumnOptionsTest(fixtures.TestBase):
    """Tests for miscellaneous Column options: default generators,
    required type/name arguments, and the ``info`` dict."""
    def test_default_generators(self):
        """Sequence and ColumnDefault instances pass through unchanged."""
        g1, g2 = Sequence('foo_id_seq'), ColumnDefault('f5')
        assert Column(String, default=g1).default is g1
        assert Column(String, onupdate=g1).onupdate is g1
        assert Column(String, default=g2).default is g2
        assert Column(String, onupdate=g2).onupdate is g2
    def test_type_required(self):
        """A Column needs a type unless a ForeignKey implies one."""
        assert_raises(exc.ArgumentError, Column)
        assert_raises(exc.ArgumentError, Column, "foo")
        assert_raises(exc.ArgumentError, Column, default="foo")
        assert_raises(exc.ArgumentError, Column, Sequence("a"))
        assert_raises(exc.ArgumentError, Column, "foo", default="foo")
        assert_raises(exc.ArgumentError, Column, "foo", Sequence("a"))
        # with a ForeignKey present, the type may be omitted
        Column(ForeignKey('bar.id'))
        Column("foo", ForeignKey('bar.id'))
        Column(ForeignKey('bar.id'), default="foo")
        Column(ForeignKey('bar.id'), Sequence("a"))
        Column("foo", ForeignKey('bar.id'), default="foo")
        Column("foo", ForeignKey('bar.id'), Sequence("a"))
    def test_column_info(self):
        """``info`` defaults to a fresh mutable dict per column."""
        c1 = Column('foo', String, info={'x': 'y'})
        c2 = Column('bar', String, info={})
        c3 = Column('bat', String)
        assert c1.info == {'x': 'y'}
        assert c2.info == {}
        assert c3.info == {}
        for c in (c1, c2, c3):
            c.info['bar'] = 'zip'
            assert c.info['bar'] == 'zip'
class CatchAllEventsTest(fixtures.TestBase):
    """Tests for before/after_parent_attach events, both on the generic
    SchemaItem target and on individual constraint classes."""
    def teardown(self):
        # remove listeners registered by the tests below
        events.SchemaEventTarget.dispatch._clear()
    def test_all_events(self):
        """Listening on SchemaItem captures every attach, in order,
        with 'before' seeing the parent class and 'after' its repr."""
        canary = []
        def before_attach(obj, parent):
            canary.append("%s->%s" % (obj.__class__.__name__,
                          parent.__class__.__name__))
        def after_attach(obj, parent):
            canary.append("%s->%s" % (obj.__class__.__name__, parent))
        event.listen(schema.SchemaItem, "before_parent_attach", before_attach)
        event.listen(schema.SchemaItem, "after_parent_attach", after_attach)
        m = MetaData()
        Table('t1', m,
              Column('id', Integer, Sequence('foo_id'), primary_key=True),
              Column('bar', String, ForeignKey('t2.id'))
              )
        Table('t2', m,
              Column('id', Integer, primary_key=True),
              )
        eq_(
            canary,
            ['Sequence->Column', 'Sequence->id', 'ForeignKey->Column',
             'ForeignKey->bar', 'Table->MetaData',
             'PrimaryKeyConstraint->Table', 'PrimaryKeyConstraint->t1',
             'Column->Table', 'Column->t1', 'Column->Table',
             'Column->t1', 'ForeignKeyConstraint->Table',
             'ForeignKeyConstraint->t1', 'Table->MetaData(bind=None)',
             'Table->MetaData', 'PrimaryKeyConstraint->Table',
             'PrimaryKeyConstraint->t2', 'Column->Table', 'Column->t2',
             'Table->MetaData(bind=None)']
        )
    def test_events_per_constraint(self):
        """Listening per constraint class captures only constraint
        attachments, including implicitly created ones."""
        canary = []
        def evt(target):
            # bind before/after listeners for one constraint class
            def before_attach(obj, parent):
                canary.append("%s->%s" % (target.__name__,
                              parent.__class__.__name__))
            def after_attach(obj, parent):
                canary.append("%s->%s" % (target.__name__, parent))
            event.listen(target, "before_parent_attach", before_attach)
            event.listen(target, "after_parent_attach", after_attach)
        for target in [
            schema.ForeignKeyConstraint, schema.PrimaryKeyConstraint,
            schema.UniqueConstraint,
            schema.CheckConstraint
        ]:
            evt(target)
        m = MetaData()
        Table('t1', m,
              Column('id', Integer, Sequence('foo_id'), primary_key=True),
              Column('bar', String, ForeignKey('t2.id')),
              Column('bat', Integer, unique=True),
              )
        Table('t2', m,
              Column('id', Integer, primary_key=True),
              Column('bar', Integer),
              Column('bat', Integer),
              CheckConstraint("bar>5"),
              UniqueConstraint('bar', 'bat')
              )
        eq_(
            canary,
            [
                'PrimaryKeyConstraint->Table', 'PrimaryKeyConstraint->t1',
                'ForeignKeyConstraint->Table', 'ForeignKeyConstraint->t1',
                'UniqueConstraint->Table', 'UniqueConstraint->t1',
                'PrimaryKeyConstraint->Table', 'PrimaryKeyConstraint->t2',
                'CheckConstraint->Table', 'CheckConstraint->t2',
                'UniqueConstraint->Table', 'UniqueConstraint->t2'
            ]
        )
| |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
from st2common import log as logging
from st2common.constants.triggers import CRON_TIMER_TRIGGER_REF
from st2common.exceptions.sensors import TriggerTypeRegistrationException
from st2common.exceptions.triggers import TriggerDoesNotExistException
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.exceptions.db import StackStormDBObjectConflictError
from st2common.models.api.trigger import TriggerAPI, TriggerTypeAPI
from st2common.models.system.common import ResourceReference
from st2common.persistence.trigger import Trigger, TriggerType
# Explicit public API of this module.
__all__ = [
    "add_trigger_models",
    "get_trigger_db_by_ref",
    "get_trigger_db_by_id",
    "get_trigger_db_by_uid",
    "get_trigger_db_by_ref_or_dict",
    "get_trigger_db_given_type_and_params",
    "get_trigger_type_db",
    "create_trigger_db",
    "create_trigger_type_db",
    "create_or_update_trigger_db",
    "create_or_update_trigger_type_db",
]

# Module-level logger.
LOG = logging.getLogger(__name__)
def get_trigger_db_given_type_and_params(type=None, parameters=None):
    """
    Return the TriggerDB matching the given trigger type ref and parameters
    dict, or ``None`` when no match is found or the lookup raises a
    not-found error.

    :param type: Trigger type reference.
    :type type: ``str``

    :param parameters: Trigger parameters to match; ``None`` is treated as ``{}``.
    :type parameters: ``dict``

    :rtype: ``object`` or ``None``
    """
    try:
        parameters = parameters or {}
        trigger_dbs = Trigger.query(type=type, parameters=parameters)
        trigger_db = trigger_dbs[0] if len(trigger_dbs) > 0 else None
        # NOTE: This is a work-around which we might be able to remove once we upgrade
        # pymongo and mongoengine
        # Work around for cron-timer when in some scenarios finding an object fails when Python
        # value types are unicode :/
        is_cron_trigger = type == CRON_TIMER_TRIGGER_REF
        has_parameters = bool(parameters)
        if not trigger_db and six.PY2 and is_cron_trigger and has_parameters:
            # Python 2 only: retry the query with utf-8 encoded byte-string
            # keys and values.
            non_unicode_literal_parameters = {}
            for key, value in six.iteritems(parameters):
                key = key.encode("utf-8")
                if isinstance(value, six.text_type):
                    # We only encode unicode to str
                    value = value.encode("utf-8")
                non_unicode_literal_parameters[key] = value
            parameters = non_unicode_literal_parameters
            trigger_dbs = Trigger.query(
                type=type, parameters=non_unicode_literal_parameters
            ).no_cache()
            # Note: We need to directly access the object, using len or accessing the query set
            # twice won't work - there seems to be a bug with cursor where accessing it twice
            # will throw an exception
            try:
                trigger_db = trigger_dbs[0]
            except IndexError:
                trigger_db = None
        if not parameters and not trigger_db:
            # We need to do double query because some TriggerDB objects without
            # parameters have "parameters" attribute stored in the db and others
            # don't
            trigger_db = Trigger.query(type=type, parameters=None).first()
        return trigger_db
    except StackStormDBObjectNotFoundError as e:
        LOG.debug(
            'Database lookup for type="%s" parameters="%s" resulted '
            + "in exception : %s.",
            type,
            parameters,
            e,
            exc_info=True,
        )
        return None
def get_trigger_db_by_ref_or_dict(trigger):
    """
    Retrieve a TriggerDB object from either a trigger reference string or a
    dictionary of trigger attributes.
    """
    # TODO: This is nasty, this should take a unique reference and not a dict
    if isinstance(trigger, six.string_types):
        return get_trigger_db_by_ref(trigger)

    # Prefer id / uid lookups when available so we avoid the pymongo /
    # mongoengine bug with "parameters" dictionary lookups.
    trigger_id = trigger.get("id", None)
    if trigger_id:
        LOG.debug("Looking up TriggerDB by id: %s", trigger_id)
        return get_trigger_db_by_id(id=trigger_id)

    trigger_uid = trigger.get("uid", None)
    if trigger_uid:
        LOG.debug("Looking up TriggerDB by uid: %s", trigger_uid)
        return get_trigger_db_by_uid(uid=trigger_uid)

    # TODO: Remove parameters dictionary look up when we can confirm each trigger dictionary
    # passed to this method always contains id or uid
    # Last resort - look it up by parameters
    trigger_type = trigger.get("type", None)
    parameters = trigger.get("parameters", {})

    LOG.debug(
        "Looking up TriggerDB by type and parameters: type=%s, parameters=%s",
        trigger_type,
        parameters,
    )
    return get_trigger_db_given_type_and_params(
        type=trigger_type, parameters=parameters
    )
def get_trigger_db_by_id(id):
    """
    Return the trigger object from the db for the given trigger id, or
    ``None`` when it does not exist.

    :param id: Id of the trigger db object.
    :type id: ``str``

    :rtype: ``object``
    """
    try:
        trigger_db = Trigger.get_by_id(id)
    except StackStormDBObjectNotFoundError as e:
        LOG.debug(
            'Database lookup for id="%s" resulted in exception : %s.',
            id,
            e,
            exc_info=True,
        )
        trigger_db = None
    return trigger_db
def get_trigger_db_by_uid(uid):
    """
    Return the trigger object from the db for the given trigger uid, or
    ``None`` when it does not exist.

    :param uid: Uid of the trigger db object.
    :type uid: ``str``

    :rtype: ``object``
    """
    try:
        trigger_db = Trigger.get_by_uid(uid)
    except StackStormDBObjectNotFoundError as e:
        LOG.debug(
            'Database lookup for uid="%s" resulted in exception : %s.',
            uid,
            e,
            exc_info=True,
        )
        trigger_db = None
    return trigger_db
def get_trigger_db_by_ref(ref):
    """
    Return the trigger object from the db for the given string reference, or
    ``None`` when it does not exist.

    :param ref: Reference to the trigger db object.
    :type ref: ``str``

    :rtype: ``object``
    """
    try:
        trigger_db = Trigger.get_by_ref(ref)
    except StackStormDBObjectNotFoundError as e:
        LOG.debug(
            'Database lookup for ref="%s" resulted in exception : %s.',
            ref,
            e,
            exc_info=True,
        )
        trigger_db = None
    return trigger_db
def _get_trigger_db(trigger):
    """Look up a TriggerDB from a trigger dict, by ref or by type+parameters."""
    # TODO: This method should die in a fire
    # XXX: Do not make this method public.
    if not isinstance(trigger, dict):
        raise Exception("Unrecognized object")

    name = trigger.get("name", None)
    pack = trigger.get("pack", None)

    if name and pack:
        # Enough information for a direct reference lookup.
        ref = ResourceReference.to_string_reference(name=name, pack=pack)
        return get_trigger_db_by_ref(ref)

    return get_trigger_db_given_type_and_params(
        type=trigger["type"], parameters=trigger.get("parameters", {})
    )
def get_trigger_type_db(ref):
    """
    Return the trigger type object from the db for the given string
    reference, or ``None`` when it does not exist.

    :param ref: Reference to the trigger type db object.
    :type ref: ``str``

    :rtype: ``object``
    """
    try:
        trigger_type_db = TriggerType.get_by_ref(ref)
    except StackStormDBObjectNotFoundError as e:
        LOG.debug(
            'Database lookup for ref="%s" resulted in exception : %s.',
            ref,
            e,
            exc_info=True,
        )
        trigger_type_db = None
    return trigger_type_db
def _get_trigger_dict_given_rule(rule):
    """Build a trigger dict (pack, type, parameters) from a rule's trigger."""
    trigger = rule.trigger
    triggertype_ref = ResourceReference.from_string_reference(trigger.get("type"))
    return {
        "pack": triggertype_ref.pack,
        "type": triggertype_ref.ref,
        "parameters": trigger.get("parameters", {}),
    }
def create_trigger_db(trigger_api):
    """Ensure a TriggerDB exists for the given TriggerAPI; create it if missing."""
    # TODO: This is used only in trigger API controller. We should get rid of this.
    trigger_ref = ResourceReference.to_string_reference(
        name=trigger_api.name, pack=trigger_api.pack
    )

    existing = get_trigger_db_by_ref(trigger_ref)
    if existing:
        return existing

    trigger_db = TriggerAPI.to_model(trigger_api)
    LOG.debug("Verified trigger and formulated TriggerDB=%s", trigger_db)
    return Trigger.add_or_update(trigger_db)
def create_or_update_trigger_db(trigger, log_not_unique_error_as_debug=False):
    """
    Create a new TriggerDB model or update the existing one for this trigger.

    :param trigger: Trigger info.
    :type trigger: ``dict``
    """
    if not isinstance(trigger, dict):
        raise ValueError(
            "The trigger has a value that is not a dictionary"
            f" (was {type(trigger)})."
        )

    existing_trigger_db = _get_trigger_db(trigger)
    is_update = bool(existing_trigger_db)

    trigger_api = TriggerAPI(**trigger)
    trigger_api.validate()
    trigger_db = TriggerAPI.to_model(trigger_api)

    if is_update:
        # Preserve the primary key so add_or_update performs an update.
        trigger_db.id = existing_trigger_db.id

    trigger_db = Trigger.add_or_update(
        trigger_db, log_not_unique_error_as_debug=log_not_unique_error_as_debug
    )

    extra = {"trigger_db": trigger_db}
    if is_update:
        LOG.audit("Trigger updated. Trigger.id=%s" % (trigger_db.id), extra=extra)
    else:
        LOG.audit("Trigger created. Trigger.id=%s" % (trigger_db.id), extra=extra)

    return trigger_db
def create_trigger_db_from_rule(rule):
    """Resolve (or create) the TriggerDB referenced by the given rule."""
    trigger_dict = _get_trigger_dict_given_rule(rule)
    existing_trigger_db = _get_trigger_db(trigger_dict)

    # For simple triggertypes (triggertype with no parameters), we create a trigger when
    # registering triggertype. So if we hit the case that there is no trigger in db but
    # parameters is empty, then this case is a run time error.
    if not trigger_dict.get("parameters", {}) and not existing_trigger_db:
        raise TriggerDoesNotExistException(
            "A simple trigger should have been created when registering "
            "triggertype. Cannot create trigger: %s." % (trigger_dict)
        )

    if existing_trigger_db:
        return existing_trigger_db

    # Special reference counting for trigger with parameters.
    # if trigger_dict.get('parameters', None):
    #     Trigger.update(trigger_db, inc__ref_count=1)
    return create_or_update_trigger_db(trigger_dict)
def increment_trigger_ref_count(rule_api):
    """
    Increment the ref_count on the Trigger inferred from the given rule.

    Only triggers with parameters are reference counted.

    :param rule_api: Rule object used to infer the Trigger.
    :type rule_api: ``RuleApi``
    """
    trigger_dict = _get_trigger_dict_given_rule(rule_api)

    # Special reference counting for trigger with parameters.
    if not trigger_dict.get("parameters", None):
        return

    trigger_db = _get_trigger_db(trigger_dict)
    Trigger.update(trigger_db, inc__ref_count=1)
def cleanup_trigger_db_for_rule(rule_db):
    """Decrement the ref_count of the rule's trigger; delete it when unreferenced."""
    # rule_db.trigger holds the trigger_db reference string.
    trigger_db = get_trigger_db_by_ref(rule_db.trigger)

    # Only triggers with parameters are reference counted.
    if not trigger_db or not trigger_db.parameters:
        LOG.debug("ref_count decrement for %s not required.", trigger_db)
        return

    Trigger.update(trigger_db, dec__ref_count=1)
    Trigger.delete_if_unreferenced(trigger_db)
def create_trigger_type_db(trigger_type, log_not_unique_error_as_debug=False):
    """
    Create a trigger type db object in the db given trigger_type definition as dict.

    :param trigger_type: Trigger type model.
    :type trigger_type: ``dict``

    :param log_not_unique_error_as_debug: True to log NotUnique errors under debug
                                          instead of error log level. To be used in
                                          scenarios where failure is non-fatal
                                          (e.g. when services register internal
                                          trigger types, an idempotent operation).
    :type log_not_unique_error_as_debug: ``bool``

    :rtype: ``object``
    """
    trigger_type_api = TriggerTypeAPI(**trigger_type)
    trigger_type_api.validate()

    ref = ResourceReference.to_string_reference(
        name=trigger_type_api.name, pack=trigger_type_api.pack
    )

    existing = get_trigger_type_db(ref)
    if existing:
        # Already present - return as-is without touching the db.
        return existing

    model = TriggerTypeAPI.to_model(trigger_type_api)
    LOG.debug("verified trigger and formulated TriggerDB=%s", model)
    return TriggerType.add_or_update(
        model, log_not_unique_error_as_debug=log_not_unique_error_as_debug
    )
def create_shadow_trigger(trigger_type_db, log_not_unique_error_as_debug=False):
    """
    Create a shadow trigger for a TriggerType with no parameters.

    :param log_not_unique_error_as_debug: True to log NotUnique errors under debug
                                          instead of error log level. To be used in
                                          scenarios where failure is non-fatal
                                          (e.g. when services register internal
                                          trigger types, an idempotent operation).
    :type log_not_unique_error_as_debug: ``bool``
    """
    trigger_type_ref = trigger_type_db.get_reference().ref

    # Parameterized trigger types get their triggers created per rule instead.
    if trigger_type_db.parameters_schema:
        LOG.debug(
            "Skip shadow trigger for TriggerType with parameters %s.", trigger_type_ref
        )
        return None

    shadow_trigger = dict(
        name=trigger_type_db.name,
        pack=trigger_type_db.pack,
        type=trigger_type_ref,
        parameters={},
    )
    return create_or_update_trigger_db(
        shadow_trigger, log_not_unique_error_as_debug=log_not_unique_error_as_debug
    )
def create_or_update_trigger_type_db(trigger_type, log_not_unique_error_as_debug=False):
    """
    Create or update a trigger type db object in the db given trigger_type
    definition as dict.

    :param trigger_type: Trigger type model.
    :type trigger_type: ``dict``

    :param log_not_unique_error_as_debug: True to log NotUnique errors under debug
                                          instead of error log level. To be used in
                                          scenarios where failure is non-fatal
                                          (e.g. when services register internal
                                          trigger types, an idempotent operation).
    :type log_not_unique_error_as_debug: ``bool``

    :rtype: ``object``
    """
    if not isinstance(trigger_type, dict):
        raise ValueError(
            "The trigger has a value that is not a dictionary"
            f" (was {type(trigger_type)})."
        )

    api_instance = TriggerTypeAPI(**trigger_type)
    api_instance.validate()
    model = TriggerTypeAPI.to_model(api_instance)

    ref = ResourceReference.to_string_reference(name=model.name, pack=model.pack)

    existing_trigger_type_db = get_trigger_type_db(ref)
    is_update = bool(existing_trigger_type_db)
    if is_update:
        # Preserve the primary key so add_or_update performs an update.
        model.id = existing_trigger_type_db.id

    try:
        trigger_type_db = TriggerType.add_or_update(
            model,
            log_not_unique_error_as_debug=log_not_unique_error_as_debug,
        )
    except StackStormDBObjectConflictError:
        # Operation is idempotent and trigger could have already been created by
        # another process. Ignore object already exists because it simply means
        # there was a race and object is already in the database.
        trigger_type_db = get_trigger_type_db(ref)
        is_update = True

    extra = {"trigger_type_db": trigger_type_db}
    if is_update:
        LOG.audit(
            "TriggerType updated. TriggerType.id=%s" % (trigger_type_db.id), extra=extra
        )
    else:
        LOG.audit(
            "TriggerType created. TriggerType.id=%s" % (trigger_type_db.id), extra=extra
        )

    return trigger_type_db
def _create_trigger_type(
    pack,
    name,
    description=None,
    payload_schema=None,
    parameters_schema=None,
    tags=None,
    metadata_file=None,
):
    """Assemble a trigger type dict and persist it via create_or_update_trigger_type_db."""
    definition = dict(
        name=name,
        pack=pack,
        description=description,
        payload_schema=payload_schema,
        parameters_schema=parameters_schema,
        tags=tags,
        metadata_file=metadata_file,
    )
    return create_or_update_trigger_type_db(trigger_type=definition)
def _validate_trigger_type(trigger_type):
"""
XXX: We need validator objects that define the required and optional fields.
For now, manually check them.
"""
required_fields = ["name"]
for field in required_fields:
if field not in trigger_type:
raise TriggerTypeRegistrationException(
'Invalid trigger type. Missing field "%s"' % (field)
)
def _create_trigger(trigger_type):
    """
    Create a Trigger for a parameter-less TriggerType.

    :param trigger_type: TriggerType db object.
    :type trigger_type: :class:`TriggerTypeDB`

    :return: The created/updated TriggerDB for trigger types without a
             parameters schema, ``None`` otherwise.

    :raises TriggerTypeRegistrationException: If the trigger could not be created.
    """
    if (
        hasattr(trigger_type, "parameters_schema")
        and not trigger_type["parameters_schema"]
    ):
        trigger_dict = {
            "name": trigger_type.name,
            "pack": trigger_type.pack,
            "type": trigger_type.get_reference().ref,
        }
        try:
            return create_or_update_trigger_db(trigger=trigger_dict)
        # Fix: the original bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; only convert real errors to a registration error.
        except Exception:
            LOG.exception("Validation failed for Trigger=%s.", trigger_dict)
            raise TriggerTypeRegistrationException(
                "Unable to create Trigger for TriggerType=%s." % trigger_type.name
            )
    else:
        LOG.debug(
            "Won't create Trigger object as TriggerType %s expects " + "parameters.",
            trigger_type,
        )
        return None
def _add_trigger_models(trigger_type):
    """Register one trigger type dict; return (trigger_type_db, trigger_db)."""
    spec = trigger_type
    trigger_type_db = _create_trigger_type(
        pack=spec["pack"],
        name=spec["name"],
        description=spec.get("description", ""),
        payload_schema=spec.get("payload_schema", {}),
        parameters_schema=spec.get("parameters_schema", {}),
        tags=spec.get("tags", []),
        metadata_file=spec.get("metadata_file", None),
    )
    trigger_db = _create_trigger(trigger_type=trigger_type_db)
    return (trigger_type_db, trigger_db)
def add_trigger_models(trigger_types):
    """
    Register trigger types.

    :param trigger_types: A list of triggers to register.
    :type trigger_types: ``list`` of ``dict``

    :raises TriggerTypeRegistrationException: If a trigger type definition is
        missing a required field.

    :rtype: ``list`` of ``tuple`` (trigger_type, trigger)
    """
    # Validate every definition up front so we fail before persisting any of
    # them. (The original used a throw-away list comprehension purely for this
    # side effect; a plain loop states the intent.)
    for trigger_type in trigger_types:
        _validate_trigger_type(trigger_type)

    result = []
    for trigger_type in trigger_types:
        item = _add_trigger_models(trigger_type=trigger_type)
        if item:
            result.append(item)
    return result
| |
"""Windows console screen buffer handlers."""
import atexit
import ctypes
import re
import sys
from colorclass.codes import ANSICodeMapping, BASE_CODES
from colorclass.core import RE_SPLIT
# Console mode bit: when set the console interprets ANSI escape sequences
# itself (checked via GetConsoleMode in get_console_info()).
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
# Sentinel used both for failed GetStdHandle results and to mark a stream
# that must not be replaced.
INVALID_HANDLE_VALUE = -1
IS_WINDOWS = sys.platform == 'win32'
# Extracts the numeric payload of an ANSI SGR sequence, e.g. "\033[31;1m" -> "31;1".
RE_NUMBER_SEARCH = re.compile(r'\033\[([\d;]+)m')
# GetStdHandle argument values for the two standard streams.
STD_ERROR_HANDLE = -12
STD_OUTPUT_HANDLE = -11

# Color name -> Windows console attribute value. Negative values are special
# "reset" markers interpreted by WindowsStream.colors.
WINDOWS_CODES = {
    '/all': -33, '/fg': -39, '/bg': -49,

    'black': 0, 'red': 4, 'green': 2, 'yellow': 6, 'blue': 1, 'magenta': 5, 'cyan': 3, 'white': 7,

    'bgblack': -8, 'bgred': 64, 'bggreen': 32, 'bgyellow': 96, 'bgblue': 16, 'bgmagenta': 80, 'bgcyan': 48,
    'bgwhite': 112,

    'hiblack': 8, 'hired': 12, 'higreen': 10, 'hiyellow': 14, 'hiblue': 9, 'himagenta': 13, 'hicyan': 11, 'hiwhite': 15,

    'hibgblack': 128, 'hibgred': 192, 'hibggreen': 160, 'hibgyellow': 224, 'hibgblue': 144, 'hibgmagenta': 208,
    'hibgcyan': 176, 'hibgwhite': 240,

    '/black': -39, '/red': -39, '/green': -39, '/yellow': -39, '/blue': -39, '/magenta': -39, '/cyan': -39,
    '/white': -39, '/hiblack': -39, '/hired': -39, '/higreen': -39, '/hiyellow': -39, '/hiblue': -39, '/himagenta': -39,
    '/hicyan': -39, '/hiwhite': -39,

    '/bgblack': -49, '/bgred': -49, '/bggreen': -49, '/bgyellow': -49, '/bgblue': -49, '/bgmagenta': -49,
    '/bgcyan': -49, '/bgwhite': -49, '/hibgblack': -49, '/hibgred': -49, '/hibggreen': -49, '/hibgyellow': -49,
    '/hibgblue': -49, '/hibgmagenta': -49, '/hibgcyan': -49, '/hibgwhite': -49,
}
class COORD(ctypes.Structure):
    """COORD structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119."""

    # Field order and ctypes types must match the Win32 struct layout exactly.
    _fields_ = [
        ('X', ctypes.c_short),
        ('Y', ctypes.c_short),
    ]
class SmallRECT(ctypes.Structure):
    """SMALL_RECT structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311."""

    # Field order and ctypes types must match the Win32 struct layout exactly.
    _fields_ = [
        ('Left', ctypes.c_short),
        ('Top', ctypes.c_short),
        ('Right', ctypes.c_short),
        ('Bottom', ctypes.c_short),
    ]
class ConsoleScreenBufferInfo(ctypes.Structure):
    """CONSOLE_SCREEN_BUFFER_INFO structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093."""

    # Field order and ctypes types must match the Win32 struct layout exactly.
    _fields_ = [
        ('dwSize', COORD),
        ('dwCursorPosition', COORD),
        ('wAttributes', ctypes.c_ushort),
        ('srWindow', SmallRECT),
        ('dwMaximumWindowSize', COORD)
    ]
def init_kernel32(kernel32=None):
    """Load a unique instance of WinDLL into memory, set arg/return types, and get stdout/err handles.

    1. Since we are setting DLL function argument types and return types, we need to maintain our own instance of
       kernel32 to prevent overriding (or being overwritten by) user's own changes to ctypes.windll.kernel32.
    2. While we're doing all this we might as well get the handles to STDOUT and STDERR streams.
    3. If either stream has already been replaced set return value to INVALID_HANDLE_VALUE to indicate it shouldn't be
       replaced.

    :raise AttributeError: When called on a non-Windows platform.

    :param kernel32: Optional mock kernel32 object. For testing.

    :return: Loaded kernel32 instance, stderr handle (int), stdout handle (int).
    :rtype: tuple
    """
    if not kernel32:
        # Unique memory address keeps our argtypes/restype tweaks private.
        kernel32 = ctypes.LibraryLoader(ctypes.WinDLL).kernel32
        kernel32.GetStdHandle.argtypes = [ctypes.c_ulong]
        kernel32.GetStdHandle.restype = ctypes.c_void_p
        kernel32.GetConsoleScreenBufferInfo.argtypes = [
            ctypes.c_void_p,
            ctypes.POINTER(ConsoleScreenBufferInfo),
        ]
        kernel32.GetConsoleScreenBufferInfo.restype = ctypes.c_long

    # A stream carrying "_original_stream" has already been replaced; signal
    # the caller not to touch it by returning INVALID_HANDLE_VALUE.
    stderr = (
        INVALID_HANDLE_VALUE
        if hasattr(sys.stderr, '_original_stream')
        else kernel32.GetStdHandle(STD_ERROR_HANDLE)
    )
    stdout = (
        INVALID_HANDLE_VALUE
        if hasattr(sys.stdout, '_original_stream')
        else kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
    )
    return kernel32, stderr, stdout
def get_console_info(kernel32, handle):
    """Get information about this current console window.

    http://msdn.microsoft.com/en-us/library/windows/desktop/ms683231
    https://code.google.com/p/colorama/issues/detail?id=47
    https://bitbucket.org/pytest-dev/py/src/4617fe46/py/_io/terminalwriter.py

    Windows 10 Insider since around February 2016 finally introduced support for ANSI colors. No need to replace stdout
    and stderr streams to intercept colors and issue multiple SetConsoleTextAttribute() calls for these consoles.

    :raise OSError: When GetConsoleScreenBufferInfo or GetConsoleMode API calls fail.

    :param ctypes.windll.kernel32 kernel32: Loaded kernel32 instance.
    :param int handle: stderr or stdout handle.

    :return: Foreground and background colors (integers) as well as native ANSI support (bool).
    :rtype: tuple
    """
    # Query Win32 API; GetConsoleMode is only attempted when the first call
    # succeeds (mirrors the short-circuit of the original condition).
    csbi = ConsoleScreenBufferInfo()  # Populated by GetConsoleScreenBufferInfo.
    mode = ctypes.c_ulong()  # Populated by GetConsoleMode.
    ok = kernel32.GetConsoleScreenBufferInfo(handle, ctypes.byref(csbi))
    if ok:
        ok = kernel32.GetConsoleMode(handle, ctypes.byref(mode))
    if not ok:
        raise ctypes.WinError()

    # Low nibble of wAttributes holds the foreground, bits 4-7 the background.
    fg_color = csbi.wAttributes % 16
    bg_color = csbi.wAttributes & 240
    native_ansi = bool(mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    return fg_color, bg_color, native_ansi
def bg_color_native_ansi(kernel32, stderr, stdout):
    """Get background color and if console supports ANSI colors natively for both streams.

    Tries stderr first, then stdout; falls back to a black background and no
    native ANSI support when neither handle is usable.

    :param ctypes.windll.kernel32 kernel32: Loaded kernel32 instance.
    :param int stderr: stderr handle.
    :param int stdout: stdout handle.

    :return: Background color (int) and native ANSI support (bool).
    :rtype: tuple
    """
    for handle in (stderr, stdout):
        if handle == INVALID_HANDLE_VALUE:
            continue
        try:
            _, bg_color, native_ansi = get_console_info(kernel32, handle)
        except OSError:
            continue
        return bg_color, native_ansi
    return WINDOWS_CODES['black'], False
class WindowsStream(object):
    """Replacement stream which overrides sys.stdout or sys.stderr. When writing or printing, ANSI codes are converted.

    ANSI (Linux/Unix) color codes are converted into win32 system calls, changing the next character's color before
    printing it. Resources referenced:
        https://github.com/tartley/colorama
        http://www.cplusplus.com/articles/2ywTURfi/
        http://thomasfischer.biz/python-and-windows-terminal-colors/
        http://stackoverflow.com/questions/17125440/c-win32-console-color
        http://www.tysos.org/svn/trunk/mono/corlib/System/WindowsConsoleDriver.cs
        http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
        http://msdn.microsoft.com/en-us/library/windows/desktop/ms682088#_win32_character_attributes

    :cvar list ALL_BG_CODES: List of bg Windows codes. Used to determine if requested color is foreground or background.
    :cvar dict COMPILED_CODES: Translation dict. Keys are ANSI codes (values of BASE_CODES), values are Windows codes.
    :ivar int default_fg: Foreground Windows color code at the time of instantiation.
    :ivar int default_bg: Background Windows color code at the time of instantiation.
    """

    ALL_BG_CODES = [v for k, v in WINDOWS_CODES.items() if k.startswith('bg') or k.startswith('hibg')]
    COMPILED_CODES = dict((v, WINDOWS_CODES[k]) for k, v in BASE_CODES.items() if k in WINDOWS_CODES)

    def __init__(self, kernel32, stream_handle, original_stream):
        """Constructor.

        :param ctypes.windll.kernel32 kernel32: Loaded kernel32 instance.
        :param int stream_handle: stderr or stdout handle.
        :param original_stream: sys.stderr or sys.stdout before being overridden by this class' instance.
        """
        self._kernel32 = kernel32
        self._stream_handle = stream_handle
        self._original_stream = original_stream
        # Remember the colors active at startup so "reset" codes can restore them.
        self.default_fg, self.default_bg = self.colors

    def __getattr__(self, item):
        """If an attribute/function/etc is not defined in this function, retrieve the one from the original stream.

        Fixes ipython arrow key presses.
        """
        return getattr(self._original_stream, item)

    @property
    def colors(self):
        """Return the current foreground and background colors."""
        try:
            return get_console_info(self._kernel32, self._stream_handle)[:2]
        except OSError:
            # Not a real console (e.g. redirected stream); use white-on-black.
            return WINDOWS_CODES['white'], WINDOWS_CODES['black']

    @colors.setter
    def colors(self, color_code):
        """Change the foreground and background colors for subsequently printed characters.

        None resets colors to their original values (when class was instantiated).

        Since setting a color requires including both foreground and background codes (merged), setting just the
        foreground color resets the background color to black, and vice versa.

        This function first gets the current background and foreground colors, merges in the requested color code, and
        sets the result.

        However if we need to remove just the foreground color but leave the background color the same (or vice versa)
        such as when {/red} is used, we must merge the default foreground color with the current background color. This
        is the reason for those negative values.

        :param int color_code: Color code from WINDOWS_CODES.
        """
        if color_code is None:
            color_code = WINDOWS_CODES['/all']

        # Get current color code.
        current_fg, current_bg = self.colors

        # Handle special negative codes. Also determine the final color code.
        if color_code == WINDOWS_CODES['/fg']:
            final_color_code = self.default_fg | current_bg  # Reset the foreground only.
        elif color_code == WINDOWS_CODES['/bg']:
            final_color_code = current_fg | self.default_bg  # Reset the background only.
        elif color_code == WINDOWS_CODES['/all']:
            final_color_code = self.default_fg | self.default_bg  # Reset both.
        elif color_code == WINDOWS_CODES['bgblack']:
            final_color_code = current_fg  # Black background.
        else:
            new_is_bg = color_code in self.ALL_BG_CODES
            final_color_code = color_code | (current_fg if new_is_bg else current_bg)

        # Set new code.
        self._kernel32.SetConsoleTextAttribute(self._stream_handle, final_color_code)

    def write(self, p_str):
        """Write to stream.

        :param str p_str: string to print.
        """
        for segment in RE_SPLIT.split(p_str):
            if not segment:
                # Empty string. p_str probably starts with colors so the first item is always ''.
                continue
            if not RE_SPLIT.match(segment):
                # No color codes, print regular text.
                self._original_stream.write(segment)
                self._original_stream.flush()
                continue
            # Color-code segment: translate each ANSI number and apply it via
            # the colors setter (SetConsoleTextAttribute).
            for color_code in (int(c) for c in RE_NUMBER_SEARCH.findall(segment)[0].split(';')):
                if color_code in self.COMPILED_CODES:
                    self.colors = self.COMPILED_CODES[color_code]
class Windows(object):
    """Enable and disable Windows support for ANSI color character codes.

    Call static method Windows.enable() to enable color support for the remainder of the process' lifetime.

    This class is also a context manager. You can do this:
        with Windows():
            print(Color('{autored}Test{/autored}'))

    Or this:
        with Windows(auto_colors=True):
            print(Color('{autored}Test{/autored}'))
    """

    @classmethod
    def disable(cls):
        """Restore sys.stderr and sys.stdout to their original objects. Resets colors to their original values.

        :return: If streams restored successfully.
        :rtype: bool
        """
        # Skip if not on Windows.
        if not IS_WINDOWS:
            return False

        # Restore default colors (the WindowsStream.colors setter treats None
        # as a full reset).
        if hasattr(sys.stderr, '_original_stream'):
            getattr(sys, 'stderr').color = None
        if hasattr(sys.stdout, '_original_stream'):
            getattr(sys, 'stdout').color = None

        # Restore original streams.
        changed = False
        if hasattr(sys.stderr, '_original_stream'):
            changed = True
            sys.stderr = getattr(sys.stderr, '_original_stream')
        if hasattr(sys.stdout, '_original_stream'):
            changed = True
            sys.stdout = getattr(sys.stdout, '_original_stream')
        return changed

    @staticmethod
    def is_enabled():
        """Return True if either stderr or stdout has colors enabled."""
        return hasattr(sys.stderr, '_original_stream') or hasattr(sys.stdout, '_original_stream')

    @classmethod
    def enable(cls, auto_colors=False, reset_atexit=False):
        """Enable color text with print() or sys.stdout.write() (stderr too).

        :param bool auto_colors: Automatically selects dark or light colors based on current terminal's background
            color. Only works with {autored} and related tags.
        :param bool reset_atexit: Resets original colors upon Python exit (in case you forget to reset it yourself with
            a closing tag). Does nothing on native ANSI consoles.

        :return: If streams replaced successfully.
        :rtype: bool
        """
        if not IS_WINDOWS:
            return False  # Windows only.

        # Get values from init_kernel32().
        kernel32, stderr, stdout = init_kernel32()
        if stderr == INVALID_HANDLE_VALUE and stdout == INVALID_HANDLE_VALUE:
            return False  # No valid handles, nothing to do.

        # Get console info.
        bg_color, native_ansi = bg_color_native_ansi(kernel32, stderr, stdout)

        # Set auto colors: pick the palette that reads well on the current
        # background attribute.
        if auto_colors:
            if bg_color in (112, 96, 240, 176, 224, 208, 160):
                ANSICodeMapping.set_light_background()
            else:
                ANSICodeMapping.set_dark_background()

        # Don't replace streams if ANSI codes are natively supported.
        if native_ansi:
            return False

        # Reset on exit if requested.
        if reset_atexit:
            atexit.register(cls.disable)

        # Overwrite stream references.
        if stderr != INVALID_HANDLE_VALUE:
            sys.stderr.flush()
            sys.stderr = WindowsStream(kernel32, stderr, sys.stderr)
        if stdout != INVALID_HANDLE_VALUE:
            sys.stdout.flush()
            sys.stdout = WindowsStream(kernel32, stdout, sys.stdout)

        return True

    def __init__(self, auto_colors=False):
        """Constructor."""
        self.auto_colors = auto_colors

    def __enter__(self):
        """Context manager, enables colors on Windows."""
        self.enable(auto_colors=self.auto_colors)

    def __exit__(self, *_):
        """Context manager, disabled colors on Windows."""
        self.disable()
| |
# -*- coding: utf-8 -*-
"""
Magic Reload Library
Luke Campagnola 2010
Python reload function that actually works (the way you expect it to)
- No re-importing necessary
- Modules can be reloaded in any order
- Replaces functions and methods with their updated code
- Changes instances to use updated classes
- Automatically decides which modules to update by comparing file modification times
Does NOT:
- re-initialize existing instances, even if __init__ changes
- update references to any module-level objects
ie, this does not reload correctly:
from module import someObject
print someObject
..but you can use this instead: (this works even for the builtin reload)
import module
print module.someObject
"""
import inspect, os, sys, __builtin__, gc, traceback
from debug import printExc
def reloadAll(prefix=None, debug=False):
    """Automatically reload everything whose __file__ begins with prefix.
    - Skips reload if the file has not been updated (if .pyc is newer than .py)
    - if prefix is None, checks all loaded modules
    """
    ## don't use iteritems; the module table may change size during reload
    for name, module in sys.modules.items():
        if not inspect.ismodule(module):
            continue
        if name == '__main__':
            continue
        ## Ignore modules without a python source/bytecode file name
        if not hasattr(module, '__file__'):
            continue
        fileName = module.__file__
        if os.path.splitext(fileName)[1] not in ['.py', '.pyc']:
            continue
        ## Ignore anything outside the requested prefix
        if prefix is not None and fileName[:len(prefix)] != prefix:
            continue
        ## Skip modules whose compiled file is at least as new as the source
        source = os.path.splitext(fileName)[0] + '.py'
        compiled = source + 'c'
        if (os.path.isfile(compiled) and os.path.isfile(source)
                and os.stat(compiled).st_mtime >= os.stat(source).st_mtime):
            #if debug:
                #print "Ignoring module %s; unchanged" % str(mod)
            continue
        try:
            reload(module, debug=debug)
        except:
            printExc("Error while reloading module %s, skipping\n" % module)
def reload(module, debug=False, lists=False, dicts=False):
"""Replacement for the builtin reload function:
- Reloads the module as usual
- Updates all old functions and class methods to use the new code
- Updates all instances of each modified class to use the new class
- Can update lists and dicts, but this is disabled by default
- Requires that class and function names have not changed
"""
if debug:
print "Reloading", module
## make a copy of the old module dictionary, reload, then grab the new module dictionary for comparison
oldDict = module.__dict__.copy()
__builtin__.reload(module)
newDict = module.__dict__
## Allow modules access to the old dictionary after they reload
if hasattr(module, '__reload__'):
module.__reload__(oldDict)
## compare old and new elements from each dict; update where appropriate
for k in oldDict:
old = oldDict[k]
new = newDict.get(k, None)
if old is new or new is None:
continue
if inspect.isclass(old):
if debug:
print " Updating class %s.%s (0x%x -> 0x%x)" % (module.__name__, k, id(old), id(new))
updateClass(old, new, debug)
elif inspect.isfunction(old):
depth = updateFunction(old, new, debug)
if debug:
extra = ""
if depth > 0:
extra = " (and %d previous versions)" % depth
print " Updating function %s.%s%s" % (module.__name__, k, extra)
elif lists and isinstance(old, list):
l = old.len()
old.extend(new)
for i in range(l):
old.pop(0)
elif dicts and isinstance(old, dict):
old.update(new)
for k in old:
if k not in new:
del old[k]
## For functions:
##  1) update the code and defaults to new versions.
##  2) keep a reference to the previous version so ALL versions get updated for every reload
def updateFunction(old, new, debug, depth=0, visited=None):
    """Make the function object `old` execute `new`'s code in place.

    Also walks the chain of previously reloaded versions (linked through
    __previous_reload_version__) so every old version picks up the new code.
    Returns the depth of the longest chain that was updated.
    """
    #if debug and depth > 0:
        #print "  -> also updating previous version", old, " -> ", new
    if visited is None:
        visited = []
    ## BUG FIX: check for cycles BEFORE mutating `old` (the original mutated
    ## first and, on a revisit, returned None instead of a depth, which then
    ## propagated None up to the caller's maxDepth).
    if old in visited:
        return depth
    visited.append(old)
    old.__code__ = new.__code__
    old.__defaults__ = new.__defaults__
    ## finally, update any previous versions still hanging around..
    if hasattr(old, '__previous_reload_version__'):
        maxDepth = updateFunction(old.__previous_reload_version__, new, debug,
                                  depth=depth+1, visited=visited)
    else:
        maxDepth = depth
    ## We need to keep a pointer to the previous version so we remember to update BOTH
    ## when the next reload comes around.
    if depth == 0:
        new.__previous_reload_version__ = old
    return maxDepth
## For classes:
##  1) find all instances of the old class and set instance.__class__ to the new class
##  2) update all old class methods to use code from the new class methods
def updateClass(old, new, debug):
    """Migrate instances and subclasses of `old` to use `new` in place."""
    ## Track down all instances and subclasses of old
    refs = gc.get_referrers(old)
    for ref in refs:
        try:
            if isinstance(ref, old) and ref.__class__ is old:
                ## direct instance: simply repoint its class
                ref.__class__ = new
                if debug:
                    print " Changed class for", safeStr(ref)
            elif inspect.isclass(ref) and issubclass(ref, old) and old in ref.__bases__:
                ind = ref.__bases__.index(old)
                ## Does not work:
                #ref.__bases__ = ref.__bases__[:ind] + (new,) + ref.__bases__[ind+1:]
                ## reason: Even though we change the code on methods, they remain bound
                ## to their old classes (changing im_class is not allowed). Instead,
                ## we have to update the __bases__ such that this class will be allowed
                ## as an argument to older methods.
                ## This seems to work. Is there any reason not to?
                ## Note that every time we reload, the class hierarchy becomes more complex.
                ## (and I presume this may slow things down?)
                ref.__bases__ = ref.__bases__[:ind] + (new,old) + ref.__bases__[ind+1:]
                if debug:
                    print " Changed superclass for", safeStr(ref)
            #else:
                #if debug:
                    #print " Ignoring reference", type(ref)
        except:
            print "Error updating reference (%s) for class change (%s -> %s)" % (safeStr(ref), safeStr(old), safeStr(new))
            raise
    ## update all class methods to use new code.
    ## Generally this is not needed since instances already know about the new class,
    ## but it fixes a few specific cases (pyqt signals, for one)
    for attr in dir(old):
        oa = getattr(old, attr)
        if inspect.ismethod(oa):
            try:
                na = getattr(new, attr)
            except AttributeError:
                if debug:
                    print " Skipping method update for %s; new class does not have this attribute" % attr
                continue
            ## im_func is the raw function behind a py2 bound/unbound method
            if hasattr(oa, 'im_func') and hasattr(na, 'im_func') and oa.im_func is not na.im_func:
                depth = updateFunction(oa.im_func, na.im_func, debug)
                #oa.im_class = new  ## bind old method to new class  ## not allowed
                if debug:
                    extra = ""
                    if depth > 0:
                        extra = " (and %d previous versions)" % depth
                    print " Updating method %s%s" % (attr, extra)
    ## And copy in new functions that didn't exist previously
    for attr in dir(new):
        if not hasattr(old, attr):
            if debug:
                print " Adding missing attribute", attr
            setattr(old, attr, getattr(new, attr))
    ## finally, update any previous versions still hanging around..
    if hasattr(old, '__previous_reload_version__'):
        updateClass(old.__previous_reload_version__, new, debug)
## It is possible to build classes for which str(obj) just causes an exception.
## Avoid thusly:
def safeStr(obj):
    """Return str(obj), falling back to repr(obj) and then to a generic
    "<instance of ...>" placeholder so that objects whose __str__/__repr__
    raise cannot crash diagnostic printing.

    BUG FIX: the bare `except:` clauses also swallowed KeyboardInterrupt
    and SystemExit; narrowed to `except Exception`.
    """
    try:
        return str(obj)
    except Exception:
        pass
    try:
        return repr(obj)
    except Exception:
        ## last resort: only uses the type and the id, which cannot raise
        return "<instance of %s at 0x%x>" % (safeStr(type(obj)), id(obj))
## Tests:
#  write modules to disk, import, then re-write and run again
if __name__ == '__main__':
    doQtTest = True
    try:
        from PyQt4 import QtCore
        if not hasattr(QtCore, 'Signal'):
            QtCore.Signal = QtCore.pyqtSignal
        #app = QtGui.QApplication([])
        class Btn(QtCore.QObject):
            sig = QtCore.Signal()
            def emit(self):
                self.sig.emit()
        btn = Btn()
    except:
        raise
        ## NOTE(review): the `raise` above makes the fallback below
        ## unreachable; remove it to actually skip Qt tests on failure.
        print "Error; skipping Qt tests"
        doQtTest = False

    import os
    if not os.path.isdir('test1'):
        os.mkdir('test1')
    open('test1/__init__.py', 'w')
    modFile1 = "test1/test1.py"
    ## Module source written to disk; %%-escapes survive the later
    ## modCode1 % (n, n) substitution of the two %d placeholders.
    modCode1 = """
import sys
class A(object):
    def __init__(self, msg):
        object.__init__(self)
        self.msg = msg
    def fn(self, pfx = ""):
        print pfx+"A class:", self.__class__, id(self.__class__)
        print pfx+" %%s: %d" %% self.msg
class B(A):
    def fn(self, pfx=""):
        print pfx+"B class:", self.__class__, id(self.__class__)
        print pfx+" %%s: %d" %% self.msg
        print pfx+" calling superclass.. (%%s)" %% id(A)
        A.fn(self, " ")
"""
    modFile2 = "test2.py"
    modCode2 = """
from test1.test1 import A
from test1.test1 import B
a1 = A("ax1")
b1 = B("bx1")
class C(A):
    def __init__(self, msg):
        #print "| C init:"
        #print "|   C.__bases__ = ", map(id, C.__bases__)
        #print "|   A:", id(A)
        #print "|   A.__init__ = ", id(A.__init__.im_func), id(A.__init__.im_func.__code__), id(A.__init__.im_class)
        A.__init__(self, msg + "(init from C)")
def fn():
    print "fn: %s"
"""
    ## write version 1 of both modules and import them
    open(modFile1, 'w').write(modCode1%(1,1))
    open(modFile2, 'w').write(modCode2%"message 1")
    import test1.test1 as test1
    import test2
    print "Test 1 originals:"
    A1 = test1.A
    B1 = test1.B
    a1 = test1.A("a1")
    b1 = test1.B("b1")
    a1.fn()
    b1.fn()
    #print "function IDs  a1 bound method: %d a1 func: %d  a1 class: %d  b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))

    from test2 import fn, C
    if doQtTest:
        print "Button test before:"
        btn.sig.connect(fn)
        btn.sig.connect(a1.fn)
        btn.emit()
        #btn.sig.emit()
        print ""
    #print "a1.fn referrers:", sys.getrefcount(a1.fn.im_func), gc.get_referrers(a1.fn.im_func)

    print "Test2 before reload:"
    fn()
    oldfn = fn
    test2.a1.fn()
    test2.b1.fn()
    c1 = test2.C('c1')
    c1.fn()

    ## remove stale bytecode, rewrite version 2, and reload test1 only
    os.remove(modFile1+'c')
    open(modFile1, 'w').write(modCode1%(2,2))
    print "\n----RELOAD test1-----\n"
    reloadAll(os.path.abspath(__file__)[:10], debug=True)
    print "Subclass test:"
    c2 = test2.C('c2')
    c2.fn()

    os.remove(modFile2+'c')
    open(modFile2, 'w').write(modCode2%"message 2")
    print "\n----RELOAD test2-----\n"
    reloadAll(os.path.abspath(__file__)[:10], debug=True)
    if doQtTest:
        print "Button test after:"
        btn.emit()
        #btn.sig.emit()
    #print "a1.fn referrers:", sys.getrefcount(a1.fn.im_func), gc.get_referrers(a1.fn.im_func)
    print "Test2 after reload:"
    fn()
    test2.a1.fn()
    test2.b1.fn()
    print "\n==> Test 1 Old instances:"
    a1.fn()
    b1.fn()
    c1.fn()
    #print "function IDs  a1 bound method: %d a1 func: %d  a1 class: %d  b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))
    print "\n==> Test 1 New instances:"
    a2 = test1.A("a2")
    b2 = test1.B("b2")
    a2.fn()
    b2.fn()
    c2 = test2.C('c2')
    c2.fn()
    #print "function IDs  a1 bound method: %d a1 func: %d  a1 class: %d  b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))

    ## third round: rewrite both modules and reload everything at once
    os.remove(modFile1+'c')
    os.remove(modFile2+'c')
    open(modFile1, 'w').write(modCode1%(3,3))
    open(modFile2, 'w').write(modCode2%"message 3")
    print "\n----RELOAD-----\n"
    reloadAll(os.path.abspath(__file__)[:10], debug=True)
    if doQtTest:
        print "Button test after:"
        btn.emit()
        #btn.sig.emit()
    #print "a1.fn referrers:", sys.getrefcount(a1.fn.im_func), gc.get_referrers(a1.fn.im_func)
    print "Test2 after reload:"
    fn()
    test2.a1.fn()
    test2.b1.fn()
    print "\n==> Test 1 Old instances:"
    a1.fn()
    b1.fn()
    print "function IDs  a1 bound method: %d a1 func: %d  a1 class: %d  b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))
    print "\n==> Test 1 New instances:"
    a2 = test1.A("a2")
    b2 = test1.B("b2")
    a2.fn()
    b2.fn()
    print "function IDs  a1 bound method: %d a1 func: %d  a1 class: %d  b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))

    ## clean up the generated test modules
    os.remove(modFile1)
    os.remove(modFile2)
    os.remove(modFile1+'c')
    os.remove(modFile2+'c')
    os.system('rm -r test1')
#
# Failure graveyard ahead:
#
"""Reload Importer:
Hooks into import system to
1) keep a record of module dependencies as they are imported
2) make sure modules are always reloaded in correct order
3) update old classes and functions to use reloaded code"""
#import imp, sys
## python's import hook mechanism doesn't work since we need to be
## informed every time there is an import statement, not just for new imports
#class ReloadImporter:
#def __init__(self):
#self.depth = 0
#def find_module(self, name, path):
#print " "*self.depth + "find: ", name, path
##if name == 'PyQt4' and path is None:
##print "PyQt4 -> PySide"
##self.modData = imp.find_module('PySide')
##return self
##return None ## return none to allow the import to proceed normally; return self to intercept with load_module
#self.modData = imp.find_module(name, path)
#self.depth += 1
##sys.path_importer_cache = {}
#return self
#def load_module(self, name):
#mod = imp.load_module(name, *self.modData)
#self.depth -= 1
#print " "*self.depth + "load: ", name
#return mod
#def pathHook(path):
#print "path hook:", path
#raise ImportError
#sys.path_hooks.append(pathHook)
#sys.meta_path.append(ReloadImporter())
### replace __import__ with a wrapper that tracks module dependencies
#modDeps = {}
#reloadModule = None
#origImport = __builtins__.__import__
#def _import(name, globals=None, locals=None, fromlist=None, level=-1, stack=[]):
### Note that stack behaves as a static variable.
##print " "*len(importStack) + "import %s" % args[0]
#stack.append(set())
#mod = origImport(name, globals, locals, fromlist, level)
#deps = stack.pop()
#if len(stack) > 0:
#stack[-1].add(mod)
#elif reloadModule is not None: ## If this is the top level import AND we're inside a module reload
#modDeps[reloadModule].add(mod)
#if mod in modDeps:
#modDeps[mod] |= deps
#else:
#modDeps[mod] = deps
#return mod
#__builtins__.__import__ = _import
### replace
#origReload = __builtins__.reload
#def _reload(mod):
#reloadModule = mod
#ret = origReload(mod)
#reloadModule = None
#return ret
#__builtins__.reload = _reload
#def reload(mod, visited=None):
#if visited is None:
#visited = set()
#if mod in visited:
#return
#visited.add(mod)
#for dep in modDeps.get(mod, []):
#reload(dep, visited)
#__builtins__.reload(mod)
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible, it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
import numpy
import tensorflow as tf
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'  # local download/cache directory
IMAGE_SIZE = 28          # MNIST images are 28x28 pixels
NUM_CHANNELS = 1         # grayscale
PIXEL_DEPTH = 255        # raw pixel values are in [0, 255]
NUM_LABELS = 10          # digits 0-9
VALIDATION_SIZE = 5000  # Size of the validation set.
SEED = 66478  # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100  # Number of steps between evaluations.

# Command-line flag: run a short self-test on fake data instead of training.
tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
FLAGS = tf.app.flags.FLAGS
def maybe_download(filename):
    """Return the local path of `filename`, downloading it from Yann's web site first if needed."""
    if not tf.gfile.Exists(WORK_DIRECTORY):
        tf.gfile.MakeDirs(WORK_DIRECTORY)
    filepath = os.path.join(WORK_DIRECTORY, filename)
    if tf.gfile.Exists(filepath):
        return filepath
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    with tf.gfile.GFile(filepath) as handle:
        size = handle.Size()
    print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
def extract_data(filename, num_images):
    """Extract the images into a 4D tensor [image index, y, x, channels].

    Values are rescaled from [0, 255] down to [-0.5, 0.5].
    """
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        stream.read(16)  # skip the IDX image-file header
        raw = stream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
    pixels = numpy.frombuffer(raw, dtype=numpy.uint8).astype(numpy.float32)
    pixels = (pixels - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
    return pixels.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
def extract_labels(filename, num_images):
    """Extract the labels into a vector of int64 label IDs."""
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        stream.read(8)  # skip the IDX label-file header
        raw = stream.read(1 * num_images)
    return numpy.frombuffer(raw, dtype=numpy.uint8).astype(numpy.int64)
def fake_data(num_images):
    """Generate a fake dataset that matches the dimensions of MNIST."""
    data = numpy.ndarray(
        shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
        dtype=numpy.float32)
    labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
    # Alternate labels 0/1 by image index; fill channel 0 of each image
    # with label - 0.5 (i.e. a constant -0.5 or +0.5 plane).
    parity = numpy.arange(num_images, dtype=numpy.int64) % 2
    labels[:] = parity
    data[..., 0] = (parity - 0.5)[:, None, None]
    return data, labels
def error_rate(predictions, labels):
    """Return the error rate based on dense predictions and sparse labels."""
    correct = numpy.sum(numpy.argmax(predictions, 1) == labels)
    return 100.0 - (100.0 * correct / predictions.shape[0])
def main(argv=None):  # pylint: disable=unused-argument
    """Build the LeNet-5-like graph, train it on MNIST (or fake data when
    --self_test is set), and print the final test error rate."""
    if FLAGS.self_test:
        print('Running self-test.')
        train_data, train_labels = fake_data(256)
        validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
        test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
        num_epochs = 1
    else:
        # Get the data.
        train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
        train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
        test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
        test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
        # Extract it into numpy arrays.
        train_data = extract_data(train_data_filename, 60000)
        train_labels = extract_labels(train_labels_filename, 60000)
        test_data = extract_data(test_data_filename, 10000)
        test_labels = extract_labels(test_labels_filename, 10000)
        # Generate a validation set.
        validation_data = train_data[:VALIDATION_SIZE, ...]
        validation_labels = train_labels[:VALIDATION_SIZE]
        train_data = train_data[VALIDATION_SIZE:, ...]
        train_labels = train_labels[VALIDATION_SIZE:]
        num_epochs = NUM_EPOCHS
    train_size = train_labels.shape[0]

    # This is where training samples and labels are fed to the graph.
    # These placeholder nodes will be fed a batch of training data at each
    # training step using the {feed_dict} argument to the Run() call below.
    train_data_node = tf.placeholder(
        tf.float32,
        shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
    train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
    eval_data = tf.placeholder(
        tf.float32,
        shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))

    # The variables below hold all the trainable weights. They are passed an
    # initial value which will be assigned when we call:
    # {tf.initialize_all_variables().run()}
    conv1_weights = tf.Variable(
        tf.truncated_normal([5, 5, NUM_CHANNELS, 32],  # 5x5 filter, depth 32.
                            stddev=0.1,
                            seed=SEED))
    conv1_biases = tf.Variable(tf.zeros([32]))
    conv2_weights = tf.Variable(
        tf.truncated_normal([5, 5, 32, 64],
                            stddev=0.1,
                            seed=SEED))
    conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
    fc1_weights = tf.Variable(  # fully connected, depth 512.
        tf.truncated_normal(
            # IMAGE_SIZE // 4: two 2x2 max-pools each halve the spatial dims.
            [IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
            stddev=0.1,
            seed=SEED))
    fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
    fc2_weights = tf.Variable(
        tf.truncated_normal([512, NUM_LABELS],
                            stddev=0.1,
                            seed=SEED))
    fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))

    # We will replicate the model structure for the training subgraph, as well
    # as the evaluation subgraphs, while sharing the trainable parameters.
    def model(data, train=False):
        """The Model definition."""
        # 2D convolution, with 'SAME' padding (i.e. the output feature map has
        # the same size as the input). Note that {strides} is a 4D array whose
        # shape matches the data layout: [image index, y, x, depth].
        conv = tf.nn.conv2d(data,
                            conv1_weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
        # Bias and rectified linear non-linearity.
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
        # Max pooling. The kernel size spec {ksize} also follows the layout of
        # the data. Here we have a pooling window of 2, and a stride of 2.
        pool = tf.nn.max_pool(relu,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')
        conv = tf.nn.conv2d(pool,
                            conv2_weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
        pool = tf.nn.max_pool(relu,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')
        # Reshape the feature map cuboid into a 2D matrix to feed it to the
        # fully connected layers.
        pool_shape = pool.get_shape().as_list()
        reshape = tf.reshape(
            pool,
            [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
        # Fully connected layer. Note that the '+' operation automatically
        # broadcasts the biases.
        hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
        # Add a 50% dropout during training only. Dropout also scales
        # activations such that no rescaling is needed at evaluation time.
        if train:
            hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
        return tf.matmul(hidden, fc2_weights) + fc2_biases

    # Training computation: logits + cross-entropy loss.
    logits = model(train_data_node, True)
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits, train_labels_node))
    # L2 regularization for the fully connected parameters.
    regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                    tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
    # Add the regularization term to the loss.
    loss += 5e-4 * regularizers

    # Optimizer: set up a variable that's incremented once per batch and
    # controls the learning rate decay.
    batch = tf.Variable(0)
    # Decay once per epoch, using an exponential schedule starting at 0.01.
    learning_rate = tf.train.exponential_decay(
        0.01,                # Base learning rate.
        batch * BATCH_SIZE,  # Current index into the dataset.
        train_size,          # Decay step.
        0.95,                # Decay rate.
        staircase=True)
    # Use simple momentum for the optimization.
    optimizer = tf.train.MomentumOptimizer(learning_rate,
                                           0.9).minimize(loss,
                                                         global_step=batch)

    # Predictions for the current training minibatch.
    train_prediction = tf.nn.softmax(logits)
    # Predictions for the test and validation, which we'll compute less often.
    eval_prediction = tf.nn.softmax(model(eval_data))

    # Small utility function to evaluate a dataset by feeding batches of data to
    # {eval_data} and pulling the results from {eval_predictions}.
    # Saves memory and enables this to run on smaller GPUs.
    def eval_in_batches(data, sess):
        """Get all predictions for a dataset by running it in small batches."""
        size = data.shape[0]
        if size < EVAL_BATCH_SIZE:
            raise ValueError("batch size for evals larger than dataset: %d" % size)
        predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
        for begin in xrange(0, size, EVAL_BATCH_SIZE):
            end = begin + EVAL_BATCH_SIZE
            if end <= size:
                predictions[begin:end, :] = sess.run(
                    eval_prediction,
                    feed_dict={eval_data: data[begin:end, ...]})
            else:
                # Final partial batch: run the last EVAL_BATCH_SIZE rows and
                # keep only the tail that belongs to this batch.
                batch_predictions = sess.run(
                    eval_prediction,
                    feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
                predictions[begin:, :] = batch_predictions[begin - size:, :]
        return predictions

    # Create a local session to run the training.
    start_time = time.time()
    with tf.Session() as sess:
        # Run all the initializers to prepare the trainable parameters.
        tf.initialize_all_variables().run()
        print('Initialized!')
        # Loop through training steps.
        for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
            # Compute the offset of the current minibatch in the data.
            # Note that we could use better randomization across epochs.
            offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
            batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
            batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
            # This dictionary maps the batch data (as a numpy array) to the
            # node in the graph it should be fed to.
            feed_dict = {train_data_node: batch_data,
                         train_labels_node: batch_labels}
            # Run the graph and fetch some of the nodes.
            _, l, lr, predictions = sess.run(
                [optimizer, loss, learning_rate, train_prediction],
                feed_dict=feed_dict)
            if step % EVAL_FREQUENCY == 0:
                elapsed_time = time.time() - start_time
                start_time = time.time()
                print('Step %d (epoch %.2f), %.1f ms' %
                      (step, float(step) * BATCH_SIZE / train_size,
                       1000 * elapsed_time / EVAL_FREQUENCY))
                print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
                print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
                print('Validation error: %.1f%%' % error_rate(
                    eval_in_batches(validation_data, sess), validation_labels))
                sys.stdout.flush()
        # Finally print the result!
        test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
        print('Test error: %.1f%%' % test_error)
        if FLAGS.self_test:
            print('test_error', test_error)
            assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
                test_error,)
if __name__ == '__main__':
    # Parse command-line flags, then invoke main().
    tf.app.run()
| |
"""Support for Telldus Live."""
import asyncio
import logging
from functools import partial
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant import config_entries
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_call_later
from . import config_flow  # noqa: F401 pylint: disable=unused-import
from .const import (
CONF_HOST,
DOMAIN,
KEY_SCAN_INTERVAL,
KEY_SESSION,
MIN_UPDATE_INTERVAL,
NOT_SO_PRIVATE_KEY,
PUBLIC_KEY,
SCAN_INTERVAL,
SIGNAL_UPDATE_ENTITY,
TELLDUS_DISCOVERY_NEW,
)
APPLICATION_NAME = "Home Assistant"

_LOGGER = logging.getLogger(__name__)

# YAML configuration: optional host (defaults to the cloud service, i.e. the
# DOMAIN string) and a scan interval clamped to the minimum supported value.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_HOST, default=DOMAIN): cv.string,
                vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): vol.All(
                    cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL)
                ),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Keys used to stash integration state in hass.data.
DATA_CONFIG_ENTRY_LOCK = "tellduslive_config_entry_lock"
CONFIG_ENTRY_IS_SETUP = "telldus_config_entry_is_setup"
NEW_CLIENT_TASK = "telldus_new_client_task"
INTERVAL_TRACKER = "{}_INTERVAL".format(DOMAIN)
async def async_setup_entry(hass, entry):
    """Create a tellduslive session."""
    from tellduslive import Session

    session_conf = entry.data[KEY_SESSION]
    if CONF_HOST in session_conf:
        # Constructing a Session for a local device does blocking IO,
        # so build it in the executor.
        session = await hass.async_add_executor_job(
            partial(Session, **session_conf)
        )
    else:
        session = Session(
            PUBLIC_KEY, NOT_SO_PRIVATE_KEY, application=APPLICATION_NAME, **session_conf
        )

    if not session.is_authorized:
        _LOGGER.error("Authentication Error")
        return False

    hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
    hass.data[CONFIG_ENTRY_IS_SETUP] = set()
    hass.data[NEW_CLIENT_TASK] = hass.loop.create_task(
        async_new_client(hass, session, entry)
    )
    return True
async def async_new_client(hass, session, entry):
    """Add the hubs associated with the current client to device_registry."""
    interval = entry.data[KEY_SCAN_INTERVAL]
    _LOGGER.debug("Update interval %s seconds.", interval)
    client = TelldusLiveClient(hass, entry, session, interval)
    hass.data[DOMAIN] = client
    registry = await hass.helpers.device_registry.async_get_registry()
    hubs = await client.async_get_hubs()
    for hub in hubs:
        _LOGGER.debug("Connected hub %s", hub["name"])
        registry.async_get_or_create(
            config_entry_id=entry.entry_id,
            identifiers={(DOMAIN, hub["id"])},
            manufacturer="Telldus",
            name=hub["name"],
            model=hub["type"],
            sw_version=hub["version"],
        )
    # Kick off the first poll; it reschedules itself afterwards.
    await client.update()
async def async_setup(hass, config):
    """Set up the Telldus Live component."""
    if DOMAIN in config:
        # Import YAML configuration into a config entry.
        domain_config = config[DOMAIN]
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={"source": config_entries.SOURCE_IMPORT},
                data={
                    CONF_HOST: domain_config.get(CONF_HOST),
                    KEY_SCAN_INTERVAL: domain_config[CONF_SCAN_INTERVAL],
                },
            )
        )
    return True
async def async_unload_entry(hass, config_entry):
    """Unload a config entry."""
    if not hass.data[NEW_CLIENT_TASK].done():
        hass.data[NEW_CLIENT_TASK].cancel()
    # BUG FIX: the interval tracker is only registered after the first
    # update() has run; unloading before then used to raise KeyError.
    interval_tracker = hass.data.pop(INTERVAL_TRACKER, None)
    if interval_tracker is not None:
        interval_tracker()
    components = hass.data.pop(CONFIG_ENTRY_IS_SETUP)
    # BUG FIX: asyncio.wait() raises ValueError on an empty collection, so
    # only wait when at least one platform was actually forwarded.
    if components:
        await asyncio.wait(
            [
                hass.config_entries.async_forward_entry_unload(config_entry, component)
                for component in components
            ]
        )
    del hass.data[DOMAIN]
    del hass.data[DATA_CONFIG_ENTRY_LOCK]
    return True
class TelldusLiveClient:
    """Get the latest data and update the states."""

    def __init__(self, hass, config_entry, session, interval):
        """Initialize the Telldus data object."""
        # Device ids that have already been discovered/dispatched.
        self._known_devices = set()
        # Cache of device info dicts, keyed by device id.
        self._device_infos = {}
        self._hass = hass
        self._config_entry = config_entry
        self._client = session
        self._interval = interval

    async def async_get_hubs(self):
        """Return hubs registered for the user."""
        clients = await self._hass.async_add_executor_job(self._client.get_clients)
        return clients or []

    def device_info(self, device_id):
        """Return cached device info for device_id (or None if unknown)."""
        return self._device_infos.get(device_id)

    @staticmethod
    def identify_device(device):
        """Find out what type of HA component to create."""
        if device.is_sensor:
            return "sensor"
        from tellduslive import DIM, UP, TURNON
        # Check the most capable method bit first:
        # dimmable -> light, up/down -> cover, on/off -> switch.
        if device.methods & DIM:
            return "light"
        if device.methods & UP:
            return "cover"
        if device.methods & TURNON:
            return "switch"
        if device.methods == 0:
            return "binary_sensor"
        _LOGGER.warning("Unidentified device type (methods: %d)", device.methods)
        return "switch"

    async def _discover(self, device_id):
        """Discover the component."""
        device = self._client.device(device_id)
        component = self.identify_device(device)
        self._device_infos.update(
            {device_id: await self._hass.async_add_executor_job(device.info)}
        )
        # Forward the config entry to a platform the first time a device of
        # that component type appears; the lock serialises discoveries.
        async with self._hass.data[DATA_CONFIG_ENTRY_LOCK]:
            if component not in self._hass.data[CONFIG_ENTRY_IS_SETUP]:
                await self._hass.config_entries.async_forward_entry_setup(
                    self._config_entry, component
                )
                self._hass.data[CONFIG_ENTRY_IS_SETUP].add(component)
        device_ids = []
        if device.is_sensor:
            # A sensor device yields one entity per (id, name, scale) item.
            for item in device.items:
                device_ids.append((device.device_id, item.name, item.scale))
        else:
            device_ids.append(device_id)
        for _id in device_ids:
            async_dispatcher_send(
                self._hass, TELLDUS_DISCOVERY_NEW.format(component, DOMAIN), _id
            )

    async def update(self, *args):
        """Periodically poll the servers for current state."""
        try:
            if not await self._hass.async_add_executor_job(self._client.update):
                _LOGGER.warning("Failed request")
                return
            dev_ids = {dev.device_id for dev in self._client.devices}
            new_devices = dev_ids - self._known_devices
            # just await each discover as `gather` use up all HTTPAdapter pools
            for d_id in new_devices:
                await self._discover(d_id)
            self._known_devices |= new_devices
            async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY)
        finally:
            # Always reschedule the next poll, even when this one failed.
            self._hass.data[INTERVAL_TRACKER] = async_call_later(
                self._hass, self._interval, self.update
            )

    def device(self, device_id):
        """Return device representation."""
        return self._client.device(device_id)

    def is_available(self, device_id):
        """Return device availability."""
        return device_id in self._client.device_ids
| |
#!/usr/bin/env python
"""Fit F(V,T) data to find EOS parameters from Castep
A Castep 'thermodynamcs' run produces a table of
thermodynamic data from a lattice dynamics calculation.
If a serise of such calculations are done for stuctures
optimised at different hydrostatic pressures, this information
can be used extract the free energy and volume as a function
of pressure and temperature in the 'statically constrained'
quasi-harmonic approximation (SC QHA). That is, we assume that
the phonon freequencies are only a function of the unit cell
volume, and are not directly altered by temperature (via
the anharmonic nature of atomic vibrations) and, furthermore,
that a static cell optimisation gives the cell parameters and
internal structure appropreate at some higher temperature and
lower pressure.
This script provides tools to fit Castep thermodynamics data
to a set of isothermal EOS and thus find V(P,T). """
import re
import numpy as np
import scipy.optimize as spopt
import bm3_eos as eos
# Some regular expressions that get used a lot,
# so we compile them when the module is loaded
_vol_re = re.compile(r'Current cell volume =\s+(\d+\.\d+)\s+A\*\*3')
_zpe_re = re.compile(r'Zero-point energy =\s+(\d+\.\d+)\s+eV')
_tmo_re = re.compile(
    r'(\d+\.\d+)\s+(\d+\.\d+)\s+([+-]?\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
_ufb_re = re.compile(
    r'Total energy corrected for finite basis set =\s+([+-]?\d+\.\d+)\s+eV')


def parse_castep_file(filename, current_data=None):
    """Read a Castep output file with thermodynamic data and extract it

    This appends thermodynamic data, in the form of a tuple, for
    each temperature at which lattice dynamics was used to calculate
    thermodynamic data. The tuple includes (in order) the cell
    volume (A**3), static internal energy including the finite basis
    set correction (eV), the zero point energy (eV), the temperature
    (K), vibrational energy (eV), vibrational component of the
    Helmholtz free energy (eV), the entropy (J/mol/K) and heat
    capacity (J/mol/K). These tuples are returned in a list
    in the order found in the file. Multiple runs can be joined
    together in one file and this is handled correctly. If multiple
    files need to be parsed the function can be called repeatedly
    with the output list passed in as the optional current_data
    argument and new data will be appended to this list.

    BUG FIX: current_data previously defaulted to a mutable list
    ([]), so results silently accumulated across unrelated calls.
    It now defaults to None and a fresh list is created per call;
    explicitly passing a list still appends to it, as before.
    """
    if current_data is None:
        current_data = []
    current_volume = None
    U = None
    zpe = None
    in_thermo = False
    skip_lines = 0
    # `with` guarantees the file is closed even if parsing raises.
    with open(filename, 'r') as fh:
        for line in fh:
            if skip_lines > 0:
                skip_lines = skip_lines - 1
                continue
            match = _vol_re.search(line)
            if match:
                # We need to keep track of the *current* volume
                current_volume = float(match.group(1))
                continue
            match = _ufb_re.search(line)
            if match:
                # We need to keep track of the *current* internal energy
                U = float(match.group(1))
                continue
            match = _zpe_re.search(line)
            if match:
                # A line with the zero point energy must start a
                # thermo block. We need to skip three
                # lines first though.
                zpe = float(match.group(1))
                in_thermo = True
                skip_lines = 3
                continue
            if in_thermo:
                # This is the bulk of the data in the table
                match = _tmo_re.search(line)
                if match:
                    T = float(match.group(1))
                    E = float(match.group(2))
                    F = float(match.group(3))
                    S = float(match.group(4))
                    Cv = float(match.group(5))
                    current_data.append((current_volume, U, zpe, T,
                                         E, F, S, Cv))
                    continue
                else:
                    # Must be at the end of this thermo table
                    in_thermo = False
                    zpe = None
                    U = None
                    current_volume = None
                    continue
    return current_data
def get_VF(data_table, T):
    """Return volumes and Helmholtz free energies at temperature T.

    The table produced by parse_castep_file is scanned for rows at the
    target temperature T (K), and numpy arrays of the volumes (A**3)
    and the Helmholtz free energy F (eV) are returned. Note that:

        F(V,T) = U(V) + F_vib(V,T)

    where U(V) is the (static) potential energy of the system at the
    chosen volume and F_vib(V,T) is the vibrational Helmholtz free
    energy reported by Castep in its thermodynamics table:

        F_vib(V,T) = ZPE(V) + E_vib(V,T) - T.S_vib(V,T)

    Two special values of T are recognised. If T == 0 this function
    returns U(V) + ZPE(V), which can be used to fit a true zero K EOS.
    If T == 'static' just U(V) is returned, which can be used to fit
    an athermal, or static, EOS.
    """
    # Static and 0 K extractions are temperature-independent, so any
    # tabulated temperature will do -- take the first one in the table.
    if T == 'static':
        mode = 'static'
        T = data_table[0][3]
    elif T == 0:
        mode = 'zpe'
        T = data_table[0][3]
    else:
        mode = 'f'
    volumes = []
    energies = []
    for row in data_table:
        if row[3] != T:
            continue
        if mode == 'static':
            # F(V,0) = U(V)
            energies.append(row[1])
        elif mode == 'zpe':
            # F(V,0) = U(V) + ZPE(V)
            energies.append(row[2] + row[1])
        else:
            # Total Helmholtz free energy: U_0(V) + F_vib(V,T)
            energies.append(row[5] + row[1])
        volumes.append(row[0])
    return np.array(volumes), np.array(energies)
if __name__=='__main__':

    import sys

    # Accumulate thermodynamic data from every Castep output file
    # named on the command line into a single table.
    data = []
    for file in sys.argv[1:]:
        data = parse_castep_file(file, data)

    # Temperatures (K) at which isothermal BM3 EOS fits are performed.
    Ts = [0, 500, 1000, 1500, 2000, 2500, 3000, 3500]
    Vs = []
    Fs = []
    K0s = []
    Kp0s = []
    E0s = []
    V0s = []
    # Track the overall volume range across all fits; used below to
    # set the plotted volume axis.
    min_V = 1.0E12
    max_V = 0.0
    for T in Ts:
        V, F = get_VF(data, T)
        V0, E0, K0, Kp0 = eos.fit_BM3_EOS(V, F, verbose=True)
        if np.max(V) > max_V: max_V = np.max(V)
        if np.min(V) < min_V: min_V = np.min(V)
        Vs.append(V)
        Fs.append(F)
        K0s.append(K0)
        Kp0s.append(Kp0)
        E0s.append(E0)
        V0s.append(V0)

    # Fit the static (athermal) EOS from the bare U(V) data.
    print("Athermal EOS")
    Vstat, Fstat = get_VF(data, 'static')
    V0stat, Estat, Kstat, Kpstat = eos.fit_BM3_EOS(Vstat, Fstat,
                                                   verbose=True)

    # Fit the true 0 K EOS including the zero-point energy.
    print("0K EOS")
    Vzpe, Fzpe = get_VF(data, 0)
    eos.fit_BM3_EOS(Vzpe, Fzpe, verbose=True)

    #eos.BM3_EOS_energy_plot(Vs, Fs, V0s, E0s, K0s, Kp0s, Ts=Ts)
    #eos.BM3_EOS_pressure_plot(np.floor(min_V), np.ceil(max_V), V0s,
    #                          K0s, Kp0s, Ts=Ts)
    eos.BM3_EOS_twoplots(np.floor(min_V), np.ceil(max_V),
                         Vs, Fs, V0s, E0s, K0s, Kp0s, Ts, filename='EOSfits.eps')

    # Fit quadratics in T to the EOS parameters so that V can be
    # evaluated at arbitrary (P, T).
    fV0, fE0, fK0, fKp0 = eos.fit_parameters_quad(Ts, V0s, E0s, K0s, Kp0s,
                                                  plot=True, filename='EOSparams.eps', table='EOSparams.tex')

    print("P (GPa) T (K) V (ang**3)")
    print(0, 0, eos.get_V(0, 0, fV0, fK0, fKp0))
    print(0, 300, eos.get_V(0, 300, fV0, fK0, fKp0))
    print(25, 0, eos.get_V(25, 0, fV0, fK0, fKp0))
    # NOTE(review): on the next two lines the printed temperature label
    # disagrees with the temperature actually evaluated (2600 vs 2500,
    # 3200 vs 3500) -- confirm which was intended before trusting output.
    print(25, 2600, eos.get_V(25, 2500, fV0, fK0, fKp0))
    print(25, 3200, eos.get_V(25, 3500, fV0, fK0, fKp0))
    print(60, 0, eos.get_V(60, 0, fV0, fK0, fKp0))
    print(60, 3000, eos.get_V(60, 3000, fV0, fK0, fKp0))
    print(60, 4000, eos.get_V(60, 4000, fV0, fK0, fKp0))
    print("Extrapolating for forsterite")
    print(60, 3500, eos.get_V(60, 3500, fV0, fK0, fKp0) )
    # NOTE(review): linear-in-T extrapolation using the slope between
    # T=3000 and T=3500 applied from a base point at T=3250; the printed
    # label says 4000 K -- verify the arithmetic is what was intended.
    print(60, 4000, eos.get_V(60, 3250, fV0, fK0, fKp0) + ((eos.get_V(60, 3500, fV0, fK0, fKp0)-eos.get_V(60, 3000, fV0, fK0, fKp0))/500.0)*750)
| |
from fontTools.misc import arrayTools
from defcon.objects.base import BaseObject
from defcon.tools.fuzzyNumber import FuzzyNumber
from defcon.tools.notifications import NotificationCenter
_representationFactories = {}
def addRepresentationFactory(name, factory):
_representationFactories[name] = factory
def removeRepresentationFactory(name):
del _representationFactories[name]
class Glyph(BaseObject):

    """
    This object represents a glyph and it contains contour, component, anchor
    and other assorted bits data about the glyph.

    **This object posts the following notifications:**

    ===================== ====
    Name                  Note
    ===================== ====
    Glyph.Changed         Posted when the *dirty* attribute is set.
    Glyph.NameChanged     Posted after the *name* attribute has been changed.
    Glyph.UnicodesChanged Posted after the *unicodes* attribute has been changed.
    ===================== ====

    The Glyph object has list like behavior. This behavior allows you to interact
    with contour data directly. For example, to get a particular contour::

        contour = glyph[0]

    To iterate over all contours::

        for contour in glyph:

    To get the number of contours::

        contourCount = len(glyph)

    To interact with components or anchors in a similar way,
    use the ``components`` and ``anchors`` attributes.
    """

    # Notification names consumed by the BaseObject change/undo machinery.
    changeNotificationName = "Glyph.Changed"
    beginUndoNotificationName = "Glyph.BeginUndo"
    endUndoNotificationName = "Glyph.EndUndo"
    beginRedoNotificationName = "Glyph.BeginRedo"
    endRedoNotificationName = "Glyph.EndRedo"

    def __init__(self, contourClass=None, pointClass=None, componentClass=None, anchorClass=None, libClass=None):
        super(Glyph, self).__init__()
        self._parent = None
        self._dirty = False
        self._name = None
        self._unicodes = []
        self._width = 0
        self._note = None
        self._dispatcher = None
        self._contours = []
        self._components = []
        self._anchors = []
        self._lib = None
        # Lazily-computed bounding boxes; None means "not computed yet".
        self._boundsCache = None
        self._controlPointBoundsCache = None
        # Cache of representation objects keyed by _makeRepresentationKey.
        self._representations = {}
        # Fall back to the stock defcon classes when no custom classes are
        # supplied (Python 2 implicit relative imports).
        if contourClass is None:
            from contour import Contour
            contourClass = Contour
        if pointClass is None:
            from point import Point
            pointClass = Point
        if componentClass is None:
            from component import Component
            componentClass = Component
        if anchorClass is None:
            from anchor import Anchor
            anchorClass = Anchor
        if libClass is None:
            from lib import Lib
            libClass = Lib
        self._contourClass = contourClass
        self._pointClass = pointClass
        self._componentClass = componentClass
        self._anchorClass = anchorClass
        self._lib = libClass()
        self._lib.setParent(self)

    def _set_dispatcher(self, dispatcher):
        # Propagate the notification dispatcher to every child object so
        # their change notifications reach this glyph, and subscribe the
        # glyph to its own Glyph.Changed to flush cached representations.
        super(Glyph, self)._set_dispatcher(dispatcher)
        if dispatcher is not None:
            for contour in self._contours:
                self._setParentDataInContour(contour)
            for component in self._components:
                self._setParentDataInComponent(component)
            for anchor in self._anchors:
                self._setParentDataInAnchor(anchor)
            self._lib.dispatcher = dispatcher
            self._lib.addObserver(observer=self, methodName="_libContentChanged", notification="Lib.Changed")
            self.addObserver(observer=self, methodName="destroyAllRepresentations", notification="Glyph.Changed")

    def _get_dispatcher(self):
        return super(Glyph, self)._get_dispatcher()

    dispatcher = property(_get_dispatcher, _set_dispatcher, doc="The :class:`~defcon.tools.notifications.NotificationCenter` object assigned to the glyph.")

    def _destroyBoundsCache(self):
        # Invalidate both cached bounding boxes; they are recomputed
        # lazily by the bounds/controlPointBounds properties.
        self._boundsCache = None
        self._controlPointBoundsCache = None

    # ----------
    # Attributes
    # ----------

    def _get_contourClass(self):
        return self._contourClass

    contourClass = property(_get_contourClass, doc="The class used for contours.")

    def _get_pointClass(self):
        return self._pointClass

    pointClass = property(_get_pointClass, doc="The class used for points.")

    def _get_componentClass(self):
        return self._componentClass

    componentClass = property(_get_componentClass, doc="The class used for components.")

    def _get_anchorClass(self):
        return self._anchorClass

    anchorClass = property(_get_anchorClass, doc="The class used for anchors.")

    def _set_name(self, value):
        # Only mark dirty and notify when the name actually changes.
        oldName = self._name
        if oldName != value:
            self._name = value
            self.dirty = True
            dispatcher = self.dispatcher
            if dispatcher is not None:
                data = dict(oldName=oldName, newName=value)
                self.dispatcher.postNotification(notification="Glyph.NameChanged", observable=self, data=data)

    def _get_name(self):
        return self._name

    name = property(_get_name, _set_name, doc="The name of the glyph. Setting this posts a *Glyph.NameChanged* notification.")

    def _get_unicodes(self):
        # Return a copy so callers cannot mutate internal state in place.
        return list(self._unicodes)

    def _set_unicodes(self, value):
        oldValue = self.unicodes
        if oldValue != value:
            self._unicodes = value
            self.dirty = True
            dispatcher = self.dispatcher
            if dispatcher is not None:
                data = dict(oldValues=oldValue, newValues=value)
                self.dispatcher.postNotification(notification="Glyph.UnicodesChanged", observable=self, data=data)

    unicodes = property(_get_unicodes, _set_unicodes, doc="The list of unicode values assigned to the glyph. Setting this posts *Glyph.UnicodesChanged* and *Glyph.Changed* notifications.")

    def _get_unicode(self):
        if self._unicodes:
            return self._unicodes[0]
        return None

    def _set_unicode(self, value):
        # Setting the primary value moves it to the front of the list,
        # deduplicating if it was already present.
        if value is None:
            self.unicodes = []
        else:
            existing = list(self._unicodes)
            if value in existing:
                existing.pop(existing.index(value))
            existing.insert(0, value)
            self.unicodes = existing

    unicode = property(_get_unicode, _set_unicode, doc="The primary unicode value for the glyph. This is the equivalent of ``glyph.unicodes[0]``. This is a convenience attribute that works with the ``unicodes`` attribute.")

    def _get_bounds(self):
        from robofab.pens.boundsPen import BoundsPen
        # Computed lazily and cached; invalidated by outline changes.
        if self._boundsCache is None:
            pen = BoundsPen(self.getParent())
            self.draw(pen)
            self._boundsCache = pen.bounds
        return self._boundsCache

    bounds = property(_get_bounds, doc="The bounds of the glyph's outline expressed as a tuple of form (xMin, yMin, xMax, yMax).")

    def _get_controlPointBounds(self):
        from robofab.pens.boundsPen import ControlBoundsPen
        if self._controlPointBoundsCache is None:
            pen = ControlBoundsPen(self.getParent())
            self.draw(pen)
            self._controlPointBoundsCache = pen.bounds
        return self._controlPointBoundsCache

    controlPointBounds = property(_get_controlPointBounds, doc="The control bounds of all points in the glyph. This only measures the point positions, it does not measure curves. So, curves without points at the extrema will not be properly measured.")

    def _get_leftMargin(self):
        bounds = self.bounds
        if bounds is None:
            return None
        xMin, yMin, xMax, yMax = bounds
        return xMin

    def _set_leftMargin(self, value):
        bounds = self.bounds
        if bounds is None:
            return
        xMin, yMin, xMax, yMax = bounds
        diff = value - xMin
        # Shifting the outline changes the right side too, so the width
        # is grown by the same amount to keep the right margin constant.
        self.move((diff, 0))
        self._width += diff
        self.dirty = True

    leftMargin = property(_get_leftMargin, _set_leftMargin, doc="The left margin of the glyph. Setting this posts a *Glyph.Changed* notification.")

    def _get_rightMargin(self):
        bounds = self.bounds
        if bounds is None:
            return None
        xMin, yMin, xMax, yMax = bounds
        return self._width - xMax

    def _set_rightMargin(self, value):
        bounds = self.bounds
        if bounds is None:
            return
        xMin, yMin, xMax, yMax = bounds
        self._width = xMax + value
        self.dirty = True

    rightMargin = property(_get_rightMargin, _set_rightMargin, doc="The right margin of the glyph. Setting this posts a *Glyph.Changed* notification.")

    def _get_width(self):
        return self._width

    def _set_width(self, value):
        self._width = value
        self.dirty = True

    width = property(_get_width, _set_width, doc="The width of the glyph. Setting this posts a *Glyph.Changed* notification.")

    def _get_components(self):
        # Copy, so callers cannot mutate the internal list.
        return list(self._components)

    components = property(_get_components, doc="An ordered list of :class:`Component` objects stored in the glyph.")

    def _get_anchors(self):
        return list(self._anchors)

    anchors = property(_get_anchors, doc="An ordered list of :class:`Anchor` objects stored in the glyph.")

    def _get_note(self):
        return self._note

    def _set_note(self, value):
        if value is not None:
            assert isinstance(value, basestring)
        self._note = value
        self.dirty = True

    note = property(_get_note, _set_note, doc="An arbitrary note for the glyph. Setting this will post a *Glyph.Changed* notification.")

    def _get_lib(self):
        return self._lib

    def _set_lib(self, value):
        # Replace the lib contents in place rather than swapping the
        # object, so observers of the existing Lib stay attached.
        self._lib.clear()
        self._lib.update(value)
        self.dirty = True

    lib = property(_get_lib, _set_lib, doc="The glyph's :class:`Lib` object. Setting this will clear any existing lib data and post a *Glyph.Changed* notification if data was replaced.")

    # -----------
    # Pen Methods
    # -----------

    def draw(self, pen):
        """
        Draw the glyph with **pen**.
        """
        # Adapt the segment pen to the point-pen protocol and reuse
        # drawPoints as the single drawing implementation.
        from robofab.pens.adapterPens import PointToSegmentPen
        pointPen = PointToSegmentPen(pen)
        self.drawPoints(pointPen)

    def drawPoints(self, pointPen):
        """
        Draw the glyph with **pointPen**.
        """
        for contour in self._contours:
            contour.drawPoints(pointPen)
        for component in self._components:
            component.drawPoints(pointPen)
        for anchor in self._anchors:
            anchor.drawPoints(pointPen)

    def getPen(self):
        """
        Get the pen used to draw into this glyph.
        """
        from robofab.pens.adapterPens import SegmentToPointPen
        return SegmentToPointPen(self.getPointPen())

    def getPointPen(self):
        """
        Get the point pen used to draw into this glyph.
        """
        from defcon.pens.glyphObjectPointPen import GlyphObjectPointPen
        return GlyphObjectPointPen(self)

    # -------
    # Methods
    # -------

    def __len__(self):
        # List-like behavior: length is the contour count.
        return len(self._contours)

    def __iter__(self):
        contourCount = len(self)
        index = 0
        while index < contourCount:
            contour = self[index]
            yield contour
            index += 1

    def __getitem__(self, index):
        return self._contours[index]

    def _getContourIndex(self, contour):
        return self._contours.index(contour)

    def _setParentDataInContour(self, contour):
        # Wire a contour into this glyph: parent link, dispatcher and
        # a change observation so outline edits mark the glyph dirty.
        contour.setParent(self)
        dispatcher = self.dispatcher
        if dispatcher is not None:
            contour.dispatcher = dispatcher
            contour.addObserver(observer=self, methodName="_outlineContentChanged", notification="Contour.Changed")

    def _removeParentDataInContour(self, contour):
        contour.setParent(None)
        if contour._dispatcher is not None:
            contour.removeObserver(observer=self, notification="Contour.Changed")
            contour._dispatcher = None

    def _setParentDataInComponent(self, component):
        component.setParent(self)
        dispatcher = self.dispatcher
        if dispatcher is not None:
            component.dispatcher = dispatcher
            component.addObserver(observer=self, methodName="_outlineContentChanged", notification="Component.Changed")

    def _removeParentDataInComponent(self, component):
        component.setParent(None)
        if component._dispatcher is not None:
            component.removeObserver(observer=self, notification="Component.Changed")
            component._dispatcher = None

    def _setParentDataInAnchor(self, anchor):
        anchor.setParent(self)
        dispatcher = self.dispatcher
        if dispatcher is not None:
            anchor.dispatcher = dispatcher
            anchor.addObserver(observer=self, methodName="_outlineContentChanged", notification="Anchor.Changed")

    def _removeParentDataInAnchor(self, anchor):
        anchor.setParent(None)
        if anchor._dispatcher is not None:
            anchor.removeObserver(observer=self, notification="Anchor.Changed")
            anchor._dispatcher = None

    def appendContour(self, contour):
        """
        Append **contour** to the glyph. The contour must be a defcon
        :class:`Contour` object or a subclass of that object.

        This will post a *Glyph.Changed* notification.
        """
        assert contour not in self._contours
        self.insertContour(len(self._contours), contour)

    def appendComponent(self, component):
        """
        Append **component** to the glyph. The component must be a defcon
        :class:`Component` object or a subclass of that object.

        This will post a *Glyph.Changed* notification.
        """
        assert component not in self._components
        self.insertComponent(len(self._components), component)

    def appendAnchor(self, anchor):
        """
        Append **anchor** to the glyph. The anchor must be a defcon
        :class:`Anchor` object or a subclass of that object.

        This will post a *Glyph.Changed* notification.
        """
        assert anchor not in self._anchors
        self.insertAnchor(len(self._anchors), anchor)

    def insertContour(self, index, contour):
        """
        Insert **contour** into the glyph at index. The contour
        must be a defcon :class:`Contour` object or a subclass
        of that object.

        This will post a *Glyph.Changed* notification.
        """
        assert contour not in self._contours
        if contour.getParent() != self:
            self._setParentDataInContour(contour)
        self._contours.insert(index, contour)
        self._destroyBoundsCache()
        self.dirty = True

    def insertComponent(self, index, component):
        """
        Insert **component** into the glyph at index. The component
        must be a defcon :class:`Component` object or a subclass
        of that object.

        This will post a *Glyph.Changed* notification.
        """
        assert component not in self._components
        if component.getParent() != self:
            self._setParentDataInComponent(component)
        self._components.insert(index, component)
        self._destroyBoundsCache()
        self.dirty = True

    def insertAnchor(self, index, anchor):
        """
        Insert **anchor** into the glyph at index. The anchor
        must be a defcon :class:`Anchor` object or a subclass
        of that object.

        This will post a *Glyph.Changed* notification.
        """
        # NOTE: anchors do not contribute to the outline bounds, so the
        # bounds caches are left untouched here (unlike contours and
        # components).
        assert anchor not in self._anchors
        if anchor.getParent() != self:
            self._setParentDataInAnchor(anchor)
        self._anchors.insert(index, anchor)
        self.dirty = True

    def removeContour(self, contour):
        """
        Remove **contour** from the glyph.

        This will post a *Glyph.Changed* notification.
        """
        self._contours.remove(contour)
        self._removeParentDataInContour(contour)
        self._destroyBoundsCache()
        self.dirty = True

    def removeComponent(self, component):
        """
        Remove **component** from the glyph.

        This will post a *Glyph.Changed* notification.
        """
        self._components.remove(component)
        self._removeParentDataInComponent(component)
        self._destroyBoundsCache()
        self.dirty = True

    def removeAnchor(self, anchor):
        """
        Remove **anchor** from the glyph.

        This will post a *Glyph.Changed* notification.
        """
        self._anchors.remove(anchor)
        self._removeParentDataInAnchor(anchor)
        self.dirty = True

    def contourIndex(self, contour):
        """
        Get the index for **contour**.
        """
        return self._contours.index(contour)

    def componentIndex(self, component):
        """
        Get the index for **component**.
        """
        return self._components.index(component)

    def anchorIndex(self, anchor):
        """
        Get the index for **anchor**.
        """
        return self._anchors.index(anchor)

    def clear(self):
        """
        Clear all contours, components and anchors from the glyph.

        This posts a *Glyph.Changed* notification.
        """
        # Hold notifications so observers see one change, not many.
        self.holdNotifications()
        self.clearContours()
        self.clearComponents()
        self.clearAnchors()
        self.releaseHeldNotifications()

    def clearContours(self):
        """
        Clear all contours from the glyph.

        This posts a *Glyph.Changed* notification.
        """
        self.holdNotifications()
        # Iterate in reverse so removal does not disturb iteration.
        for contour in reversed(self._contours):
            self.removeContour(contour)
        self.releaseHeldNotifications()

    def clearComponents(self):
        """
        Clear all components from the glyph.

        This posts a *Glyph.Changed* notification.
        """
        self.holdNotifications()
        for component in reversed(self._components):
            self.removeComponent(component)
        self.releaseHeldNotifications()

    def clearAnchors(self):
        """
        Clear all anchors from the glyph.

        This posts a *Glyph.Changed* notification.
        """
        self.holdNotifications()
        for anchor in reversed(self._anchors):
            self.removeAnchor(anchor)
        self.releaseHeldNotifications()

    def move(self, (x, y)):
        """
        Move all contours, components and anchors in the glyph
        by **(x, y)**.

        This posts a *Glyph.Changed* notification.
        """
        # A pure translation shifts the bounding boxes by the same
        # amount, so the caches are updated in place instead of being
        # invalidated and recomputed.
        oldBounds = self._boundsCache
        oldControlPointBounds = self._controlPointBoundsCache
        for contour in self._contours:
            contour.move((x, y))
        for component in self._components:
            component.move((x, y))
        for anchor in self._anchors:
            anchor.move((x, y))
        if oldBounds:
            xMin, yMin, xMax, yMax = oldBounds
            xMin += x
            yMin += y
            xMax += x
            yMax += y
            self._boundsCache = (xMin, yMin, xMax, yMax)
        if oldControlPointBounds:
            xMin, yMin, xMax, yMax = oldControlPointBounds
            xMin += x
            yMin += y
            xMax += x
            yMax += y
            self._controlPointBoundsCache = (xMin, yMin, xMax, yMax)

    def pointInside(self, (x, y), evenOdd=False):
        """
        Returns a boolean indicating if **(x, y)** is in the
        "black" area of the glyph.
        """
        from fontTools.pens.pointInsidePen import PointInsidePen
        pen = PointInsidePen(glyphSet=None, testPoint=(x, y), evenOdd=evenOdd)
        self.draw(pen)
        return pen.getResult()

    # ---------------
    # Representations
    # ---------------

    def representationKeys(self):
        """
        Get a list of all representation keys
        that have been called within this object.
        """
        representations = []
        for key in self._representations.keys():
            # Keys are either a plain name string or a tuple of
            # (name, (kwarg, value), ...) -- see _makeRepresentationKey.
            if isinstance(key, basestring):
                name = key
                kwargs = {}
            else:
                name = key[0]
                kwargs = {}
                for k, v in key[1:]:
                    kwargs[k] = v
            representations.append((name, kwargs))
        return representations

    def destroyRepresentation(self, name, **kwargs):
        """
        Destroy the stored representation for **name**
        and **\*\*kwargs**.
        """
        key = self._makeRepresentationKey(name, **kwargs)
        if key in self._representations:
            del self._representations[key]

    def destroyAllRepresentations(self, notification=None):
        """
        Destroy all representations.
        """
        # Also wired up as the observer callback for Glyph.Changed,
        # hence the unused notification argument.
        self._representations = {}

    def getRepresentation(self, name, **kwargs):
        """
        Get a representation. **name** must be a registered
        representation name. **\*\*kwargs** will be passed
        to the appropriate representation factory.
        """
        key = self._makeRepresentationKey(name, **kwargs)
        if key not in self._representations:
            factory = _representationFactories[name]
            representation = factory(self, self.getParent(), **kwargs)
            self._representations[key] = representation
        return self._representations[key]

    def hasCachedRepresentation(self, name, **kwargs):
        """
        Returns a boolean indicating if a representation for
        **name** and **\*\*kwargs** is cached in the glyph.
        """
        key = self._makeRepresentationKey(name, **kwargs)
        return key in self._representations

    def _makeRepresentationKey(self, name, **kwargs):
        # Build a hashable cache key: the bare name when there are no
        # kwargs, otherwise a tuple of the name plus sorted kwarg pairs.
        if kwargs:
            key = [name] + sorted(kwargs.items())
            key = tuple(key)
        else:
            key = name
        return key

    # ----
    # Undo
    # ----

    def getDataToSerializeForUndo(self):
        # Snapshot the full glyph state as plain data for the undo stack.
        data = dict(
            contours=[contour.serializeForUndo(pack=False) for contour in self._contours],
            components=[component.serializeForUndo(pack=False) for component in self._components],
            anchors=[anchor.serializeForUndo(pack=False) for anchor in self._anchors],
            name=self.name,
            unicodes=self.unicodes,
            width=self.width,
            lib=self.lib.serializeForUndo(pack=False)
        )
        return data

    def loadDeserializedDataFromUndo(self, data):
        # Restore a snapshot produced by getDataToSerializeForUndo.
        # clear contours, components, anchors
        self.clear()
        # contours
        for contourData in data["contours"]:
            contour = self.contourClass(pointClass=self.pointClass)
            contour.deserializeFromUndo(contourData)
            self.appendContour(contour)
        # components
        for componentData in data["components"]:
            component = self.componentClass()
            component.deserializeFromUndo(componentData)
            self.appendComponent(component)
        # anchors
        for anchorData in data["anchors"]:
            anchor = self.anchorClass()
            anchor.deserializeFromUndo(anchorData)
            self.appendAnchor(anchor)
        # basic attributes
        self.name = data["name"]
        self.unicodes = data["unicodes"]
        self.width = data["width"]
        # lib
        self.lib.deserializeFromUndo(data["lib"])

    # ----------------------
    # Notification Callbacks
    # ----------------------

    def _outlineContentChanged(self, notification):
        # Any outline edit invalidates the bounds and dirties the glyph.
        self._destroyBoundsCache()
        self.dirty = True

    def _libContentChanged(self, notification):
        self.dirty = True
# -----
# Tests
# -----
def _testName():
"""
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.name = 'RenamedGlyph'
>>> glyph.name
'RenamedGlyph'
>>> keys = font.keys()
>>> keys.sort()
>>> keys
['B', 'C', 'RenamedGlyph']
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.name = 'A'
>>> glyph.dirty
False
>>> font._scheduledForDeletion
[]
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.name
'A'
"""
def _testUnicodes():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.unicodes
[65]
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.unicodes = [123, 456]
>>> glyph.unicodes
[123, 456]
>>> glyph.dirty
True
"""
def _testBounds():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.bounds
(0, 0, 700, 700)
>>> glyph = font['B']
>>> glyph.bounds
(0, 0, 700, 700)
>>> glyph = font['C']
>>> glyph.bounds
(0.0, 0.0, 700.0, 700.0)
"""
def _testControlPointBounds():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.controlPointBounds
(0, 0, 700, 700)
>>> glyph = font['B']
>>> glyph.controlPointBounds
(0, 0, 700, 700)
>>> glyph = font['C']
>>> glyph.controlPointBounds
(0.0, 0.0, 700.0, 700.0)
"""
def _testLeftMargin():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.leftMargin
0
>>> glyph = font['B']
>>> glyph.leftMargin
0
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.leftMargin = 100
>>> glyph.leftMargin
100
>>> glyph.width
800
>>> glyph.dirty
True
"""
def _testRightMargin():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.rightMargin
0
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.rightMargin = 100
>>> glyph.rightMargin
100
>>> glyph.width
800
>>> glyph.dirty
True
"""
def _testWidth():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.width
700
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.width = 100
>>> glyph.width
100
>>> glyph.dirty
True
"""
def _testComponents():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['C']
>>> len(glyph.components)
2
"""
def _testAnchors():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> len(glyph.anchors)
2
"""
def _testLen():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> len(glyph)
2
"""
def _testIter():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> for contour in glyph:
... print len(contour)
4
4
"""
def _testAppendContour():
"""
>>> from defcon.objects.contour import Contour
>>> glyph = Glyph()
>>> glyph.dirty = False
>>> contour = Contour()
>>> glyph.appendContour(contour)
>>> len(glyph)
1
>>> glyph.dirty
True
"""
def _testAppendComponent():
"""
>>> from defcon.objects.component import Component
>>> glyph = Glyph()
>>> glyph.dirty = False
>>> component = Component()
>>> glyph.appendComponent(component)
>>> len(glyph.components)
1
>>> glyph.dirty
True
"""
def _testAppendAnchor():
"""
>>> from defcon.objects.anchor import Anchor
>>> glyph = Glyph()
>>> glyph.dirty = False
>>> anchor = Anchor()
>>> glyph.appendAnchor(anchor)
>>> len(glyph.anchors)
1
>>> glyph.dirty
True
"""
def _testRemoveContour():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> contour = glyph[0]
>>> glyph.removeContour(contour)
>>> contour in glyph._contours
False
"""
def _testRemoveComponent():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['C']
>>> component = glyph.components[0]
>>> glyph.removeComponent(component)
>>> component in glyph.components
False
"""
def _testRemoveAnchor():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> anchor = glyph.anchors[0]
>>> glyph.removeAnchor(anchor)
>>> anchor in glyph.anchors
False
"""
def _testContourIndex():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> contour = glyph[0]
>>> glyph.contourIndex(contour)
0
>>> contour = glyph[1]
>>> glyph.contourIndex(contour)
1
"""
def _testComponentIndex():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['C']
>>> component = glyph.components[0]
>>> glyph.componentIndex(component)
0
>>> component = glyph.components[1]
>>> glyph.componentIndex(component)
1
"""
def _testAnchorIndex():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> anchor = glyph.anchors[0]
>>> glyph.anchorIndex(anchor)
0
>>> anchor = glyph.anchors[1]
>>> glyph.anchorIndex(anchor)
1
"""
def _testClear():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.clear()
>>> len(glyph)
0
>>> len(glyph.anchors)
0
>>> glyph = font['C']
>>> glyph.clear()
>>> len(glyph.components)
0
"""
def _testClearContours():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.clearContours()
>>> len(glyph)
0
"""
def _testClearComonents():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['C']
>>> glyph.clearComponents()
>>> len(glyph.components)
0
"""
def _testClearAnchors():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.clearAnchors()
>>> len(glyph.anchors)
0
"""
def _testMove():
    """
    Doctest: Glyph.move shifts the bounds by the given (x, y) offset.
    The component-glyph ('C') case is left commented out below — presumably
    it was failing when this test was written; verify before re-enabling.

    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> glyph = font['A']
    >>> xMin, yMin, xMax, yMax = glyph.bounds
    >>> glyph.move((100, 50))
    >>> (xMin+100, yMin+50, xMax+100, yMax+50) == glyph.bounds
    True
    >>> glyph = font['C']
    >>> xMin, yMin, xMax, yMax = glyph.bounds
    #>>> glyph.move((100, 50))
    #>>> (xMin+100, yMin+50, xMax+100, yMax+50) == glyph.bounds
    #True
    """
def _testPointInside():
    """
    Doctest: Glyph.pointInside reports whether a point lies inside the outline.

    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> glyph = font['A']
    >>> glyph.pointInside((100, 100))
    True
    >>> glyph.pointInside((350, 350))
    False
    >>> glyph.pointInside((-100, -100))
    False
    """
if __name__ == "__main__":
    # Run all doctests embedded in the _test* docstrings above.
    import doctest
    doctest.testmod()
| |
#!/usr/bin/env python
import Lp_FrontEndFunctions
import Lp_CursesDriver
import Lp_XmlParser
import Lp_RpcDispatcher
import Lp_InputProcessing
import Lp_Defines
import shutil
import os
import sys
import socket
import time
import subprocess
import signal
import platform
import threading
from datetime import datetime
# Local UDP port numbers shared with the backend; the authoritative values
# live in Lp_Defines and are only aliased here for brevity.
BLOCKER_PORT = Lp_Defines.BLOCKER_PORT
RPC_DISPATCH_PORT = Lp_Defines.RPC_DISPATCH_PORT
PRINT_PORT = Lp_Defines.PRINT_PORT
BACKEND_PORT = Lp_Defines.BACKEND_PORT
FRONTEND_PORT = Lp_Defines.FRONTEND_PORT
#Prints anything received from the backend on port 1338. If an rpc
#confirmation is received, it sends the confirmation to the RPC Dispatcher.
class PrintThread(threading.Thread):
    # Background thread that echoes backend output received on PRINT_PORT.
    # Datagrams containing 'RECV' are RPC confirmations and are forwarded to
    # the RPC dispatcher; the sentinel '!!#QUIT' terminates the loop.
    def __init__(self,processor,lFile):
        # processor: the Lp input processor (kept as self.cmdLoop)
        # lFile: open log file; everything printed is mirrored into it
        self.sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        self.sock.bind(('127.0.0.1',PRINT_PORT))
        # Short timeout so the receive loop regularly re-checks for QUIT.
        self.sock.settimeout(.1)
        self.cmdLoop=processor
        self.log=lFile
        threading.Thread.__init__(self)
    def run(self):
        while 1:
            try:
                stringIn,addr=self.sock.recvfrom(1024)
                if stringIn.find('RECV')>=0:
                    # RPC confirmation: hand it to the RPC dispatcher.
                    self.sock.sendto(stringIn,('127.0.0.1',RPC_DISPATCH_PORT))
                elif stringIn == '!!#QUIT':
                    break
                else:
                    # Plain backend output: print and log it.
                    print stringIn,
                    self.log.write(stringIn)
                    sys.stdout.flush()
            except:
                # NOTE(review): bare except is presumably meant only for
                # socket.timeout, but it also swallows every other error in
                # the loop body — confirm and narrow if possible.
                continue
        self.sock.close()
#Forces backend to ignore TERM signals sent to the front end on ctrl-c
def preexec_fcn():
    # Runs in the child between fork and exec (subprocess preexec_fn), so the
    # backend survives ctrl-c pressed in the front end.
    # NOTE(review): the comment above says TERM but this ignores SIGINT —
    # SIGINT is what ctrl-c actually delivers; the comment looks wrong.
    signal.signal(signal.SIGINT,signal.SIG_IGN)
def setInitialOpenArgs(args):
    # Populate *args* in place with the '!open' command parameters taken
    # from the command line: argv[1..5] are dstip, dstport, srcip, srcport
    # and keyfile.  Raises IndexError when arguments are missing, exactly
    # like direct indexing would.
    args['command'] = '!open'
    for position, key in enumerate(
            ('dstip', 'dstport', 'srcip', 'srcport', 'keyfile'), 1):
        args[key] = sys.argv[position]
    args['endOnString'] = 'DONE'
def setLpexArgs(args,file):
    # Record *file* as the lpex extension file for the next 'lpex' command.
    args.update(lpexfile=file)
def setPortArgs(args,portNum):
    # Record the backend output port (stringified) for the next 'port' command.
    args.update(outport=str(portNum))
def loadLpexes(func,curDir,lpArch,proc):
    # Discover every lp extension under ../Mods/App/<subdir>/<arch>/ and load
    # each architecture-matching file exactly once via the 'lpex' command.
    #   func:   Lp front-end functions object (issues the command)
    #   curDir: current working directory of the front end
    #   lpArch: Lp architecture string ('i386' or 'x86_64')
    #   proc:   Lp input processor (holds the command argument templates)
    lpexPathList = []
    alreadyLoaded = []
    baseDir = os.path.join(curDir,'../Mods/App/')
    baseList = os.listdir(baseDir)
    archlist = []
    archents = []
    # Walk two fixed directory levels: <subdir>/<arch>/<entry>.
    for subDir in baseList:
        archlist = os.listdir(os.path.join(baseDir,subDir))
        for arch in archlist:
            archents = os.listdir(os.path.join(baseDir,subDir,arch))
            for ent in archents:
                lpexPathList.append(os.path.join(baseDir,subDir,arch,ent))
    # 32-bit extensions use .lx32, everything else .lx64.
    if lpArch == 'i386':
        lpexExtension = '.lx32'
    else:
        lpexExtension = '.lx64'
    lpexArgs = proc.Modules['Lp']['Lp.lpex']
    for lpex in lpexPathList:
        lpexName = lpex.split('/')[-1]
        # Skip files built for the other architecture and duplicate names.
        if lpex.find(lpexExtension)>=0 and not lpexName in alreadyLoaded:
            print "%s..."%lpexName,
            setLpexArgs(lpexArgs,lpex)
            res = func.cmdGeneric('lpex',lpexArgs,{})
            if res ==0:
                print "done"
            else:
                print "failed"
            alreadyLoaded.append(lpexName)
    print ""
#Connects and prints the currently loaded modules and uptime.
#Arguments:
#  proc: the Lp Input Processing object
#  func: the Lp Front End Functions object
#  sock: the socket used to communicate with backend
#  outFile: the Lp log file
#  lpArch: the Lp architecture
#Return:
#  1 on success
#  -1 on failure
def showWelcome(proc,func,sock,outFile,lpArch):
    currentDirectory=os.getcwd()
    # Maps the backend's PlatCore architecture codes to names understood
    # by proc.setArch().
    supportedArchs={'062':'x86_64','003':'i386','020':'ppc','021':'ppc64',
                    '002':'sparc','008':'mips_be','010':'mips_le',
                    '040':'arm','043':'sparcv9'}
    openArgs = proc.Modules['Lp']['Lp.open']
    setInitialOpenArgs(openArgs)
    res = func.cmdGeneric('open',openArgs, {})
    if res < 0:
        print "Failed to connect."
        return -1
    try:
        line=sock.recv(1024)
    except:
        print "Failed to connect."
        return -1
    lpexList = []
    allArches = []
    #load all lp extension files available
    print "Loading Lp Extensions:"
    loadLpexes(func,currentDirectory,lpArch,proc)
    #Parse xml files for preloaded modules and print currently loaded modules.
    #Output is temporarily redirected to the front end port so that this
    #function can read the module list itself.
    portArgs = proc.Modules['Lp']['Lp.port']
    setPortArgs(portArgs, FRONTEND_PORT)
    res = func.cmdGeneric('port',portArgs,{})
    if res != 0:
        print "Failed to set the output port."
        return -1
    res=func.cmdGeneric('mods',proc.Modules['Lp']['Lp.mods'],{})
    modsToParse = []
    print "******************Loaded Modules*****************",
    try:
        line=sock.recv(1024)
        # Read until the RPC confirmation line ('RECV') terminates the list.
        while line.find("RECV")<0:
            print line,
            outFile.write(line)
            # Skip header/separator lines; data lines carry the module name
            # in fixed columns 8-25.
            if line.find('name')<0 and line.find('--')<0 and line.find("Device")<0:
                modName=line[8:25]
                modName=modName.strip()
                module=(modName+'.mo')
                if len(module)>3:
                    modsToParse.append(module)
                if modName=='PlatCore':
                    # PlatCore's line carries the target architecture code
                    # starting at column 46.
                    platCoreArch=line[46:len(line)].strip()
                    try:
                        proc.setArch(supportedArchs[platCoreArch])
                    except KeyError:
                        print "The reported architecture type of %s is not supported."%platCoreArch
                        return -1
            line=sock.recv(1024)
    except socket.timeout:
        print "Failed to receive module list."
        outFile.write("Failed to receive module list.")
        return -1
    except KeyboardInterrupt:
        return -1
    for module in modsToParse:
        proc.parseXml(module,0)
    # Restore output to the print thread's port.
    portArgs = proc.Modules['Lp']['Lp.port']
    setPortArgs(portArgs, PRINT_PORT)
    res = func.cmdGeneric('port',portArgs,{})
    if res < 0:
        print "Failed to set the output port."
        return -1
    try:
        res = func.cmdGeneric('uptime',proc.Modules['Core']['Core.uptime'],{})
    except KeyError:
        print "Error: Core module xml not parsed correctly. Uptime function was not found."
        return -1
    if res < 0:
        print "Failed to send the initial uptime command."
        return -1
    # Block until the dispatcher confirms the uptime RPC completed.
    proc.printBlocker.sendto('!!#REG_BLOCK%d'%res,('127.0.0.1',RPC_DISPATCH_PORT))
    result = proc.printBlocker.recv(1024)
    return 1
if __name__=='__main__':
    # Front-end entry point: starts the backend process, wires up the print
    # and RPC-dispatch threads, shows the welcome banner and then enters the
    # interactive command loop.  Cleans up on normal exit and on ctrl-c.
    try:
        # Sentinel 0 values let the KeyboardInterrupt handler below tell
        # which resources were actually created before the interrupt.
        functions=0
        processor=0
        printThread=0
        log=0
        lpSock=0
        out=0
        ark=platform.architecture()[0]
        if ark=='32bit':
            lpArk='i386'
        else:
            lpArk='x86_64'
        curDir=os.getcwd()
        out=open((curDir+'/back.log'),'w+')
        try:
            logFiles=os.listdir('%s/Logs'%os.getcwd())
        except OSError:
            # Logs directory does not exist yet; create it and retry.
            os.mkdir('%s/Logs'%curDir)
            logFiles=os.listdir('%s/Logs'%os.getcwd())
        numLogs=len(logFiles)
        logFiles.sort()
        #If there are more than 20 log files, delete the oldest one.
        if numLogs>20 and sys.argv[6]=='1':
            try:
                os.remove('%s/Logs/%s'%(curDir,logFiles[0]))
            except OSError:
                print "Unable to remove oldest logfile."
        # Timestamped session log: YYYY-MM-DD_HH:MM:SS_lp.log
        date='%s'%datetime.date(datetime.now())
        cTime='%s'%datetime.time(datetime.now())
        cTime=cTime[:cTime.find('.')]
        logname='%s_%s_lp.log'%(date,cTime)
        log=open('%s/Logs/%s'%(curDir,logname),'w+')
        lpSock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        lpSock.bind(('127.0.0.1',FRONTEND_PORT))
        try:
            # Copy the backend to a '.running' name so it can be replaced
            # on disk while an instance is executing.
            oldBack = os.path.join(curDir,lpArk,'Backend.lp')
            newBack = os.path.join(curDir,lpArk,'Backend.running')
            shutil.copy(oldBack,newBack)
            backRet = subprocess.call([newBack],
                stdout=out,preexec_fn=preexec_fcn)
        except IOError:
            print "Unable to locate back end executatble. This should be located in Lp/<LP Architecture>"
            out.close()
            ThreadExit=0
            lpSock.close()
            sys.exit(-1)
        if backRet != 0:
            print "Back end failed to execute correctly. Return code: 0x%x"%backRet
            out.close()
            ThreadExit=0
            lpSock.close()
            sys.exit(-1)
        # Give the backend a moment to come up before talking to it.
        time.sleep(1)
        functions=Lp_FrontEndFunctions.Lp_FrontEndFcns(lpSock,log)
        processor=Lp_InputProcessing.InputProcessor(functions,log,lpArk,lpSock)
        processor.parseXml('Lp.mo',0)
        processor.setDefaultOutDir()
        functions.setProc(processor)
        printThread=PrintThread(processor,log)
        printThread.daemon=True
        printThread.start()
        rpcDispatch=Lp_RpcDispatcher.RpcDispatcher(processor)
        rpcDispatch.start()
        lpSock.sendto("!!#TURN_ON_PRINTING",('127.0.0.1',RPC_DISPATCH_PORT))
        res=showWelcome(processor,functions,lpSock,log,lpArk)
        if res>0:
            # Normal interactive session.
            processor.cmdloop()
        else:
            # Welcome failed: terminate the session explicitly.
            res = functions.cmdGeneric('term',{'command':'!term','endOnString':'DONE'},{})
            if res < 0:
                print "Failed to send term command on exit. Session may still be open."
        # Shut down helper threads and the backend, then release resources.
        lpSock.sendto("!!#QUIT",('127.0.0.1',RPC_DISPATCH_PORT))
        lpSock.sendto("!!#QUIT",('127.0.0.1',PRINT_PORT))
        subprocess.call(['killall','Backend.running'])
        os.remove(newBack)
        out.close()
        log.close()
        lpSock.close()
    except KeyboardInterrupt:
        # Best-effort cleanup on ctrl-c; each resource is only touched if it
        # was created (non-zero sentinel) before the interrupt arrived.
        if functions!=0:
            res=functions.cmdGeneric('term',{'command':'!term','endOnString':'DONE'},{})
        if out !=0:
            try:
                out.close()
            except IOError:
                pass
        print "Goodbye"
        if log!=0:
            log.write('Goodbye\n')
            log.write('Session terminated at %s'%str(datetime.now()))
            log.close()
        lpSock.sendto("!!#QUIT",('127.0.0.1',RPC_DISPATCH_PORT))
        lpSock.sendto("!!#QUIT",('127.0.0.1',PRINT_PORT))
        subprocess.call(['killall','Backend.running'])
        os.remove(newBack)
        if lpSock != 0:
            lpSock.close()
| |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import pkg_resources
import shutil
import yaml
from .common import mkdir_p
# Name of the marker directory that identifies a catkin_tools workspace root.
METADATA_DIR_NAME = '.catkin_tools'

# Contents written to <root>/.catkin_tools/README on initialization.
METADATA_README_TEXT = """\
# Catkin Tools Metadata
This directory was generated by catkin_tools and it contains persistent
configuration information used by the `catkin` command and its sub-commands.
Each subdirectory of the `profiles` directory contains a set of persistent
configuration options for separate profiles. The default profile is called
`default`. If another profile is desired, it can be described in the
`profiles.yaml` file in this directory.
Please see the catkin_tools documentation before editing any files in this
directory. Most actions can be performed with the `catkin` command-line
program.
"""

# File (inside the profiles dir) recording e.g. the active profile name.
PROFILES_YML_FILE_NAME = 'profiles.yaml'
# Profile used when none has been explicitly activated.
DEFAULT_PROFILE_NAME = 'default'
def get_metadata_root_path(workspace_path):
    """Construct the path to a root metadata directory.

    :param workspace_path: The exact path to the root of a catkin_tools workspace
    :type workspace_path: str
    :returns: The path to the metadata root directory or None if workspace_path isn't a string
    :rtype: str or None
    """
    # TODO: Should calling this without a string just be a fatal error?
    return (None if workspace_path is None
            else os.path.join(workspace_path, METADATA_DIR_NAME))
def get_profiles_path(workspace_path):
    """Construct the path to the profiles metadata directory.

    :param workspace_path: The exact path to the root of a catkin_tools workspace
    :type workspace_path: str
    :returns: The path to the profile metadata directory or None if workspace_path isn't a string
    :rtype: str or None
    """
    return (None if workspace_path is None
            else os.path.join(workspace_path, METADATA_DIR_NAME, 'profiles'))
def get_paths(workspace_path, profile_name, verb=None):
    """Construct the path to a metadata directory and verb-specific metadata file.

    Note: these paths are not guaranteed to exist. This function simply serves
    to standardize where these files should be located.

    :param workspace_path: The path to the root of a catkin workspace
    :type workspace_path: str
    :param profile_name: The catkin_tools metadata profile name
    :type profile_name: str
    :param verb: (optional) The catkin_tools verb with which this information is associated.
    :returns: A tuple of the metadata directory and the verb-specific file path, if given
    """
    profiles_path = get_profiles_path(workspace_path)
    metadata_path = None
    metadata_file_path = None
    if profile_name:
        # Directory holding the given profile's metadata
        metadata_path = os.path.join(profiles_path, profile_name)
        if verb:
            # Per-verb YAML file inside that profile directory
            metadata_file_path = os.path.join(metadata_path, '%s.yaml' % verb)
    return (metadata_path, metadata_file_path)
def find_enclosing_workspace(search_start_path):
    """Find a catkin workspace containing the given path.

    Walks up from search_start_path toward the filesystem root, returning the
    first directory that contains a catkin_tools metadata directory.

    :search_start_path: Directory which either is a catkin workspace or is
        contained in a catkin workspace
    :returns: Path to the workspace if found, `None` if not found.
    """
    path = search_start_path
    while path:
        # os.path.isdir is False for nonexistent paths, so no exists() needed.
        if os.path.isdir(os.path.join(path, METADATA_DIR_NAME)):
            return path
        parent, tail = os.path.split(path)
        if not tail:
            # Reached the filesystem root.
            break
        path = parent
    return None
def migrate_metadata(workspace_path):
    """Migrate metadata if it's out of date.

    Compares the workspace's recorded metadata VERSION against the installed
    catkin_tools version; when they differ, the VERSION file is rewritten and
    the version-specific migration is run.
    """
    root = get_metadata_root_path(workspace_path)
    # Nothing there to migrate
    if not root or not os.path.exists(root):
        return
    current_version = pkg_resources.require("catkin_tools")[0].version
    version_file_path = os.path.join(root, 'VERSION')
    # Read the VERSION file (an empty file counts as "no version").
    last_version = None
    if os.path.exists(version_file_path):
        with open(version_file_path, 'r') as metadata_version:
            last_version = metadata_version.read() or None
    if last_version == current_version:
        return
    # Record the new version, then migrate the on-disk layout.
    with open(version_file_path, 'w') as metadata_version:
        metadata_version.write(current_version)
    migrate_metadata_version(workspace_path, last_version)
def migrate_metadata_version(workspace_path, old_version):
    """Migrate between metadata versions.

    :param workspace_path: The exact path to the root of a catkin_tools workspace
    :type workspace_path: str
    :param old_version: dotted version string found on disk, or None
    :type old_version: str or None
    """
    # Versions were added in 0.4.0, and the previously released version was 0.3.1
    if old_version is None:
        old_version = '0.3.1'
    # Compare versions as integer tuples, e.g. '0.3.1' -> (0, 3, 1).
    # NOTE(review): int() raises on pre-release suffixes such as '0.4.0b1' —
    # confirm such strings never end up in the VERSION file.
    old_version = tuple((int(i) for i in old_version.split('.')))
    metadata_root_path = get_metadata_root_path(workspace_path)
    new_profiles_path = os.path.join(metadata_root_path, 'profiles')
    # Restructure profiles directory: since 0.4.0 per-profile directories
    # live under a 'profiles' subdirectory instead of the metadata root.
    if old_version < (0, 4, 0):
        for profile_name in os.listdir(metadata_root_path):
            if profile_name == 'profiles':
                continue
            profile_path = os.path.join(metadata_root_path, profile_name)
            if not os.path.isdir(profile_path):
                continue
            new_path = os.path.join(new_profiles_path, profile_name)
            shutil.move(profile_path, new_path)
    # Update metadata
    for profile_name in get_profile_names(workspace_path):
        for verb in ['config', 'build']:
            # Get the current metadata
            metadata = get_metadata(workspace_path, profile_name, verb)
            # Update devel layout for 0.3.1 -> 0.4.0: the boolean
            # 'isolate_devel' flag became the string-valued 'devel_layout'.
            if old_version < (0, 4, 0):
                isolate_devel = metadata.get('isolate_devel')
                if isolate_devel is not None:
                    del metadata['isolate_devel']
                    devel_layout = ('isolated' if isolate_devel else 'merged')
                    metadata['devel_layout'] = devel_layout
            # Save the new metadata
            update_metadata(workspace_path, profile_name, verb, metadata, no_init=True, merge=False)
def init_metadata_root(workspace_path, reset=False):
    """Create or reset a catkin_tools metadata directory with no content in a given path.

    :param workspace_path: The exact path to the root of a catkin workspace
    :type workspace_path: str
    :param reset: If true, clear the metadata directory of all information
    :type reset: bool
    :raises IOError: if workspace_path does not exist or is nested inside
        another catkin workspace
    """
    # Make sure the directory exists before marking it as a workspace.
    if not os.path.exists(workspace_path):
        raise IOError(
            "Can't initialize Catkin workspace in path %s because it does "
            "not exist." % (workspace_path))
    # Check if the desired workspace is enclosed in another workspace
    marked_workspace = find_enclosing_workspace(workspace_path)
    if marked_workspace and marked_workspace != workspace_path:
        raise IOError(
            "Can't initialize Catkin workspace in path %s because it is "
            "already contained in another workspace: %s." %
            (workspace_path, marked_workspace))
    # Construct the full path to the metadata directory
    metadata_root_path = get_metadata_root_path(workspace_path)
    # Check if a metadata directory already exists
    if os.path.exists(metadata_root_path):
        # Reset the directory if requested
        if reset:
            print("Deleting existing metadata from catkin_tools metadata directory: %s" % (metadata_root_path))
            shutil.rmtree(metadata_root_path)
            os.mkdir(metadata_root_path)
    else:
        # Create a new .catkin_tools directory
        os.mkdir(metadata_root_path)
        # Write the README file describing the directory
        with open(os.path.join(metadata_root_path, 'README'), 'w') as metadata_readme:
            metadata_readme.write(METADATA_README_TEXT)
    # Migrate the metadata, if necessary
    migrate_metadata(workspace_path)
    # Add a catkin ignore file so we can store package.xml files for cleaned packages
    if not os.path.exists(os.path.join(metadata_root_path, 'CATKIN_IGNORE')):
        open(os.path.join(metadata_root_path, 'CATKIN_IGNORE'), 'a').close()
def init_profile(workspace_path, profile_name, reset=False):
    """Initialize a profile directory in a given workspace.

    :param workspace_path: The exact path to the root of a catkin_tools workspace
    :type workspace_path: str
    :param profile_name: The catkin_tools metadata profile name to initialize
    :type profile_name: str
    :param reset: If true, wipe an existing profile directory first
    :type reset: bool
    """
    init_metadata_root(workspace_path)
    profile_path = get_paths(workspace_path, profile_name)[0]
    if not os.path.exists(profile_path):
        # Fresh profile: create the directory (and any missing parents).
        mkdir_p(profile_path)
    elif reset:
        # Existing profile: wipe and recreate it only when asked to.
        print("Deleting existing profile from catkin_tools profile directory: %s" % (profile_path))
        shutil.rmtree(profile_path)
        os.mkdir(profile_path)
def get_profile_names(workspace_path):
    """Get a list of profile names available to a given workspace.

    :param workspace_path: The exact path to the root of a catkin_tools workspace
    :type workspace_path: str
    :returns: A list of the available profile names in the given workspace
    :rtype: list
    """
    migrate_metadata(workspace_path)
    profiles_path = get_profiles_path(workspace_path)
    if not os.path.exists(profiles_path):
        return []
    # Each immediate subdirectory of the profiles dir is one profile.
    _root, subdirectories, _files = next(os.walk(profiles_path))
    return subdirectories
def remove_profile(workspace_path, profile_name):
    """Remove a profile by name.

    :param workspace_path: The exact path to the root of a catkin_tools workspace
    :type workspace_path: str
    :param profile_name: The catkin_tools metadata profile name to delete
    :type profile_name: str
    """
    migrate_metadata(workspace_path)
    profile_path = get_paths(workspace_path, profile_name)[0]
    # Deleting a nonexistent profile is a no-op.
    if os.path.exists(profile_path):
        shutil.rmtree(profile_path)
def set_active_profile(workspace_path, profile_name):
    """Set a profile in a given workspace to be active.

    :param workspace_path: The exact path to the root of a catkin_tools workspace
    :type workspace_path: str
    :param profile_name: The catkin_tools metadata profile name to activate
    :type profile_name: str
    """
    # Read-modify-write the profiles file so other keys are preserved.
    data = get_profiles_data(workspace_path)
    data['active'] = profile_name
    yaml_path = os.path.join(get_profiles_path(workspace_path), PROFILES_YML_FILE_NAME)
    with open(yaml_path, 'w') as profiles_file:
        yaml.dump(data, profiles_file, default_flow_style=False)
def get_active_profile(workspace_path):
    """Get the active profile name from a workspace path.

    :param workspace_path: The exact path to the root of a catkin_tools workspace
    :type workspace_path: str
    :returns: The active profile name
    :rtype: str
    """
    # Fall back to the default profile when none has been recorded.
    return get_profiles_data(workspace_path).get('active', DEFAULT_PROFILE_NAME)
def get_profiles_data(workspace_path):
    """Get the contents of the profiles file.

    This file contains information such as the currently active profile.

    :param workspace_path: The exact path to the root of a catkin_tools workspace
    :type workspace_path: str
    :returns: The contents of the root profiles file if it exists
    :rtype: dict
    """
    migrate_metadata(workspace_path)
    if workspace_path is not None:
        profiles_path = get_profiles_path(workspace_path)
        profiles_yaml_file_path = os.path.join(profiles_path, PROFILES_YML_FILE_NAME)
        if os.path.exists(profiles_yaml_file_path):
            with open(profiles_yaml_file_path, 'r') as profiles_file:
                # NOTE(review): yaml.load without an explicit Loader can
                # construct arbitrary Python objects; consider yaml.safe_load
                # for this plain-data config file.
                return yaml.load(profiles_file)
    return {}
def get_metadata(workspace_path, profile, verb):
    """Get a python structure representing the metadata for a given verb.

    :param workspace_path: The exact path to the root of a catkin workspace
    :type workspace_path: str
    :param profile: The catkin_tools metadata profile name
    :type profile: str
    :param verb: The catkin_tools verb with which this information is associated
    :type verb: str
    :returns: A python structure representing the YAML file contents (empty
        dict if the file does not exist)
    :rtype: dict
    """
    migrate_metadata(workspace_path)
    (metadata_path, metadata_file_path) = get_paths(workspace_path, profile, verb)
    if not os.path.exists(metadata_file_path):
        return dict()
    with open(metadata_file_path, 'r') as metadata_file:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; consider yaml.safe_load here.
        return yaml.load(metadata_file)
def update_metadata(workspace_path, profile, verb, new_data=None, no_init=False, merge=True):
    """Update the catkin_tools verb metadata for a given profile.

    :param workspace_path: The path to the root of a catkin workspace
    :type workspace_path: str
    :param profile: The catkin_tools metadata profile name
    :type profile: str
    :param verb: The catkin_tools verb with which this information is associated
    :type verb: str
    :param new_data: A python dictionary or array to write to the metadata file
    :type new_data: dict
    :param no_init: If True, skip (re)initializing the metadata root/profile dirs
    :type no_init: bool
    :param merge: If True, merge new_data into the existing metadata; if
        False, replace it entirely
    :type merge: bool
    :returns: the metadata that was written to disk
    :rtype: dict
    """
    # FIX: use None instead of a mutable {} default so repeated calls can
    # never share (and accidentally mutate) one default dict object.
    if new_data is None:
        new_data = {}
    migrate_metadata(workspace_path)
    (metadata_path, metadata_file_path) = get_paths(workspace_path, profile, verb)
    # Make sure the metadata directory exists
    if not no_init:
        init_metadata_root(workspace_path)
        init_profile(workspace_path, profile)
    # Start from the current metadata when merging, from scratch otherwise
    data = get_metadata(workspace_path, profile, verb) if merge else dict()
    # Update the metadata for this verb and persist it
    data.update(new_data)
    with open(metadata_file_path, 'w') as metadata_file:
        yaml.dump(data, metadata_file, default_flow_style=False)
    return data
def get_active_metadata(workspace_path, verb):
    """Get a python structure representing the metadata for a given verb.

    :param workspace_path: The exact path to the root of a catkin workspace
    :type workspace_path: str
    :param verb: The catkin_tools verb with which this information is associated
    :type verb: str
    :returns: A python structure representing the YAML file contents (empty
        dict if the file does not exist)
    :rtype: dict
    """
    active_profile = get_active_profile(workspace_path)
    # FIX: the result was previously computed but never returned, so this
    # function always returned None despite its documented return value.
    return get_metadata(workspace_path, active_profile, verb)
def update_active_metadata(workspace_path, verb, new_data=None):
    """Update the catkin_tools verb metadata for the active profile.

    :param workspace_path: The path to the root of a catkin workspace
    :type workspace_path: str
    :param verb: The catkin_tools verb with which this information is associated
    :type verb: str
    :param new_data: A python dictionary or array to write to the metadata file
    :type new_data: dict
    :returns: the metadata that was written to disk
    :rtype: dict
    """
    # Avoid the shared mutable {} default (same idiom fix as update_metadata).
    if new_data is None:
        new_data = {}
    active_profile = get_active_profile(workspace_path)
    # FIX: this previously called update_active_metadata (itself!) with the
    # wrong arity — infinite recursion / TypeError. It must delegate to
    # update_metadata with the resolved profile name.
    return update_metadata(workspace_path, active_profile, verb, new_data)
| |
#!/usr/bin/env python
"""Tests for the dtypes and converters arguments to CSV import.
"""
import pytest
from io import BytesIO
from unittest import mock
from tdclient import api, Client
from tdclient.util import read_csv_records
from tdclient.test.test_helper import gunzipb
from tdclient.test.test_helper import make_response
from tdclient.test.test_helper import msgunpackb
# Rows exactly as the real CSV readers would deliver them: every value is
# still an unparsed string.
DEFAULT_DATA = [
    {
        "time": "100",
        "col1": "0001",
        "col2": "10",
        "col3": "1.0",
        "col4": "abcd",
        "col5": "true",
        "col6": "none",
    },
    {
        "time": "200",
        "col1": "0002",
        "col2": "20",
        "col3": "2.0",
        "col4": "efgh",
        "col5": "false",
        "col6": "",
    },
]


def sample_reader(data=DEFAULT_DATA):
    """Emulate the actual CSV readers by yielding each row dict in turn."""
    yield from data
def test_basic_read_csv_records():
    """The base test of read_csv_records - no customisation.

    With no dtypes/converters, every string value should be "guessed" into
    int/float/bool/None as appropriate.
    """
    reader = sample_reader()
    result = list(read_csv_records(reader))
    assert result == [
        {
            "time": 100,
            "col1": 1,
            "col2": 10,
            "col3": 1.0,
            "col4": "abcd",
            "col5": True,
            "col6": None,
        },
        {
            "time": 200,
            "col1": 2,
            "col2": 20,
            "col3": 2.0,
            "col4": "efgh",
            "col5": False,
            "col6": None,
        },
    ]
def test_unsupported_dtype_gives_error():
    """An unknown dtype name should raise ValueError on first use."""
    reader = sample_reader()
    with pytest.raises(ValueError) as excinfo:
        # Remember, it won't yield anything if we don't "next" it
        next(read_csv_records(reader, dtypes={"something": "no-such-dtype"}))
    assert "Unrecognized dtype 'no-such-dtype'" in str(excinfo.value)
def test_guess_dtype_gives_default_result():
    """An explicit 'guess' dtype on every column matches the no-dtypes default."""
    reader = sample_reader()
    result = list(
        read_csv_records(
            reader,
            dtypes={
                "time": "guess",
                "col1": "guess",
                "col2": "guess",
                "col3": "guess",
                "col4": "guess",
                "col5": "guess",
                "col6": "guess",
            },
        )
    )
    assert result == [
        {
            "time": 100,
            "col1": 1,
            "col2": 10,
            "col3": 1.0,
            "col4": "abcd",
            "col5": True,
            "col6": None,
        },
        {
            "time": 200,
            "col1": 2,
            "col2": 20,
            "col3": 2.0,
            "col4": "efgh",
            "col5": False,
            "col6": None,
        },
    ]
def test_dtypes_change_parsing():
    """Explicit dtypes override guessing for the named columns only."""
    reader = sample_reader()
    result = list(
        read_csv_records(
            reader, dtypes={"col1": "str", "col2": "float", "col6": "str",}
        )
    )
    assert result == [
        {
            "time": 100,
            "col1": "0001",
            "col2": 10.0,
            "col3": 1.0,
            "col4": "abcd",
            "col5": True,
            "col6": "none",
        },
        {
            "time": 200,
            "col1": "0002",
            "col2": 20.0,
            "col3": 2.0,
            "col4": "efgh",
            "col5": False,
            "col6": "",
        },
    ]
def test_converters_change_parsing():
    """Converter callables override guessing for the named columns only."""
    reader = sample_reader()
    result = list(
        read_csv_records(reader, converters={"col1": str, "col2": float, "col6": str,})
    )
    assert result == [
        {
            "time": 100,
            "col1": "0001",
            "col2": 10.0,
            "col3": 1.0,
            "col4": "abcd",
            "col5": True,
            "col6": "none",
        },
        {
            "time": 200,
            "col1": "0002",
            "col2": 20.0,
            "col3": 2.0,
            "col4": "efgh",
            "col5": False,
            "col6": "",
        },
    ]
def test_dtypes_plus_converters_change_parsing():
    """dtypes and converters can be combined on disjoint column sets."""
    reader = sample_reader()
    result = list(
        read_csv_records(
            reader, dtypes={"col1": "str", "col6": "str",}, converters={"col2": float,}
        )
    )
    assert result == [
        {
            "time": 100,
            "col1": "0001",
            "col2": 10.0,
            "col3": 1.0,
            "col4": "abcd",
            "col5": True,
            "col6": "none",
        },
        {
            "time": 200,
            "col1": "0002",
            "col2": 20.0,
            "col3": 2.0,
            "col4": "efgh",
            "col5": False,
            "col6": "",
        },
    ]
def test_dtypes_overridden_by_converters():
    """When a column appears in both, the converter wins over the dtype."""
    reader = sample_reader()
    result = list(
        read_csv_records(
            reader,
            dtypes={
                "time": "bool",  # overridden by converters
                "col1": "str",
                "col2": "int",  # overridden by converters
                "col6": "str",
            },
            converters={"time": int, "col2": float, "col5": str,},
        )
    )
    assert result == [
        {
            "time": 100,
            "col1": "0001",
            "col2": 10.0,
            "col3": 1.0,
            "col4": "abcd",
            "col5": "true",
            "col6": "none",
        },
        {
            "time": 200,
            "col1": "0002",
            "col2": 20.0,
            "col3": 2.0,
            "col4": "efgh",
            "col5": "false",
            "col6": "",
        },
    ]
# Minimal CSV payload (with a header row) fed to the import/upload tests below.
DEFAULT_HEADER_BYTE_CSV = (
    b"time,col1,col2,col3,col4\n" b"100,0001,10,1.0,abcd\n" b"200,0002,20,2.0,efgh\n"
)
def test_import_file_supports_dtypes_and_converters():
    """api.import_file must pass dtypes/converters through to the CSV reader.

    import_data is stubbed out to unpack the gzipped msgpack stream and
    verify the parsed records instead of talking to the service.
    """
    def import_data(db, table, format, stream, size, unique_id=None):
        data = stream.read(size)
        assert msgunpackb(gunzipb(data)) == [
            {"time": 100, "col1": "0001", "col2": 10.0, "col3": 1.0, "col4": "abcd"},
            {"time": 200, "col1": "0002", "col2": 20.0, "col3": 2.0, "col4": "efgh"},
        ]
    td = api.API("APIKEY")
    td.import_data = import_data
    td.import_file(
        "db",
        "table",
        "csv",
        BytesIO(DEFAULT_HEADER_BYTE_CSV),
        dtypes={"col1": "str", "col6": "str"},
        converters={"col2": float},
    )
def test_bulk_import_upload_file_supports_dtypes_and_converters():
    """api.bulk_import_upload_file must honour dtypes/converters as well."""
    def bulk_import_upload_part(name, part_name, stream, size):
        data = stream.read(size)
        assert msgunpackb(gunzipb(data)) == [
            {"time": 100, "col1": "0001", "col2": 10.0, "col3": 1.0, "col4": "abcd"},
            {"time": 200, "col1": "0002", "col2": 20.0, "col3": 2.0, "col4": "efgh"},
        ]
    td = api.API("APIKEY")
    td.bulk_import_upload_part = bulk_import_upload_part
    td.bulk_import_upload_file(
        "name",
        "part-name",
        "csv",
        BytesIO(DEFAULT_HEADER_BYTE_CSV),
        dtypes={"col1": "str", "col6": "str"},
        converters={"col2": float},
    )
def test_bulk_import_dot_upload_file_supports_dtypes_and_converters():
    """BulkImport.upload_file (the client-object path) must honour
    dtypes/converters too; HTTP calls are mocked out."""
    def bulk_import_upload_part(name, part_name, stream, size):
        data = stream.read(size)
        assert msgunpackb(gunzipb(data)) == [
            {"time": 100, "col1": "0001", "col2": 10.0, "col3": 1.0, "col4": "abcd"},
            {"time": 200, "col1": "0002", "col2": 20.0, "col3": 2.0, "col4": "efgh"},
        ]
    with Client("APIKEY") as td:
        td.api.post = mock.MagicMock(return_value=make_response(200, b""))
        td.api.bulk_import_upload_part = bulk_import_upload_part
        bulk_import = td.create_bulk_import("session-name", "mydb", "mytbl")
        bulk_import.update = mock.MagicMock()
        bulk_import.upload_file(
            "part-name",
            "csv",
            BytesIO(DEFAULT_HEADER_BYTE_CSV),
            dtypes={"col1": "str", "col6": "str"},
            converters={"col2": float},
        )
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import json
import logging
import os
import sys
import random
from random import shuffle
import numpy as np
import scipy as scp
import scipy.misc
from scipy.misc import imread, imresize
import tensorflow as tf
from utils.data_utils import (annotation_jitter, annotation_to_h5)
from utils.annolist import AnnotationLib as AnnoLib
from utils.rect import Rect
import threading
from collections import namedtuple
# Lightweight stand-in for an AnnotationLib annotation: only carries .rects.
fake_anno = namedtuple('fake_anno_object', ['rects'])
def read_kitti_anno(label_file, detect_truck):
    """Reads a kitti annotation file.

    Args:
        label_file: Path to file
        detect_truck: if False, 'Truck' labels are skipped entirely

    Returns:
        List of AnnoLib.AnnoRect: vehicles get classID 1, don't-care
        areas get classID -1.
    """
    # FIX: close the label file deterministically; the original opened it in
    # the comprehension and relied on the GC to release the handle.
    with open(label_file) as fh:
        labels = [line.rstrip().split(' ') for line in fh]
    rect_list = []
    for label in labels:
        # Only these KITTI classes are relevant for this detector.
        if label[0] not in ('Car', 'Van', 'Truck', 'DontCare'):
            continue
        if not detect_truck and label[0] == 'Truck':
            continue
        # Don't-care regions get class -1 so they can be masked out later.
        class_id = -1 if label[0] == 'DontCare' else 1
        # KITTI columns 4-7 are the 2D bbox: x1, y1, x2, y2.
        object_rect = AnnoLib.AnnoRect(
            x1=float(label[4]), y1=float(label[5]),
            x2=float(label[6]), y2=float(label[7]))
        assert object_rect.x1 < object_rect.x2
        assert object_rect.y1 < object_rect.y2
        object_rect.classID = class_id
        rect_list.append(object_rect)
    return rect_list
def _rescale_boxes(current_shape, anno, target_height, target_width):
x_scale = target_width / float(current_shape[1])
y_scale = target_height / float(current_shape[0])
for r in anno.rects:
assert r.x1 < r.x2
r.x1 *= x_scale
r.x2 *= x_scale
assert r.x1 < r.x2
r.y1 *= y_scale
r.y2 *= y_scale
return anno
def _generate_mask(hypes, ignore_rects):
width = hypes["image_width"]
height = hypes["image_height"]
grid_width = hypes["grid_width"]
grid_height = hypes["grid_height"]
mask = np.ones([grid_height, grid_width])
if not hypes['use_mask']:
return mask
for rect in ignore_rects:
left = int((rect.x1+2)/width*grid_width)
right = int((rect.x2-2)/width*grid_width)
top = int((rect.y1+2)/height*grid_height)
bottom = int((rect.y2-2)/height*grid_height)
for x in range(left, right+1):
for y in range(top, bottom+1):
mask[y, x] = 0
return mask
def _load_kitti_txt(kitti_txt, hypes, jitter=False, random_shuffel=True):
    """Take the txt file and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected.

    Args:
        kitti_txt: Index file; each line is "<image> <label_file>", with
            paths relative to the directory containing the index file.
        hypes: Hyper-parameter dict (image/grid sizes, rnn_len, ...).
        jitter: Apply random scale/offset augmentation when True.
        random_shuffel: Shuffle the file order once per epoch when True.

    Yields:
        Dicts with keys 'image', 'boxes', 'confs', 'rects' (positive
        ground-truth rectangles) and 'mask' (don't-care mask). Loops
        over epochs forever.
    """
    base_path = os.path.realpath(os.path.dirname(kitti_txt))
    # Close the index file deterministically (was left to the GC before).
    with open(kitti_txt) as f:
        files = [line.rstrip() for line in f]
    if hypes['data']['truncate_data']:
        files = files[:10]
        random.seed(0)
    for epoch in itertools.count():
        if random_shuffel:
            random.shuffle(files)
        for file in files:
            image_file, gt_image_file = file.split(" ")
            image_file = os.path.join(base_path, image_file)
            assert os.path.exists(image_file), \
                "File does not exist: %s" % image_file
            gt_image_file = os.path.join(base_path, gt_image_file)
            assert os.path.exists(gt_image_file), \
                "File does not exist: %s" % gt_image_file
            rect_list = read_kitti_anno(gt_image_file,
                                        detect_truck=hypes['detect_truck'])

            anno = AnnoLib.Annotation()
            anno.rects = rect_list

            im = scp.misc.imread(image_file)
            if im.shape[2] == 4:
                # Drop the alpha channel of RGBA images.
                im = im[:, :, :3]
            if im.shape[0] != hypes["image_height"] or \
                    im.shape[1] != hypes["image_width"]:
                # Rescale boxes and image together to the network input size.
                # (The original wrapped this in a dead `if True:` block.)
                anno = _rescale_boxes(im.shape, anno,
                                      hypes["image_height"],
                                      hypes["image_width"])
                im = imresize(
                    im, (hypes["image_height"], hypes["image_width"]),
                    interp='cubic')
            if jitter:
                jitter_scale_min = 0.9
                jitter_scale_max = 1.1
                jitter_offset = 16
                im, anno = annotation_jitter(
                    im, anno, target_width=hypes["image_width"],
                    target_height=hypes["image_height"],
                    jitter_scale_min=jitter_scale_min,
                    jitter_scale_max=jitter_scale_max,
                    jitter_offset=jitter_offset)

            pos_list = [rect for rect in anno.rects if rect.classID == 1]
            pos_anno = fake_anno(pos_list)

            boxes, confs = annotation_to_h5(hypes,
                                            pos_anno,
                                            hypes["grid_width"],
                                            hypes["grid_height"],
                                            hypes["rnn_len"])

            mask_list = [rect for rect in anno.rects if rect.classID == -1]
            mask = _generate_mask(hypes, mask_list)

            boxes = boxes.reshape([hypes["grid_height"],
                                   hypes["grid_width"], 4])
            confs = confs.reshape(hypes["grid_height"], hypes["grid_width"])

            yield {"image": im, "boxes": boxes, "confs": confs,
                   "rects": pos_list, "mask": mask}
def _make_sparse(n, d):
v = np.zeros((d,), dtype=np.float32)
v[n] = 1.
return v
def create_queues(hypes, phase):
    """Create the FIFO queue that holds (image, confs, boxes, mask) examples.

    Args:
        hypes: Hyper-parameter dict; image/grid sizes define the shapes.
            Also force-sets ``hypes['rnn_len'] = 1`` as a side effect.
        phase: Unused here; kept for interface symmetry with
            start_enqueuing_threads()/inputs().

    Returns:
        A ``tf.FIFOQueue`` with fixed element shapes.
    """
    hypes["rnn_len"] = 1
    dtypes = [tf.float32, tf.float32, tf.float32, tf.float32]
    # Element shapes: image, confidences, boxes, mask.
    shapes = ([hypes['image_height'], hypes['image_width'], 3],
              [hypes['grid_height'], hypes['grid_width']],
              [hypes['grid_height'], hypes['grid_width'], 4],
              [hypes['grid_height'], hypes['grid_width']])
    capacity = 30
    # (Removed an unused `grid_size` local from the original.)
    q = tf.FIFOQueue(capacity=capacity, dtypes=dtypes, shapes=shapes)
    return q
def _processe_image(hypes, image):
    """Apply random photometric augmentation to *image* and clamp to [0, 255].

    Level > 0 adds random brightness/contrast; level > 1 additionally adds
    random saturation/hue.
    """
    # NOTE(review): these ops are not commutative; consider randomizing the
    # order in which they are applied.
    augment_level = hypes['augment_level']
    if augment_level > 0:
        image = tf.image.random_brightness(image, max_delta=30)
        image = tf.image.random_contrast(image, lower=0.75, upper=1.25)
    if augment_level > 1:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.6)
        image = tf.image.random_hue(image, max_delta=0.15)
    # Clamp augmented values back into the valid 8-bit range.
    image = tf.minimum(image, 255.0)
    image = tf.maximum(image, 0)
    return image
def start_enqueuing_threads(hypes, q, phase, sess):
    """Start enqueuing threads.

    Builds placeholders and an enqueue op for queue *q*, primes the queue
    with a single example (so errors surface early), then starts a daemon
    thread that keeps feeding examples from the KITTI generator for *phase*
    ('train' or 'val').
    """
    # Placeholders feeding the queue.
    x_in = tf.placeholder(tf.float32)
    confs_in = tf.placeholder(tf.float32)
    boxes_in = tf.placeholder(tf.float32)
    mask_in = tf.placeholder(tf.float32)

    enqueue_op = q.enqueue((x_in, confs_in, boxes_in, mask_in))

    def make_feed(data):
        return {x_in: data['image'],
                confs_in: data['confs'],
                boxes_in: data['boxes'],
                mask_in: data['mask']}

    def thread_loop(sess, enqueue_op, gen):
        for d in gen:
            sess.run(enqueue_op, feed_dict=make_feed(d))

    data_file = hypes["data"]['%s_file' % phase]
    data_dir = hypes['dirs']['data_dir']
    data_file = os.path.join(data_dir, data_file)

    # Jitter only augments training data.
    gen = _load_kitti_txt(data_file, hypes,
                          jitter={'train': hypes['solver']['use_jitter'],
                                  'val': False}[phase])

    # `gen.next()` is Python-2-only; the builtin next() works everywhere.
    data = next(gen)
    sess.run(enqueue_op, feed_dict=make_feed(data))
    t = threading.Thread(target=thread_loop,
                         args=(sess, enqueue_op, gen))
    t.daemon = True
    t.start()
def test_new_kitti():
    """Smoke-compare the txt-based and idl-based generators.

    NOTE(review): the input paths are machine-specific, and the generator
    yields no 'flags' key, so the final comparison looks stale — verify.
    """
    idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl"
    kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt"

    with open('hypes/kittiBox.json', 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    hypes["rnn_len"] = 1
    hypes["image_height"] = 200
    hypes["image_width"] = 800

    gen1 = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False)
    gen2 = _load_kitti_txt(idlfile, hypes, random_shuffel=False)

    print('testing generators')
    for i in range(20):
        # `gen.next()` is Python-2-only; use the builtin next() instead.
        data1 = next(gen1)
        data2 = next(gen2)
        rects1 = data1['rects']
        rects2 = data2['rects']
        assert len(rects1) <= len(rects2)
        if not len(rects1) == len(rects2):
            print('ignoring flags')
            continue
        else:
            print('comparing flags')
            assert(np.all(data1['image'] == data2['image']))
            # assert(np.all(data1['boxes'] == data2['boxes']))
            if np.all(data1['flags'] == data2['flags']):
                print('same')
            else:
                print('diff')
def inputs(hypes, q, phase):
    """Dequeue a batch of examples for *phase* from queue *q*.

    'val' returns a single example with a leading batch dimension of 1;
    'train' returns hypes['batch_size'] examples with photometric
    augmentation applied to the images.

    Returns:
        A tuple ``(image, (confidences, boxes, mask))``.

    Raises:
        ValueError: if *phase* is neither 'train' nor 'val'.
    """
    if phase == 'val':
        image, confidences, boxes, mask = q.dequeue()
        image = tf.expand_dims(image, 0)
        confidences = tf.expand_dims(confidences, 0)
        boxes = tf.expand_dims(boxes, 0)
        mask = tf.expand_dims(mask, 0)
        return image, (confidences, boxes, mask)
    elif phase == 'train':
        image, confidences, boxes, mask = q.dequeue_many(hypes['batch_size'])
        image = _processe_image(hypes, image)
        return image, (confidences, boxes, mask)
    else:
        # The original `assert("Bad phase: ...")` asserted a non-empty string,
        # which always passes; raise so an invalid phase fails loudly.
        raise ValueError("Bad phase: {}".format(phase))
| |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 Peter Williams <peter@newton.cx> and collaborators.
# Licensed under the MIT License.
"""This module contains helpers for modeling X-ray spectra with the `Sherpa
<http://cxc.harvard.edu/sherpa/>`_ package.
"""
from __future__ import absolute_import, division, print_function
__all__ = '''
PowerLawApecDemModel
make_fixed_temp_multi_apec
expand_rmf_matrix
derive_identity_rmf
derive_identity_arf
get_source_qq_data
get_bkg_qq_data
make_qq_plot
make_multi_qq_plots
make_spectrum_plot
make_multi_spectrum_plots
'''.split()
import numpy as np
from sherpa.astro import ui
from sherpa.astro.xspec import XSAdditiveModel, _xspec
from sherpa.models import Parameter
from sherpa.models.parameter import hugeval
# Some helpful models
# Default temperature grid: 20 log-spaced kT samples from ~0.03 to 10 keV.
DEFAULT_KT_ARRAY = np.logspace(-1.5, 1, 20)
class PowerLawApecDemModel(XSAdditiveModel):
    """A model with contributions from APEC plasmas at a range of
    temperatures, scaling with temperature.

    Constructor arguments are:

    *name*
      The Sherpa name of the resulting model instance.
    *kt_array* = None
      An array of temperatures to use for the plasma models. If left at the
      default of None, a hard-coded default is used that spans temperatures of
      ~0.03 to 10 keV with logarithmic spacing.

    The contribution at each temperature scales with kT as a power law. The
    model parameters are:

    *gfac*
      The power-law normalization parameter. The contribution at temperature *kT*
      is ``norm * kT**gfac``.
    *Abundanc*
      The standard APEC abundance parameter.
    *redshift*
      The standard APEC redshift parameter.
    *norm*
      The standard overall normalization parameter.

    This model is only efficient to compute if *Abundanc* and *redshift* are
    frozen.
    """
    def __init__(self, name, kt_array=None):
        if kt_array is None:
            kt_array = DEFAULT_KT_ARRAY
        else:
            # `np.asfarray` was deprecated and removed in modern NumPy;
            # this spelling is equivalent and works on all versions.
            kt_array = np.atleast_1d(np.asarray(kt_array, dtype=float))

        self.gfac = Parameter(name, 'gfac', 0.5, 1e-4, 1e4, 1e-6, 1e6)
        self.Abundanc = Parameter(name, 'Abundanc', 1., 0., 5., 0.0, hugeval, frozen=True)
        self.redshift = Parameter(name, 'redshift', 0., -0.999, 10., -0.999, hugeval, frozen=True)
        self.norm = Parameter(name, 'norm', 1.0, 0.0, 1e24, 0.0, hugeval)

        self._kt_array = kt_array
        # Cache of the per-temperature APEC evaluations; valid only while
        # (Abundanc, redshift) match _cur_cache_key.
        self._cur_cache_key = None
        self._cached_vals = None

        XSAdditiveModel.__init__(self, name, (self.gfac, self.Abundanc, self.redshift, self.norm))

    def _calc(self, params, *args, **kwargs):
        gfac, abund, redshift, norm = params

        # Recompute the expensive per-temperature APEC components only when
        # the abundance or redshift changed since the previous evaluation.
        cache_key = (abund, redshift)
        if self._cur_cache_key != cache_key:
            self._cached_vals = [None] * self._kt_array.size

            for i in range(self._kt_array.size):
                apec_params = [self._kt_array[i], abund, redshift, 1.]
                self._cached_vals[i] = _xspec.xsaped(apec_params, *args, **kwargs)

            self._cur_cache_key = cache_key
            # Shape after transpose: (n_energy_bins, n_temperatures).
            self._cached_vals = np.array(self._cached_vals).T

        # Each temperature component scales as norm * kT**gfac.
        scales = norm * self._kt_array**gfac
        return (self._cached_vals * scales).sum(axis=1)
# Register the model class with Sherpa so it is usable by name in ui calls.
ui.add_model(PowerLawApecDemModel)
def make_fixed_temp_multi_apec(kTs, name_template='apec%d', norm=None):
    """Create a model summing multiple APEC components at fixed temperatures.

    *kTs*
      An iterable of temperatures for the components, in keV.
    *name_template* = 'apec%d'
      A template to use for the names of each component; it is string-formatted
      with the 0-based component number as an argument.
    *norm* = None
      An initial normalization to be used for every component, or None to use
      the Sherpa default.
    Returns:
      A tuple ``(total_model, sub_models)``, where *total_model* is a Sherpa
      model representing the sum of the APEC components and *sub_models* is
      a list of the individual models.

    Each component's *kT* parameter is set and then frozen (via
    :func:`sherpa.astro.ui.freeze`), so only the per-component amplitudes
    remain free after this call.
    """
    sub_models = []

    # First pass: instantiate and configure each component.
    for index, kT in enumerate(kTs):
        component = ui.xsapec(name_template % index)
        component.kT = kT
        ui.freeze(component.kT)
        if norm is not None:
            component.norm = norm
        sub_models.append(component)

    # Second pass: fold the components into a single summed model.
    total_model = None
    for component in sub_models:
        total_model = component if total_model is None else total_model + component

    return total_model, sub_models
def expand_rmf_matrix(rmf):
    """Expand an RMF matrix stored in compressed form.

    *rmf*
      An RMF object as might be returned by ``sherpa.astro.ui.get_rmf()``.
    Returns:
      A non-sparse RMF matrix as a 2D Numpy array of shape
      ``(n_energy, n_chan)``.

    The Response Matrix Function (RMF) of an X-ray telescope like Chandra can
    be stored in a sparse format as defined in `OGIP Calibration Memo
    CAL/GEN/92-002
    <https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html>`_.
    This function de-sparsifies such a matrix for visualization and analysis.
    """
    n_chan = rmf.e_min.size
    n_energy = rmf.n_grp.size

    dense = np.zeros((n_energy, n_chan))
    mtx_pos = 0
    grp_pos = 0

    for row in range(n_energy):
        # Each row is described by n_grp[row] runs ("groups") of channels.
        for _ in range(rmf.n_grp[row]):
            first = rmf.f_chan[grp_pos]
            count = rmf.n_chan[grp_pos]
            dense[row, first:first + count] = rmf.matrix[mtx_pos:mtx_pos + count]
            mtx_pos += count
            grp_pos += 1

    return dense
def derive_identity_rmf(name, rmf):
    """Create an "identity" RMF that does not mix energies.

    *name*
      The name of the RMF object to be created; passed to Sherpa.
    *rmf*
      An existing RMF object on which to base this one.
    Returns:
      A new RMF1D object that has a response matrix that is as close to
      diagonal as we can get in energy space, and that has a constant
      sensitivity as a function of detector channel.

    In many X-ray observations, the relevant background signal does not behave
    like an astrophysical source that is filtered through the telescope's
    response functions. However, I have been unable to get current Sherpa
    (version 4.9) to behave how I want when working with backround models that
    are *not* filtered through these response functions. This function
    constructs an "identity" RMF response matrix that provides the best
    possible approximation of a passthrough "instrumental response": it mixes
    energies as little as possible and has a uniform sensitivity as a function
    of detector channel.
    """
    from sherpa.astro.data import DataRMF
    from sherpa.astro.instrument import RMF1D

    # The "x" axis of the desired matrix -- the columnar direction; axis 1 --
    # is "channels". There are n_chan of them and each maps to a notional
    # energy range specified by "e_min" and "e_max".
    #
    # The "y" axis of the desired matrix -- the row direction; axis 0 -- is
    # honest-to-goodness energy. There are tot_n_energy energy bins, each
    # occupying a range specified by "energ_lo" and "energ_hi".
    #
    # We want every channel that maps to a valid output energy to have a
    # nonzero entry in the matrix. The relative sizes of n_energy and n_cell
    # can vary, as can the bounds of which regions of each axis can be validly
    # mapped to each other. So this problem is basically equivalent to that of
    # drawing an arbitrary pixelated line on bitmap, without anti-aliasing.
    #
    # The output matrix is represented in a row-based sparse format.
    #
    # - There is a integer vector "n_grp" of size "n_energy". It gives the
    #   number of "groups" needed to fill in each row of the matrix. Let
    #   "tot_groups = sum(n_grp)". For a given row, "n_grp[row_index]" may
    #   be zero, indicating that the row is all zeros.
    # - There are integer vectors "f_chan" and "n_chan", each of size
    #   "tot_groups", that define each group. "f_chan" gives the index of
    #   the first channel column populated by the group; "n_chan" gives the
    #   number of columns populated by the group. Note that there can
    #   be multiple groups for a single row, so successive group records
    #   may fill in different pieces of the same row.
    # - Let "tot_cells = sum(n_chan)".
    # - There is a vector "matrix" of size "tot_cells" that stores the actual
    #   matrix data. This is just a concatenation of all the data corresponding
    #   to each group.
    # - Unpopulated matrix entries are zero.
    #
    # See expand_rmf_matrix() for a sloppy implementation of how to unpack
    # this sparse format.

    n_chan = rmf.e_min.size
    n_energy = rmf.energ_lo.size

    c_lo_offset = rmf.e_min[0]
    c_lo_slope = (rmf.e_min[-1] - c_lo_offset) / (n_chan - 1)
    c_hi_offset = rmf.e_max[0]
    c_hi_slope = (rmf.e_max[-1] - c_hi_offset) / (n_chan - 1)

    e_lo_offset = rmf.energ_lo[0]
    e_lo_slope = (rmf.energ_lo[-1] - e_lo_offset) / (n_energy - 1)
    e_hi_offset = rmf.energ_hi[0]
    e_hi_slope = (rmf.energ_hi[-1] - e_hi_offset) / (n_energy - 1)

    all_e_indices = np.arange(n_energy)
    all_e_los = e_lo_slope * all_e_indices + e_lo_offset
    # `np.int` was removed from NumPy (deprecated in 1.20); the builtin `int`
    # is the drop-in replacement with identical semantics here.
    start_chans = np.floor((all_e_los - c_lo_offset) / c_lo_slope).astype(int)
    all_e_his = e_hi_slope * all_e_indices + e_hi_offset
    stop_chans = np.ceil((all_e_his - c_hi_offset) / c_hi_slope).astype(int)

    first_e_index_on_channel_grid = 0
    while stop_chans[first_e_index_on_channel_grid] < 0:
        first_e_index_on_channel_grid += 1

    last_e_index_on_channel_grid = n_energy - 1
    while start_chans[last_e_index_on_channel_grid] >= n_chan:
        last_e_index_on_channel_grid -= 1

    n_nonzero_rows = last_e_index_on_channel_grid + 1 - first_e_index_on_channel_grid
    e_slice = slice(first_e_index_on_channel_grid, last_e_index_on_channel_grid + 1)
    n_grp = np.zeros(n_energy, dtype=int)
    n_grp[e_slice] = 1

    start_chans = np.maximum(start_chans[e_slice], 0)
    stop_chans = np.minimum(stop_chans[e_slice], n_chan - 1)

    # We now have a first cut at a row-oriented expression of our "identity"
    # RMF. However, it's conservative. Trim down to eliminate overlaps between
    # sequences.

    for i in range(n_nonzero_rows - 1):
        my_end = stop_chans[i]
        next_start = start_chans[i+1]
        if next_start <= my_end:
            stop_chans[i] = max(start_chans[i], next_start - 1)

    # Results are funky unless the sums along the vertical axis are constant.
    # Ideally the sum along the *horizontal* axis would add up to 1 (since,
    # ideally, each row is a probability distribution), but it is not
    # generally possible to fulfill both of these constraints simultaneously.
    # The latter constraint does not seem to matter in practice so we ignore it.
    # Due to the funky encoding of the matrix, we need to build a helper table
    # to meet the vertical-sum constraint.

    counts = np.zeros(n_chan, dtype=int)

    for i in range(n_nonzero_rows):
        counts[start_chans[i]:stop_chans[i]+1] += 1

    counts[:start_chans.min()] = 1
    counts[stop_chans.max()+1:] = 1
    assert (counts > 0).all()

    # We can now build the matrix.

    f_chan = start_chans
    rmfnchan = stop_chans + 1 - f_chan
    assert (rmfnchan > 0).all()

    matrix = np.zeros(rmfnchan.sum())
    amounts = 1. / counts
    ofs = 0

    for i in range(n_nonzero_rows):
        f = f_chan[i]
        n = rmfnchan[i]
        matrix[ofs:ofs+n] = amounts[f:f+n]
        ofs += n

    # All that's left to do is create the Python objects.

    drmf = DataRMF(
        name,
        rmf.detchans,
        rmf.energ_lo,
        rmf.energ_hi,
        n_grp,
        f_chan,
        rmfnchan,
        matrix,
        offset = 0,
        e_min = rmf.e_min,
        e_max = rmf.e_max,
        header = None
    )

    return RMF1D(drmf, pha=rmf._pha)
def derive_identity_arf(name, arf):
    """Create an "identity" ARF that has uniform sensitivity.

    *name*
      The name of the ARF object to be created; passed to Sherpa.
    *arf*
      An existing ARF object on which to base this one.
    Returns:
      A new ARF1D object whose spectral response vector is all ones.

    In many X-ray observations, the relevant background signal does not behave
    like an astrophysical source filtered through the telescope's response
    functions, but current Sherpa is hard to drive with unfiltered background
    models. This constructs a passthrough ARF with uniform sensitivity as a
    function of detector channel.
    """
    from sherpa.astro.data import DataARF
    from sherpa.astro.instrument import ARF1D

    # Same energy gridding and exposure as the input, but a flat response.
    flat_response = np.ones(arf.specresp.shape)
    identity_arf = DataARF(
        name,
        arf.energ_lo,
        arf.energ_hi,
        flat_response,
        arf.bin_lo,
        arf.bin_hi,
        arf.exposure,
        header = None,
    )
    return ARF1D(identity_arf, pha=arf._pha)
def get_source_qq_data(id=None):
    """Get data for a quantile-quantile plot of the source data and model.

    *id*
      The dataset id for which to get the data; defaults if unspecified.
    Returns:
      An ndarray of shape ``(3, npts)``: row 0 is the energy axis in keV,
      row 1 the observed value per bin, row 2 the model value per bin.

    The inputs are implicit; the data are obtained from the current state of
    the Sherpa ``ui`` module.
    """
    sdata = ui.get_data(id=id)
    energies = sdata.get_x()
    observed = sdata.counts
    modeled = ui.get_model(id=id)(energies)
    return np.vstack((energies, observed, modeled))
def get_bkg_qq_data(id=None, bkg_id=None):
    """Get data for a quantile-quantile plot of the background data and model.

    *id*
      The dataset id for which to get the data; defaults if unspecified.
    *bkg_id*
      The identifier of the background; defaults if unspecified.
    Returns:
      An ndarray of shape ``(3, npts)``: row 0 is the energy axis in keV,
      row 1 the observed value per bin, row 2 the model value per bin.

    The inputs are implicit; the data are obtained from the current state of
    the Sherpa ``ui`` module.
    """
    bdata = ui.get_bkg(id=id, bkg_id=bkg_id)
    energies = bdata.get_x()
    observed = bdata.counts
    modeled = ui.get_bkg_model(id=id, bkg_id=bkg_id)(energies)
    return np.vstack((energies, observed, modeled))
def make_qq_plot(kev, obs, mdl, unit, key_text):
    """Make a quantile-quantile plot comparing events and a model.

    *kev*
      A 1D, sorted array of event energy bins measured in keV.
    *obs*
      A 1D array giving the number or rate of events in each bin.
    *mdl*
      A 1D array giving the modeled number or rate of events in each bin.
    *unit*
      Text describing the unit in which *obs* and *mdl* are measured; shown
      on the plot axes.
    *key_text*
      Text describing the quantile-quantile comparison quantity; shown in
      the plot legend.
    Returns:
      An :class:`omega.RectPlot` instance.

    *TODO*: nothing about this is Sherpa-specific. Same goes for some of the
    plotting routines in :mod:`pkwit.environments.casa.data`; might be
    reasonable to add a submodule for generic X-ray-y plotting routines.
    """
    import omega as om

    kev = np.asarray(kev)
    obs = np.asarray(obs)
    mdl = np.asarray(mdl)

    cum_obs = np.cumsum(obs)
    cum_mdl = np.cumsum(mdl)
    top = max(cum_obs[-1], cum_mdl[-1])

    p = om.RectPlot()
    p.addXY([0, top], [0, top], '1:1')
    p.addXY(cum_mdl, cum_obs, key_text)

    # HACK: this range of numbers is chosen to give reasonable sampling for my
    # sources, which are typically quite soft.
    locs = np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2)
    tick_lo = top * 1.05
    tick_hi = top * 1.1

    # Draw little energy tick marks just past the ends of both axes,
    # interpolating between adjacent bins at each fractional location.
    for loc in locs:
        base = int(np.floor(loc))
        frac = loc - base
        kevval = (1 - frac) * kev[base] + frac * kev[base + 1]
        mdlval = (1 - frac) * cum_mdl[base] + frac * cum_mdl[base + 1]
        obsval = (1 - frac) * cum_obs[base] + frac * cum_obs[base + 1]
        p.addXY([mdlval, mdlval], [tick_lo, tick_hi], '%.2f keV' % kevval, dsn=2)
        p.addXY([tick_lo, tick_hi], [obsval, obsval], None, dsn=2)

    p.setLabels('Cumulative model ' + unit, 'Cumulative data ' + unit)
    p.defaultKeyOverlay.vAlign = 0.3
    return p
def make_multi_qq_plots(arrays, key_text):
    """Make a quantile-quantile plot comparing multiple sets of events and models.

    *arrays*
      An iterable of ``(kev, obs, mdl)`` triples, one per data set, as
      returned by e.g. :func:`get_bkg_qq_data`.
    *key_text*
      Text describing the quantile-quantile comparison quantity; shown in
      the plot legend.
    Returns:
      An :class:`omega.RectPlot` instance.

    *TODO*: nothing about this is Sherpa-specific; see also the duplication
    with :func:`make_qq_plot`.
    """
    import omega as om

    p = om.RectPlot()
    p.addXY([0, 1.], [0, 1.], '1:1')

    for index, array in enumerate(arrays):
        kev, obs, mdl = array
        c_obs = np.cumsum(obs)
        c_mdl = np.cumsum(mdl)
        # Normalize both cumulative curves by the mean of their totals so
        # every data set lands on a comparable ~0..1 scale.
        mx = 0.5 * (c_obs[-1] + c_mdl[-1])
        c_obs /= mx
        c_mdl /= mx
        p.addXY(c_mdl, c_obs, '%s #%d' % (key_text, index))

    # HACK: this range of numbers is chosen to give reasonable sampling for my
    # sources, which are typically quite soft.
    #
    # Note: this reuses the variables from the last loop iteration.
    locs = np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2)
    tick_lo = 1.05
    tick_hi = 1.1

    for loc in locs:
        base = int(np.floor(loc))
        frac = loc - base
        kevval = (1 - frac) * kev[base] + frac * kev[base + 1]
        mdlval = (1 - frac) * c_mdl[base] + frac * c_mdl[base + 1]
        obsval = (1 - frac) * c_obs[base] + frac * c_obs[base + 1]
        p.addXY([mdlval, mdlval], [tick_lo, tick_hi], '%.2f keV' % kevval, dsn=2)
        p.addXY([tick_lo, tick_hi], [obsval, obsval], None, dsn=2)

    p.setLabels('Cumulative rescaled model', 'Cumulative rescaled data')
    p.defaultKeyOverlay.vAlign = 0.3
    return p
def make_spectrum_plot(model_plot, data_plot, desc, xmin_clamp=0.01,
                       min_valid_x=None, max_valid_x=None):
    """Make a plot of a spectral model and data.

    *model_plot*
      A model plot object returned by Sherpa from a call like
      ``ui.get_model_plot()`` or ``ui.get_bkg_model_plot()``.
    *data_plot*
      A data plot object returned by Sherpa from a call like
      ``ui.get_source_plot()`` or ``ui.get_bkg_plot()``.
    *desc*
      Text describing the origin of the data; shown in the plot legend
      (with "Model" and "Data" appended).
    *xmin_clamp*
      The smallest "x" (energy axis) value that will be plotted; default 0.01.
      Needed so the plot can be drawn on a log scale if the model's energy
      axis extends all the way to 0.
    *min_valid_x*
      Either None, or the smallest valid "x" value (e.g. from a "notice"
      range); if given, a band marks the invalidated region.
    *max_valid_x*
      Like *min_valid_x* but for the largest valid "x" value.
    Returns:
      A tuple ``(plot, xlow, xhigh)`` with an OmegaPlot RectPlot and the
      left/right plot bounds.
    """
    import omega as om

    model_x = np.concatenate((model_plot.xlo, [model_plot.xhi[-1]]))
    model_x[0] = max(model_x[0], xmin_clamp)
    model_y = np.concatenate((model_plot.y, [0.]))

    # Sherpa occasionally hands back non-finite model values; zero them out.
    bad = ~np.isfinite(model_y)
    if bad.sum():
        from .cli import warn
        warn('bad Sherpa model Y value(s) at: %r', np.where(bad)[0])
        model_y[bad] = 0

    left_edges = data_plot.x - 0.5 * data_plot.xerr
    left_edges[0] = max(left_edges[0], xmin_clamp)
    data_hist_x = np.concatenate((left_edges, [data_plot.x[-1] + 0.5 * data_plot.xerr[-1]]))
    data_hist_y = np.concatenate((data_plot.y, [0.]))

    pad = 0.9  # log-axis padding factor for the plot bounds
    xlow = model_x[0] * pad
    xhigh = model_x[-1] / pad

    p = om.RectPlot()

    # Gray out regions outside the valid (noticed) energy range, behind
    # everything else.
    if min_valid_x is not None:
        p.add(om.rect.XBand(1e-3 * xlow, min_valid_x, keyText=None), zheight=-1, dsn=1)
    if max_valid_x is not None:
        p.add(om.rect.XBand(max_valid_x, xhigh * 1e3, keyText=None), zheight=-1, dsn=1)

    model_csp = om.rect.ContinuousSteppedPainter(keyText=desc + ' Model')
    model_csp.setFloats(model_x, model_y)
    p.add(model_csp)

    data_csp = om.rect.ContinuousSteppedPainter(keyText=None)
    data_csp.setFloats(data_hist_x, data_hist_y)
    p.add(data_csp)

    p.addXYErr(data_plot.x, data_plot.y, data_plot.yerr, desc + ' Data', lines=0, dsn=1)
    p.setLabels(data_plot.xlabel, data_plot.ylabel)
    p.setLinLogAxes(True, False)
    p.setBounds(xlow, xhigh)
    return p, xlow, xhigh
def make_multi_spectrum_plots(model_plot, plotids, data_getter, desc, xmin_clamp=0.01,
                              min_valid_x=None, max_valid_x=None):
    """Make a plot of multiple spectral models and data.

    *model_plot*
      A model plot object returned by Sherpa from a call like
      ``ui.get_model_plot()`` or ``ui.get_bkg_model_plot()``.
    *plotids*
      An iterable of Sherpa dataset ids, passed one at a time to
      *data_getter*.
    *data_getter*
      A callable mapping one plot id to a data plot object, e.g.
      ``lambda id: ui.get_bkg_plot(id)``.
    *desc*
      Text describing the origin of the data; will be shown in the plot legend
      (with "Model" and "Data #<number>" appended).
    *xmin_clamp*
      The smallest "x" (energy axis) value that will be plotted; default is 0.01.
      This is needed to allow the plot to be shown on a logarithmic scale if
      the energy axes of the model go all the way to 0.
    *min_valid_x*
      Either None, or the smallest "x" (energy axis) value in which the model and
      data are valid; this could correspond to a range specified in the "notice"
      command during analysis. If specified, a gray band will be added to the plot
      showing the invalidated regions.
    *max_valid_x*
      Like *min_valid_x* but for the largest "x" (energy axis) value in which the
      model and data are valid.
    Returns:
      A tuple ``(plot, xlow, xhigh)``, where *plot* an OmegaPlot RectPlot
      instance, *xlow* is the left edge of the plot bounds, and *xhigh* is the
      right edge of the plot bounds.

    TODO: not happy about the code duplication with :func:`make_spectrum_plot`
    but here we are.
    """
    import omega as om
    from omega.stamps import DataThemedStamp, WithYErrorBars

    model_x = np.concatenate((model_plot.xlo, [model_plot.xhi[-1]]))
    model_x[0] = max(model_x[0], xmin_clamp)
    model_y = np.concatenate((model_plot.y, [0.]))

    # Sigh, sometimes Sherpa gives us bad values.
    is_bad = ~np.isfinite(model_y)
    if is_bad.sum():
        from .cli import warn
        warn('bad Sherpa model Y value(s) at: %r', np.where(is_bad)[0])
        model_y[is_bad] = 0

    p = om.RectPlot()
    data_csps = []
    data_lines = []
    xlow = xhigh = None

    # Build the per-dataset painters first; they are added to the plot later
    # so the validity bands and model end up underneath them.
    for index, plotid in enumerate(plotids):
        data_plot = data_getter(plotid)
        data_left_edges = data_plot.x - 0.5 * data_plot.xerr
        data_left_edges[0] = max(data_left_edges[0], xmin_clamp)
        data_hist_x = np.concatenate((data_left_edges, [data_plot.x[-1] + 0.5 * data_plot.xerr[-1]]))
        data_hist_y = np.concatenate((data_plot.y, [0.]))

        if xlow is None:
            xlow = model_x[0]
            xhigh = model_x[-1]
        else:
            xlow = min(xlow, model_x[0])
            xhigh = max(xhigh, model_x[-1])

        csp = om.rect.ContinuousSteppedPainter(keyText=None)
        csp.setFloats(data_hist_x, data_hist_y)
        data_csps.append(csp)

        inner_stamp = DataThemedStamp(None)
        stamp = WithYErrorBars(inner_stamp)
        lines = om.rect.XYDataPainter(
            lines = False,
            pointStamp = stamp,
            keyText = '%s Data #%d' % (desc, index)
        )
        lines.setFloats(data_plot.x, data_plot.y,
                        data_plot.y + data_plot.yerr,
                        data_plot.y - data_plot.yerr)
        inner_stamp.setHolder(lines)
        data_lines.append(lines)

    # Pad the bounds so a log x axis leaves some margin around the data.
    log_bounds_pad_factor = 0.9
    xlow *= log_bounds_pad_factor
    xhigh /= log_bounds_pad_factor

    if min_valid_x is not None:
        p.add(om.rect.XBand(1e-3 * xlow, min_valid_x, keyText=None), zheight=-1, dsn=1)
    if max_valid_x is not None:
        p.add(om.rect.XBand(max_valid_x, xhigh * 1e3, keyText=None), zheight=-1, dsn=1)

    model_csp = om.rect.ContinuousSteppedPainter(keyText=desc + ' Model')
    model_csp.setFloats(model_x, model_y)
    p.add(model_csp)

    for index, (data_csp, lines) in enumerate(zip(data_csps, data_lines)):
        p.add(data_csp, dsn=index + 1)
        p.add(lines, dsn=index + 1)

    p.setLabels(data_plot.xlabel, data_plot.ylabel) # data_plot = last one from the for loop
    p.setLinLogAxes(True, False)
    p.setBounds (xlow, xhigh)
    return p, xlow, xhigh
| |
# -*- coding: utf-8 -*-
import flask
import functools
import logging
import requests
from .. import storage
from .. import toolkit
from . import cache
from . import config
logger = logging.getLogger(__name__)
cfg = config.load()
def is_mirror():
    """Return True when this registry is configured to mirror a source."""
    mirroring = cfg.mirroring
    return bool(mirroring and mirroring.source)
def _response_headers(base):
    """Copy *base* headers with lowercased names, dropping Content-Encoding.

    The body returned by `requests` is already decoded, so forwarding the
    source's Content-Encoding header would mislead the client.
    """
    headers = {}
    if not base:
        return headers
    # `.iteritems()` is Python-2-only; `.items()` works on both 2 and 3.
    for k, v in base.items():
        if k.lower() == 'content-encoding':
            continue
        headers[k.lower()] = v
    # NOTE(review): logging the full header set at warning level looks like
    # leftover debugging; consider demoting to debug.
    logger.warning(headers)
    return headers
def lookup_source(path, stream=False, source=None):
    """Fetch *path* from the mirror *source* registry.

    Falls back to the configured mirroring source when *source* is not given;
    returns None when mirroring is unconfigured or the source answers with a
    non-200 status, otherwise the `requests` response object.
    """
    if not source:
        if not is_mirror():
            return
        source = cfg.mirroring.source
    source_url = '{0}{1}'.format(source, path)

    # Forward the incoming request headers, minus location/host which are
    # specific to this registry instance.
    # (`.iteritems()` was Python-2-only; `.items()` works on both.)
    headers = {}
    for k, v in flask.request.headers.items():
        if k.lower() != 'location' and k.lower() != 'host':
            headers[k] = v

    logger.debug('Request: GET {0}\nHeaders: {1}'.format(
        source_url, headers
    ))

    source_resp = requests.get(
        source_url,
        headers=headers,
        cookies=flask.request.cookies,
        stream=stream
    )
    if source_resp.status_code != 200:
        logger.debug('Source responded to request with non-200'
                     ' status')
        logger.debug('Response: {0}\n{1}\n'.format(
            source_resp.status_code, source_resp.text
        ))
        return None

    return source_resp
def source_lookup_tag(f):
    """Decorator for tag endpoints: on a local 404, proxy the request to the
    configured mirror source and cache the answer in Redis.

    Inactive (returns the wrapped view's response untouched) when mirroring
    is not configured.
    """
    @functools.wraps(f)
    def wrapper(namespace, repository, *args, **kwargs):
        mirroring_cfg = cfg.mirroring
        resp = f(namespace, repository, *args, **kwargs)
        if not is_mirror():
            return resp
        source = mirroring_cfg.source
        tags_cache_ttl = mirroring_cfg.tags_cache_ttl

        # Anything other than "not found locally" is served as-is.
        if resp.status_code != 404:
            logger.debug('Status code is not 404, no source '
                         'lookup required')
            return resp

        if not cache.redis_conn:
            # No tags cache available: fall through to a direct source
            # lookup whose result cannot be cached.
            logger.warning('mirroring: Tags cache is disabled, please set a '
                           'valid `cache\' directive in the config.')
            source_resp = lookup_source(
                flask.request.path, stream=False, source=source
            )
            if not source_resp:
                return resp

            headers = _response_headers(source_resp.headers)
            return toolkit.response(data=source_resp.content, headers=headers,
                                    raw=True)

        store = storage.load()
        request_path = flask.request.path

        if request_path.endswith('/tags'):
            # client GETs a list of tags
            tag_path = store.tag_path(namespace, repository)
        else:
            # client GETs a single tag
            tag_path = store.tag_path(namespace, repository, kwargs['tag'])

        # Try the Redis cache first; treat connection errors as a cache miss.
        try:
            data = cache.redis_conn.get('{0}:{1}'.format(
                cache.cache_prefix, tag_path
            ))
        except cache.redis.exceptions.ConnectionError as e:
            data = None
            logger.warning("Diff queue: Redis connection error: {0}".format(
                e
            ))
        if data is not None:
            return toolkit.response(data=data, raw=True)
        source_resp = lookup_source(
            flask.request.path, stream=False, source=source
        )
        if not source_resp:
            return resp
        data = source_resp.content
        headers = _response_headers(source_resp.headers)

        # Best-effort cache write; a Redis outage must not fail the request.
        try:
            cache.redis_conn.setex('{0}:{1}'.format(
                cache.cache_prefix, tag_path
            ), tags_cache_ttl, data)
        except cache.redis.exceptions.ConnectionError as e:
            logger.warning("Diff queue: Redis connection error: {0}".format(
                e
            ))

        return toolkit.response(data=data, headers=headers,
                                raw=True)
    return wrapper
def source_lookup(cache=False, stream=False, index_route=False):
    """Decorator factory: on a local 404, look the resource up on the mirror
    source.

    *cache*
      Persist JSON payloads fetched from the source into local storage.
    *stream*
      Treat the resource as a (large) layer and stream it to the client
      while writing it to storage, instead of buffering JSON.
    *index_route*
      Use the configured source_index (if any) instead of the plain source,
      and rewrite the x-docker-endpoints header to point at this registry.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            mirroring_cfg = cfg.mirroring
            resp = f(*args, **kwargs)
            if not is_mirror():
                return resp
            source = mirroring_cfg.source
            if index_route and mirroring_cfg.source_index:
                source = mirroring_cfg.source_index
            logger.debug('Source provided, registry acts as mirror')

            # Anything other than "not found locally" is served as-is.
            if resp.status_code != 404:
                logger.debug('Status code is not 404, no source '
                             'lookup required')
                return resp

            source_resp = lookup_source(
                flask.request.path, stream=stream, source=source
            )
            if not source_resp:
                return resp

            store = storage.load()

            headers = _response_headers(source_resp.headers)
            if index_route and 'x-docker-endpoints' in headers:
                headers['x-docker-endpoints'] = toolkit.get_endpoints()

            if not stream:
                logger.debug('JSON data found on source, writing response')
                resp_data = source_resp.content
                if cache:
                    store_mirrored_data(
                        resp_data, flask.request.url_rule.rule, kwargs,
                        store
                    )
                return toolkit.response(
                    data=resp_data,
                    headers=headers,
                    raw=True
                )
            logger.debug('Layer data found on source, preparing to '
                         'stream response...')
            layer_path = store.image_layer_path(kwargs['image_id'])
            return _handle_mirrored_layer(source_resp, layer_path, store,
                                          headers)

        return wrapper
    return decorator
def _handle_mirrored_layer(source_resp, layer_path, store, headers):
    """Stream a layer fetched from the mirror source back to the client,
    copying it into a temp store that is flushed to persistent storage
    once the stream has been fully consumed."""
    reader = toolkit.SocketReader(source_resp)
    tmp_file, tmp_handler = storage.temp_store_handler()
    reader.add_handler(tmp_handler)

    def _stream():
        for piece in reader.iterate(store.buffer_size):
            yield piece
        # FIXME: this could be done outside of the request context
        tmp_file.seek(0)
        store.stream_write(layer_path, tmp_file)
        tmp_file.close()

    return flask.Response(_stream(), headers=dict(headers))
def store_mirrored_data(data, endpoint, args, store):
    """Persist data fetched from a mirror source at the storage path that
    corresponds to the Flask route it was requested through.

    Routes not present in the mapping below are ignored silently.
    """
    logger.debug('Endpoint: {0}'.format(endpoint))
    known_endpoints = {
        '/v1/images/<image_id>/json': ('image_json_path', ('image_id',)),
        '/v1/images/<image_id>/ancestry': (
            'image_ancestry_path', ('image_id',)
        ),
        '/v1/repositories/<path:repository>/json': (
            'registry_json_path', ('namespace', 'repository')
        ),
    }
    if endpoint not in known_endpoints:
        return
    path_method, arglist = known_endpoints[endpoint]
    logger.debug('Path method: {0}'.format(path_method))
    pm_args = {name: args[name] for name in arglist}
    logger.debug('Path method args: {0}'.format(pm_args))
    storage_path = getattr(store, path_method)(**pm_args)
    logger.debug('Storage path: {0}'.format(storage_path))
    store.put_content(storage_path, data)
| |
import datetime
from django.conf import settings
import mock
from nose.tools import eq_
import amo
from abuse.models import AbuseReport
from amo.tasks import find_abuse_escalations, find_refund_escalations
from amo.tests import app_factory
from devhub.models import AppLog
from editors.models import EscalationQueue
from market.models import AddonPurchase, Refund
from stats.models import Contribution
from users.models import UserProfile
class TestAbuseEscalationTask(amo.tests.TestCase):
    """Tests for the find_abuse_escalations task.

    Apps accumulating abuse reports should be escalated exactly once, and
    should not be re-escalated for reports that predate a reviewer having
    cleared the escalation.
    """
    fixtures = ['base/users']

    def setUp(self):
        # App under test; the escalation queue must start empty.
        self.app = app_factory(name='XXX')
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
        # The task logs actions as TASK_USER_ID; presumably 4043307 is a
        # user provided by the fixtures -- verify if fixtures change.
        patcher = mock.patch.object(settings, 'TASK_USER_ID', 4043307)
        patcher.start()
        self.addCleanup(patcher.stop)

    def test_no_abuses_no_history(self):
        """No abuse reports: nothing is escalated."""
        find_abuse_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)

    def test_abuse_no_history(self):
        """Two fresh abuse reports put the app in the escalation queue."""
        for x in range(2):
            AbuseReport.objects.create(addon=self.app)
        find_abuse_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)

    def test_abuse_already_escalated(self):
        """Re-running the task must not create a duplicate queue entry."""
        for x in range(2):
            AbuseReport.objects.create(addon=self.app)
        find_abuse_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
        find_abuse_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)

    def test_abuse_cleared_not_escalated(self):
        """Reports older than a reviewer's 'cleared' log entry do not
        re-escalate the app."""
        for x in range(2):
            ar = AbuseReport.objects.create(addon=self.app)
            # Backdate the reports so they predate the clearing below.
            ar.created = datetime.datetime.now() - datetime.timedelta(days=1)
            ar.save()
        find_abuse_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
        # Simulate a reviewer clearing an escalation... remove app from queue,
        # and write a log.
        EscalationQueue.objects.filter(addon=self.app).delete()
        amo.log(amo.LOG.ESCALATION_CLEARED, self.app, self.app.current_version,
                details={'comments': 'All clear'})
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
        # Task will find it again but not add it again.
        find_abuse_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)

    def test_older_abuses_cleared_then_new(self):
        """After clearing, only NEW abuse reports re-add the app to the
        escalation queue."""
        for x in range(2):
            ar = AbuseReport.objects.create(addon=self.app)
            ar.created = datetime.datetime.now() - datetime.timedelta(days=1)
            ar.save()
        find_abuse_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
        # Simulate a reviewer clearing an escalation... remove app from queue,
        # and write a log.
        EscalationQueue.objects.filter(addon=self.app).delete()
        amo.log(amo.LOG.ESCALATION_CLEARED, self.app, self.app.current_version,
                details={'comments': 'All clear'})
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
        # Task will find it again but not add it again.
        find_abuse_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
        # New abuse reports that come in should re-add to queue.
        for x in range(2):
            AbuseReport.objects.create(addon=self.app)
        find_abuse_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)

    def test_already_escalated_for_other_still_logs(self):
        """An app already queued for a different reason still gets the
        high-abuse activity log entry."""
        # Add app to queue for high refunds.
        EscalationQueue.objects.create(addon=self.app)
        amo.log(amo.LOG.ESCALATED_HIGH_REFUNDS, self.app,
                self.app.current_version, details={'comments': 'hi refunds'})
        # Set up abuses.
        for x in range(2):
            AbuseReport.objects.create(addon=self.app)
        find_abuse_escalations(self.app.id)
        # Verify it logged the high abuse reports.
        action = amo.LOG.ESCALATED_HIGH_ABUSE
        assert AppLog.objects.filter(
            addon=self.app, activity_log__action=action.id).exists(), (
                u'Expected high abuse to be logged')
class TestRefundsEscalationTask(amo.tests.TestCase):
    """Tests for the find_refund_escalations task.

    Apps whose recent refund/purchase ratio is high are escalated once,
    are not re-escalated after a reviewer clears them, and still receive
    activity-log entries when already queued for another reason.
    """
    fixtures = ['base/users']

    def setUp(self):
        self.app = app_factory(name='XXX')
        self.user1, self.user2, self.user3 = UserProfile.objects.all()[:3]
        # The task logs actions as TASK_USER_ID; presumably 4043307 is a
        # user provided by the fixtures -- verify if fixtures change.
        patcher = mock.patch.object(settings, 'TASK_USER_ID', 4043307)
        patcher.start()
        self.addCleanup(patcher.stop)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)

    def _purchase(self, user=None, created=None):
        """Create an AddonPurchase for the app, optionally backdated."""
        ap1 = AddonPurchase.objects.create(user=user or self.user1,
                                           addon=self.app)
        if created:
            ap1.update(created=created)

    def _refund(self, user=None, created=None):
        """Create a Refund (and its Contribution) for the app, optionally
        backdating both the refund and the resulting app log entry."""
        contribution = Contribution.objects.create(addon=self.app,
                                                   user=user or self.user1)
        ref = Refund.objects.create(contribution=contribution,
                                    user=user or self.user1)
        if created:
            ref.update(created=created)
            # Needed because these tests can run in the same second and the
            # refund detection task depends on timestamp logic for when to
            # escalate.
            applog = AppLog.objects.all().order_by('-created', '-id')[0]
            applog.update(created=created)

    def test_multiple_refunds_same_user(self):
        """Two refunds from the sole purchaser still yield a ratio of 1.0
        (apparently de-duplicated per user -- verify against
        Refund.recent_refund_ratio)."""
        self._purchase(self.user1)
        self._refund(self.user1)
        self._refund(self.user1)
        eq_(Refund.recent_refund_ratio(
            self.app.id, datetime.datetime.now() - datetime.timedelta(days=1)),
            1.0)

    def test_no_refunds(self):
        """No refunds: nothing is escalated."""
        find_refund_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)

    def test_refunds(self):
        """One refund out of two purchases escalates the app (triggered via
        Refund.post_save; see test_refunds_already_escalated)."""
        self._purchase(self.user1)
        self._purchase(self.user2)
        self._refund(self.user1)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)

    def test_refunds_already_escalated(self):
        """Re-running the task must not create a duplicate queue entry."""
        self._purchase(self.user1)
        self._purchase(self.user2)
        self._refund(self.user1)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)
        # Task was run on Refund.post_save, re-run task to make sure we don't
        # escalate again.
        find_refund_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)

    def test_refunds_cleared_not_escalated(self):
        """Refunds older than a reviewer's 'cleared' log entry do not
        re-escalate the app."""
        stamp = datetime.datetime.now() - datetime.timedelta(days=2)
        self._purchase(self.user1, stamp)
        self._purchase(self.user2, stamp)
        self._refund(self.user1, stamp)
        # Simulate a reviewer clearing an escalation...
        # remove app from queue and write a log.
        EscalationQueue.objects.filter(addon=self.app).delete()
        amo.log(amo.LOG.ESCALATION_CLEARED, self.app, self.app.current_version,
                details={'comments': 'All clear'})
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
        # Task will find it again but not add it again.
        find_refund_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)

    def test_older_refund_escalations_then_new(self):
        """After clearing, only a NEW refund re-adds the app to the
        escalation queue."""
        stamp = datetime.datetime.now() - datetime.timedelta(days=2)
        self._purchase(self.user1, stamp)
        self._purchase(self.user2, stamp)
        # Triggers 33% for refund / purchase ratio.
        self._refund(self.user1, stamp)
        # Simulate a reviewer clearing an escalation...
        # remove app from queue and write a log.
        EscalationQueue.objects.filter(addon=self.app).delete()
        amo.log(amo.LOG.ESCALATION_CLEARED, self.app, self.app.current_version,
                details={'comments': 'All ok'})
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
        # Task will find it again but not add it again.
        find_refund_escalations(self.app.id)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 0)
        # Issue another refund, which should trigger another escalation.
        self._purchase(self.user3)
        self._refund(self.user3)
        eq_(EscalationQueue.objects.filter(addon=self.app).count(), 1)

    def test_already_escalated_for_other_still_logs(self):
        """An app already queued for a different reason still gets the
        high-refunds activity log entry."""
        # Add app to queue for abuse reports.
        EscalationQueue.objects.create(addon=self.app)
        amo.log(amo.LOG.ESCALATED_HIGH_ABUSE, self.app,
                self.app.current_version, details={'comments': 'abuse'})
        # Set up purchases.
        stamp = datetime.datetime.now() - datetime.timedelta(days=2)
        self._purchase(self.user1, stamp)
        self._purchase(self.user2, stamp)
        # Triggers 33% for refund / purchase ratio.
        self._refund(self.user1, stamp)
        # Verify it logged the high refunds.
        action = amo.LOG.ESCALATED_HIGH_REFUNDS
        assert AppLog.objects.filter(
            addon=self.app, activity_log__action=action.id).exists(), (
                u'Expected high refunds to be logged')
| |
#!/usr/bin/python
import sys, getopt, copy, pprint, traceback, ast
from pymongo import MongoClient
from fluxpy import models
from fluxpy import mediators
from fluxpy.mediators import *
usage_hdr = """
manage.py [COMMAND] [REQUIRED ARGS FOR COMMAND] [OPTIONAL ARGS FOR COMMAND]
Commands:
load Loads data
remove Removes data
rename Renames data collections
db Database diagnostic tools, incl. listing all
collections, viewing collection metadata, etc.
"""
usage_load = """
manage.py load
Usage:
manage.py load -p <filepath> -m <model> -n <collection_name> [OPTIONAL ARGS]
Required arguments:
-p, --path Directory path of input file in Matlab (*.mat)
or HDF5 (*.h5 or *.mat) format
-n, --collection_name Provide a unique name for the dataset by which
it will be identified in the MongoDB
-m, --model fluxpy/models.py model associated with the
input dataset
Optional arguments:
-c, --config_file Specify location of json config file. By
default, seeks input file w/ .json extension.
-o, --options Use to override specifications in the config file.
Syntax: -o "parameter1=value1;parameter2=value2;parameter3=value3"
e.g.: -o "title=MyData;gridres={'units':'degrees,'x':1.0,'y':1.0}"
Examples:
python manage.py load -p ./data_casa_gfed.mat -m SpatioTemporalMatrix -n casa_gfed_2004
In the following example, the program will look for a config file
at ~/data_casa_gfed.json and overwrite the timestamp and var_name
specifications in that file with those provided as command line args:
python manage.py load -p ./data_casa_gfed.mat -m SpatioTemporalMatrix -n casa_gfed_2004 -o "timestamp=2003-12-22T03:00:00;var_name=casa_gfed_2004"
"""
usage_remove = """
manage.py remove
Usage:
manage.py remove -n <collection_name>
Required argument:
-n, --collection_name Collection name to be removed (MongoDB identifier)
Example:
python manage.py remove -n casa_gfed_2004
"""
usage_rename = """
manage.py rename
Usage:
manage.py rename -n <collection_name> -r <new_name>
Required arguments:
-n, --collection_name Collection name to be removed (MongoDB identifier)
-r, --new_name New name for the collection
Example:
python manage.py rename -n casa_gfed_2004 -r casa_2004
"""
usage_db = """
manage.py db
Usage:
manage.py db [OPTIONAL ARGUMENTS]
Requires one of the following flags:
-l, --list_ids Lists collection names in the database.
Optional args with -l flag:
collections : lists collections
metadata: lists the collections w/ metadata entries
coord_index: lists the collections w/ coord_index entries
-n, --collection_name Collection name for which to shows metadata
-a, --audit No argument required. Performs audit of the
database, reporting any collections that are
missing corresponding metadata/coord_index
entries and any stale metadata/coord_index
entries without corresponding collections
Optional argument:
-x, --include_counts Include count of records within each listed
collection. Valid only with a corresponding
"-l collections" flag; ignored otherwise
Examples:
List all collections and their number of records:
python manage.py db -l collections -x
List all the collections with metadata entries:
python manage.py db -l metadata
Show metadata for the collection with id "casa_gfed_2004":
python manage.py db -n casa_gfed_2004
Audit the database:
python manage.py db -a
"""
usage_all = ('\n' + '-'*30).join([usage_hdr,usage_load,usage_remove,usage_rename,usage_db])
# map of valid options (and whether or not they are required) for each command
# -one current naivete: this setup assumes all boolean options are not required, which just happens to be the case (for now)
commands = {
'load' : {'path': True,
'model': True,
'mediator': False,
'collection_name': True,
'options': False,
'config_file': False},
'remove': {'collection_name': True},
'rename': {'collection_name': True,
'new_name': True},
'db': {'list_ids': False,
'collection_name': False,
'include_counts': False,
'audit': False},
}
# lists all possible options (for ALL commands) and their corresponding short flags
# colons (:) indicate that option must be followed by an argument
options = {'help': 'h',
'path': 'p:',
'mediator': 'd:',
'model': 'm:',
'collection_name': 'n:',
'new_name': 'r:',
'config_file': 'c:',
'options': 'o:',
'include_counts': 'x',
'list_ids': 'l:',
'audit': 'a'}
# useful variables built from the options dict
opt_pairs = [('--' + o[0], '-' + o[1].rstrip(':')) for o in options.items()]
bool_opts = [o for o in options if ':' not in options[o]]
optstring_short = ''.join(options.values())
optstring_long = [k + ('=' if ':' in options[k] else '') for k in options]
def main(argv):
    """
    Parses command line options/arguments and reroutes to the appropriate
    function.

    argv is the command line minus the program name; argv[0] must be one of
    the commands defined in the module-level `commands` dict. Exits with
    status 2 on an unknown command, a getopt error, or a missing required
    argument.
    """
    command = argv[0]
    if command not in commands:
        if command not in ['-h', '--help']:
            print "\n'{0}' is not a valid command".format(command)
            print usage_hdr
            print '\nFor detailed usage info on a specific command, use:\n' \
                  'manage.py [COMMAND] -h\n\nFor full help, use:\nmanage.py -h'
        else:
            print usage_all
        sys.exit(2)
    # Start from the command's option template: required options carry the
    # value True, optional ones False; parsed values overwrite these below.
    kwargs = copy.copy(commands[command])
    try:
        opts, args = getopt.getopt(argv[1:],optstring_short,optstring_long)
    except getopt.GetoptError, exc:
        print '\n' + exc.msg
        print "\nFor detailed usage info for the '{0}' command, use:\n" \
              "python manage.py {0} -h\n" \
              "\nFor detailed usage info for manage.py in general, use:\n" \
              "python manage.py -h\n".format(command)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print globals()['usage_' + command]
            sys.exit()
        else:
            # Map whichever flag form was used (long or short) back to the
            # canonical option name via opt_pairs.
            for item in opt_pairs:
                if opt in item:
                    kwarg = item[0].lstrip('--')
                    # set value to the arg if its an opt that requires an arg
                    # ...if not, set it as a 'True' flag
                    val = True if kwarg in bool_opts else arg
                    kwargs[kwarg] = val
    for kwarg in kwargs:
        # A non-boolean option still equal to True was never overwritten
        # during parsing, i.e. a required argument is missing.
        if kwargs[kwarg] == True and kwarg not in bool_opts:
            print "\nRequired argument for command '{0}' is missing: --{1}" \
                  "\nFor detailed usage info, use:" \
                  "\npython manage.py -h\n".format(command,kwarg)
            sys.exit(2)
        if kwarg not in commands[command]:
            print "\nFYI: An options was provided that is invalid for " \
                  "the '{0}' command: {1}\nCommand will run but options " \
                  "will be ignored...".format(command,kwarg)
    # call the requested function
    globals()['_' + command](**kwargs)
def _load(path, model, collection_name, mediator=None, **kwargs):
    """
    Uploads data to MongoDB using the given model and mediator.

    Required:
        path            -- Input file path (*.mat or *.h5)
        model           -- Name of the fluxpy.models class to instantiate
        collection_name -- MongoDB collection name for the dataset
    Optional:
        mediator        -- Mediator class; defaults per-model when omitted
        kwargs          -- Remaining command-line options; may include an
                           'options' string of ";"-separated key=value
                           overrides (see usage_load)
    """
    # parse any config options into individual kwarg entries:
    # (use .get() so _load() can also be called directly without an
    # 'options' entry present in kwargs)
    if kwargs.get('options'):
        for override in kwargs['options'].split(';'):
            # partition (not split) so values may themselves contain '='
            key, _, value = override.partition('=')
            if key in ['timestamp', 'title', 'var_name']:
                # these parameters are plain strings; take them as-is
                kwargs[key] = value
            else:
                # for dict/array values, evaluate the string literally
                kwargs[key] = ast.literal_eval(value)
    # load the data/instantiate the model
    inst = getattr(models, model)(path=path,
                                  collection_name=collection_name,
                                  **kwargs)
    # now use mediator to save to db
    default_mediators = {'SpatioTemporalMatrix': mediators.Grid4DMediator,
                         'XCO2Matrix': mediators.Unstructured3DMediator,
                         'KrigedXCO2Matrix': mediators.Grid3DMediator,
                         }
    if not mediator:
        mediator = default_mediators[model]
    mediator().save(collection_name, inst, verbose=True)
    sys.stderr.write('\nUpload complete!\n')
def _remove(collection_name):
    """
    Removes specified collection from database as well as its corresponding
    entries in metadata and coord_index tables.
    """
    db = _open_db_connection()
    # Proceed if ANY trace of the collection exists: the collection itself,
    # a metadata entry, a coord_index entry, or its "_geom_" companion.
    if (collection_name in db.collection_names() or
        collection_name in _return_id_list(db,'metadata') or
        collection_name in _return_id_list(db,'coord_index') or
        '_geom_' + collection_name in db.collection_names()):
        # All possible traces are cleaned up unconditionally below.
        db[collection_name].drop()
        db['metadata'].remove({'_id': collection_name})
        db['coord_index'].remove({'_id': collection_name})
        db['_geom_' + collection_name].drop()
        print '\nCollection ID "{0}" successfully removed from ' \
              'database!\n'.format(collection_name)
    else:
        print '\nCollection ID "{0}" does not exist. ' \
              'Existing collections include:'.format(collection_name)
        _db(list_ids='collections')
def _rename(collection_name,new_name):
"""
Renames specified collection with the specified new name.
"""
print '\nRenaming collection ID "{0}" to "{1}"...'.format(collection_name,new_name)
db = _open_db_connection()
db[collection_name].rename(new_name)
# update the metadata and coord_index collections
for col in ['metadata','coord_index']:
# this is messy b/c '_id' field cannot be renamed w/in the database
# ...further, inserting a copy with an altered name fails bc the _id's
# index cannot be changed
# ...further, we don't want to remove the entry and THEN reindex in
# case something fails bc the data will not be able to be recovered
# ...so, we have to first store a copy of the collection in the db as
# it exists before we do anything, then do the stuff.
# first create a temporary backup of the collection in case something fails
orig_name = col + '_orig'
for x in db[col].find():
db[orig_name].insert(x)
# now attempt the rename
try:
document = db[col].find_one({'_id': collection_name})
document['_id'] = new_name
db[col].remove({'_id': collection_name})
db[col].insert(document)
except:
print 'Rename FAILED; restoring "{0}" table'.format(col)
db[col].drop()
for x in db[orig_name].find():
db[col].insert(x)
traceback.print_exc()
# and finally remove the temporary backup collection
db[orig_name].drop()
print 'Complete.\n'
def _db(list_ids=None,collection_name=None,audit=None,include_counts=False):
    """
    Sub-parser for the db command- checks valid args and calls
    appropriate functions
    """
    print
    # Full list_ids names and their single-letter short forms.
    list_valid_values = ['collections','metadata','coord_index','c','m','i']
    if list_ids:
        if list_ids in list_valid_values:
            # -x (record counts) only makes sense when listing collections;
            # warn and ignore it for the other listing modes.
            if list_ids not in ['collections','c',''] and include_counts:
                print 'Option -x (for including record counts) is not valid ' \
                      'for the {0} argument, ignoring.'.format(list_ids)
            _list(list_ids=list_ids,include_counts=include_counts)
        else:
            print 'Invalid argument for [-l | --list]: {0}' \
                  '\nValid values include:'.format(list_ids)
            print list_valid_values
            sys.exit(2)
    if collection_name:
        _show_metadata(collection_name=collection_name)
    if audit:
        _audit()
    print
def _list(list_ids='collections',include_counts=False):
"""
Database diagnostic tool for listing collection IDs
"""
db = _open_db_connection()
if list_ids in ['collections','']:
for c in db.collection_names():
if c not in (RESERVED_COLLECTION_NAMES + ('system.indexes',)) and ('_geom' not in c):
print c + (' (%i' % db[c].count() + ' records)' if include_counts else '')
else:
for id in _return_id_list(db,list_ids):
print id
def _return_id_list(db,collection_name):
"""
Returns list of '_id' entries for the collection name provided
"""
return [t['_id'] for t in list(db[collection_name].find())]
def _show_metadata(collection_name=None):
"""
Database diagnostic tool for showing metadata for a specified
collection ID
"""
db = _open_db_connection()
# View the metadata record of a given collection (by "_id")
document = db['metadata'].find({'_id': collection_name})
if document.count() > 0:
pprint.pprint ((db['metadata'].find({'_id': collection_name}))[0])
else:
print 'Metadata for collection ID "{0}" not found.' \
'\nTo view a list of collection IDs having metadata, use:' \
'\nmanage.py db -l metadata'.format(collection_name)
def _audit():
    """
    Database diagnostic tool for auditing the database. Tests for
    synchronicity between collections and the collection ID entries for
    the metadata and coord_index tables.
    """
    db = _open_db_connection()
    # Data collections only: skip reserved/system collections and the
    # derived "_geom" geometry collections.
    existing_collections = [c for c in db.collection_names() if (c not in
        RESERVED_COLLECTION_NAMES + ('system.indexes',)) and ('_geom' not in c)]
    all_good = True
    for x in ['metadata','coord_index']:
        m_entries = [m['_id'] for m in db[x].find()]
        # Collections missing a metadata/coord_index entry...
        for c in existing_collections:
            if c not in m_entries:
                print 'INCONSISTENCY FOUND: missing {0} entry for collection ID "{1}"'.format(x,c)
                all_good = False
        # ...and stale entries whose collection no longer exists.
        for m in m_entries:
            if m not in existing_collections:
                print 'INCONSISTENCY FOUND: stale entry for collection ID "{0}" in {1}'.format(m,x)
                all_good = False
    print '\nDatabase audit complete.\n',
    if all_good: print 'No inconsistencies found!'
def _open_db_connection():
    """
    Open a Mongo client connection to the local database and return the
    configured database handle.
    """
    return MongoClient()[DB]
if __name__ == "__main__":
main(sys.argv[1:])
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provides the Bokeh Server Tornado application.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
from pprint import pformat
# External imports
from tornado import gen
from tornado.ioloop import PeriodicCallback
from tornado.web import Application as TornadoApplication
from tornado.web import StaticFileHandler
# Bokeh imports
from ..application import Application
from ..resources import Resources
from ..settings import settings
from ..util.dependencies import import_optional
from .contexts import ApplicationContext
from .connection import ServerConnection
from .urls import per_app_patterns, toplevel_patterns
from .views.root_handler import RootHandler
from .views.static_handler import StaticHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Unfortuntely we can't yet use format_docstring to keep these automatically in sync in
# the class docstring because Python 2 does not allow setting class.__doc__ (works with
# Bokeh model classes because they are metaclasses)
# Default periodic-callback intervals (milliseconds) and size limits; each
# can be overridden via the corresponding BokehTornado constructor argument.
DEFAULT_CHECK_UNUSED_MS = 17000
DEFAULT_KEEP_ALIVE_MS = 37000 # heroku, nginx default to 60s timeout, so use less than that
DEFAULT_MEM_LOG_FREQ_MS = 0 # 0 disables memory logging (see BokehTornado.__init__)
DEFAULT_STATS_LOG_FREQ_MS = 15000
DEFAULT_UNUSED_LIFETIME_MS = 15000
DEFAULT_WEBSOCKET_MAX_MESSAGE_SIZE_BYTES = 20*1024*1024

__all__ = (
    'BokehTornado',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class BokehTornado(TornadoApplication):
''' A Tornado Application used to implement the Bokeh Server.
Args:
applications (dict[str,Application] or Application) :
A map from paths to ``Application`` instances.
If the value is a single Application, then the following mapping
is generated:
.. code-block:: python
applications = { '/' : applications }
When a connection comes in to a given path, the associate
Application is used to generate a new document for the session.
prefix (str, optional) :
A URL prefix to use for all Bokeh server paths. (default: None)
extra_websocket_origins (list[str], optional) :
A list of hosts that can connect to the websocket.
This is typically required when embedding a Bokeh server app in an
external web site using :func:`~bokeh.embed.server_document` or
similar.
If None, ``["localhost"]`` will be assumed (default: None)
extra_patterns (seq[tuple], optional) :
A list of tuples of (str, http or websocket handler)
Use this argument to add additional endpoints to custom deployments
of the Bokeh Server.
If None, then ``[]`` will be used. (default: None)
secret_key (str, optional) :
A secret key for signing session IDs.
Defaults to the current value of the environment variable
``BOKEH_SECRET_KEY``
sign_sessions (bool, optional) :
Whether to cryptographically sign session IDs
Defaults to the current value of the environment variable
``BOKEH_SIGN_SESSIONS``. If ``True``, then ``secret_key`` must
also be provided (either via environment setting or passed as
a parameter value)
generate_session_ids (bool, optional) :
Whether to generate a session ID if one is not provided
(default: True)
keep_alive_milliseconds (int, optional) :
Number of milliseconds between keep-alive pings (default: 37000)
Pings normally required to keep the websocket open. Set to 0 to
disable pings.
check_unused_sessions_milliseconds (int, optional) :
Number of milliseconds between checking for unused sessions
(default: 17000)
unused_session_lifetime_milliseconds (int, optional) :
Number of milliseconds for unused session lifetime (default: 15000)
stats_log_frequency_milliseconds (int, optional) :
Number of milliseconds between logging stats (default: 15000)
mem_log_frequency_milliseconds (int, optional) :
Number of milliseconds between logging memory information (default: 0)
Enabling this feature requires the optional dependency ``psutil`` to be
installed.
use_index (bool, optional) :
Whether to generate an index of running apps in the ``RootHandler``
(default: True)
index (str, optional) :
Path to a Jinja2 template to serve as the index for "/" if use_index
is True. If None, the basic built in app index template is used.
(default: None)
redirect_root (bool, optional) :
When there is only a single running application, whether to
redirect requests to ``"/"`` to that application automatically
(default: True)
If there are multiple Bokeh applications configured, this option
has no effect.
websocket_max_message_size_bytes (int, optional):
Set the Tornado ``websocket_max_message_size`` value.
(default: 20*1024*1024)
NOTE: This setting has effect ONLY for Tornado>=4.5
Any additional keyword arguments are passed to ``tornado.web.Application``.
'''
def __init__(self,
applications,
prefix=None,
extra_websocket_origins=None,
extra_patterns=None,
secret_key=settings.secret_key_bytes(),
sign_sessions=settings.sign_sessions(),
generate_session_ids=True,
keep_alive_milliseconds=DEFAULT_KEEP_ALIVE_MS,
check_unused_sessions_milliseconds=DEFAULT_CHECK_UNUSED_MS,
unused_session_lifetime_milliseconds=DEFAULT_UNUSED_LIFETIME_MS,
stats_log_frequency_milliseconds=DEFAULT_STATS_LOG_FREQ_MS,
mem_log_frequency_milliseconds=DEFAULT_MEM_LOG_FREQ_MS,
use_index=True,
redirect_root=True,
websocket_max_message_size_bytes=DEFAULT_WEBSOCKET_MAX_MESSAGE_SIZE_BYTES,
index=None,
**kwargs):
# This will be set when initialize is called
self._loop = None
if isinstance(applications, Application):
applications = { '/' : applications }
if prefix is None:
prefix = ""
prefix = prefix.strip("/")
if prefix:
prefix = "/" + prefix
self._prefix = prefix
self._index = index
if keep_alive_milliseconds < 0:
# 0 means "disable"
raise ValueError("keep_alive_milliseconds must be >= 0")
else:
if keep_alive_milliseconds == 0:
log.info("Keep-alive ping disabled")
elif keep_alive_milliseconds != DEFAULT_KEEP_ALIVE_MS:
log.info("Keep-alive ping configured every %d milliseconds", keep_alive_milliseconds)
self._keep_alive_milliseconds = keep_alive_milliseconds
if check_unused_sessions_milliseconds <= 0:
raise ValueError("check_unused_sessions_milliseconds must be > 0")
elif check_unused_sessions_milliseconds != DEFAULT_CHECK_UNUSED_MS:
log.info("Check for unused sessions every %d milliseconds", check_unused_sessions_milliseconds)
self._check_unused_sessions_milliseconds = check_unused_sessions_milliseconds
if unused_session_lifetime_milliseconds <= 0:
raise ValueError("check_unused_sessions_milliseconds must be > 0")
elif unused_session_lifetime_milliseconds != DEFAULT_UNUSED_LIFETIME_MS:
log.info("Unused sessions last for %d milliseconds", unused_session_lifetime_milliseconds)
self._unused_session_lifetime_milliseconds = unused_session_lifetime_milliseconds
if stats_log_frequency_milliseconds <= 0:
raise ValueError("stats_log_frequency_milliseconds must be > 0")
elif stats_log_frequency_milliseconds != DEFAULT_STATS_LOG_FREQ_MS:
log.info("Log statistics every %d milliseconds", stats_log_frequency_milliseconds)
self._stats_log_frequency_milliseconds = stats_log_frequency_milliseconds
if mem_log_frequency_milliseconds < 0:
# 0 means "disable"
raise ValueError("mem_log_frequency_milliseconds must be >= 0")
elif mem_log_frequency_milliseconds > 0:
if import_optional('psutil') is None:
log.warning("Memory logging requested, but is disabled. Optional dependency 'psutil' is missing. "
"Try 'pip install psutil' or 'conda install psutil'")
mem_log_frequency_milliseconds = 0
elif mem_log_frequency_milliseconds != DEFAULT_MEM_LOG_FREQ_MS:
log.info("Log memory usage every %d milliseconds", mem_log_frequency_milliseconds)
self._mem_log_frequency_milliseconds = mem_log_frequency_milliseconds
if websocket_max_message_size_bytes <= 0:
raise ValueError("websocket_max_message_size_bytes must be postitive")
elif websocket_max_message_size_bytes != DEFAULT_WEBSOCKET_MAX_MESSAGE_SIZE_BYTES:
log.info("Torndado websocket_max_message_size set to %d bytes (%0.2f MB)",
websocket_max_message_size_bytes,
websocket_max_message_size_bytes/1024.0**2)
if extra_websocket_origins is None:
self._websocket_origins = set()
else:
self._websocket_origins = set(extra_websocket_origins)
self._secret_key = secret_key
self._sign_sessions = sign_sessions
self._generate_session_ids = generate_session_ids
log.debug("These host origins can connect to the websocket: %r", list(self._websocket_origins))
# Wrap applications in ApplicationContext
self._applications = dict()
for k,v in applications.items():
self._applications[k] = ApplicationContext(v,url=k)
extra_patterns = extra_patterns or []
all_patterns = []
for key, app in applications.items():
app_patterns = []
for p in per_app_patterns:
if key == "/":
route = p[0]
else:
route = key + p[0]
route = self._prefix + route
app_patterns.append((route, p[1], { "application_context" : self._applications[key] }))
websocket_path = None
for r in app_patterns:
if r[0].endswith("/ws"):
websocket_path = r[0]
if not websocket_path:
raise RuntimeError("Couldn't find websocket path")
for r in app_patterns:
r[2]["bokeh_websocket_path"] = websocket_path
all_patterns.extend(app_patterns)
# add a per-app static path if requested by the application
if app.static_path is not None:
if key == "/":
route = "/static/(.*)"
else:
route = key + "/static/(.*)"
route = self._prefix + route
all_patterns.append((route, StaticFileHandler, { "path" : app.static_path }))
for p in extra_patterns + toplevel_patterns:
if p[1] == RootHandler:
if use_index:
data = {"applications": self._applications,
"prefix": self._prefix,
"index": self._index,
"use_redirect": redirect_root}
prefixed_pat = (self._prefix + p[0],) + p[1:] + (data,)
all_patterns.append(prefixed_pat)
else:
prefixed_pat = (self._prefix + p[0],) + p[1:]
all_patterns.append(prefixed_pat)
log.debug("Patterns are:")
for line in pformat(all_patterns, width=60).split("\n"):
log.debug(" " + line)
super(BokehTornado, self).__init__(all_patterns,
websocket_max_message_size=websocket_max_message_size_bytes,
**kwargs)
def initialize(self, io_loop):
''' Start a Bokeh Server Tornado Application on a given Tornado IOLoop.
'''
self._loop = io_loop
for app_context in self._applications.values():
app_context._loop = self._loop
self._clients = set()
self._stats_job = PeriodicCallback(self._log_stats,
self._stats_log_frequency_milliseconds)
if self._mem_log_frequency_milliseconds > 0:
self._mem_job = PeriodicCallback(self._log_mem,
self._mem_log_frequency_milliseconds)
else:
self._mem_job = None
self._cleanup_job = PeriodicCallback(self._cleanup_sessions,
self._check_unused_sessions_milliseconds)
if self._keep_alive_milliseconds > 0:
self._ping_job = PeriodicCallback(self._keep_alive, self._keep_alive_milliseconds)
else:
self._ping_job = None
    @property
    def app_paths(self):
        ''' A set of all application paths for all Bokeh applications
        configured on this Bokeh server instance.
        '''
        # set() over a dict collects its keys, i.e. the route strings.
        return set(self._applications)
    @property
    def index(self):
        ''' Path to a Jinja2 template to serve as the index "/"
        '''
        return self._index
    @property
    def io_loop(self):
        ''' The Tornado IOLoop that this Bokeh Server Tornado Application
        is running on (set by ``initialize``).
        '''
        return self._loop
    @property
    def prefix(self):
        ''' A URL prefix for this Bokeh Server Tornado Application to use
        for all paths
        '''
        return self._prefix
    @property
    def websocket_origins(self):
        ''' A set of websocket origins permitted to connect to this server.
        '''
        return self._websocket_origins
    @property
    def secret_key(self):
        ''' A secret key for this Bokeh Server Tornado Application to use when
        signing session IDs, if configured.
        '''
        return self._secret_key
    @property
    def sign_sessions(self):
        ''' Whether this Bokeh Server Tornado Application has been configured
        to cryptographically sign session IDs

        If ``True``, then ``secret_key`` must also have been configured.
        '''
        return self._sign_sessions
    @property
    def generate_session_ids(self):
        ''' Whether this Bokeh Server Tornado Application has been configured
        to automatically generate session IDs.
        '''
        return self._generate_session_ids
def resources(self, absolute_url=None):
''' Provide a :class:`~bokeh.resources.Resources` that specifies where
Bokeh application sessions should load BokehJS resources from.
Args:
absolute_url (bool):
An absolute URL prefix to use for locating resources. If None,
relative URLs are used (default: None)
'''
if absolute_url:
return Resources(mode="server", root_url=absolute_url + self._prefix, path_versioner=StaticHandler.append_version)
return Resources(mode="server", root_url=self._prefix, path_versioner=StaticHandler.append_version)
def start(self):
''' Start the Bokeh Server application.
Starting the Bokeh Server Tornado application will run periodic
callbacks for stats logging, cleanup, pinging, etc. Additionally, any
startup hooks defined by the configured Bokeh applications will be run.
'''
self._stats_job.start()
if self._mem_job is not None:
self._mem_job.start()
self._cleanup_job.start()
if self._ping_job is not None:
self._ping_job.start()
for context in self._applications.values():
context.run_load_hook()
def stop(self, wait=True):
''' Stop the Bokeh Server application.
Args:
wait (bool): whether to wait for orderly cleanup (default: True)
Returns:
None
'''
# TODO should probably close all connections and shut down all sessions here
for context in self._applications.values():
context.run_unload_hook()
self._stats_job.stop()
if self._mem_job is not None:
self._mem_job.stop()
self._cleanup_job.stop()
if self._ping_job is not None:
self._ping_job.stop()
self._clients.clear()
def new_connection(self, protocol, socket, application_context, session):
connection = ServerConnection(protocol, socket, application_context, session)
self._clients.add(connection)
return connection
def client_lost(self, connection):
self._clients.discard(connection)
connection.detach_session()
def get_session(self, app_path, session_id):
''' Get an active a session by name application path and session ID.
Args:
app_path (str) :
The configured application path for the application to return
a session for.
session_id (str) :
The session ID of the session to retrieve.
Returns:
ServerSession
'''
if app_path not in self._applications:
raise ValueError("Application %s does not exist on this server" % app_path)
return self._applications[app_path].get_session(session_id)
def get_sessions(self, app_path):
''' Gets all currently active sessions for an application.
Args:
app_path (str) :
The configured application path for the application to return
sessions for.
Returns:
list[ServerSession]
'''
if app_path not in self._applications:
raise ValueError("Application %s does not exist on this server" % app_path)
return list(self._applications[app_path].sessions)
# Periodic Callbacks ------------------------------------------------------
    @gen.coroutine
    def _cleanup_sessions(self):
        # Periodic job: ask every application context to discard sessions
        # unused for longer than the configured lifetime.
        log.trace("Running session cleanup job")
        for app in self._applications.values():
            yield app._cleanup_sessions(self._unused_session_lifetime_milliseconds)
        raise gen.Return(None)
def _log_stats(self):
log.trace("Running stats log job")
if log.getEffectiveLevel() > logging.DEBUG:
# avoid the work below if we aren't going to log anything
return
log.debug("[pid %d] %d clients connected", os.getpid(), len(self._clients))
for app_path, app in self._applications.items():
sessions = list(app.sessions)
unused_count = 0
for s in sessions:
if s.connection_count == 0:
unused_count += 1
log.debug("[pid %d] %s has %d sessions with %d unused",
os.getpid(), app_path, len(sessions), unused_count)
def _log_mem(self):
import psutil
process = psutil.Process(os.getpid())
log.info("[pid %d] Memory usage: %0.2f MB (RSS), %0.2f MB (VMS)", os.getpid(), process.memory_info().rss//2**20, process.memory_info().vms//2**20)
if log.getEffectiveLevel() > logging.DEBUG:
# avoid the work below if we aren't going to log anything else
return
import gc
from ..document import Document
from ..model import Model
from .session import ServerSession
for name, typ in [('Documents', Document), ('Sessions', ServerSession), ('Models', Model)]:
objs = [x for x in gc.get_objects() if isinstance(x, typ)]
log.debug(" uncollected %s: %d", name, len(objs))
def _keep_alive(self):
log.trace("Running keep alive job")
for c in self._clients:
c.send_ping()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| |
"""Provides methods to bootstrap a home assistant instance."""
import asyncio
import logging
import logging.handlers
import os
import sys
from collections import OrderedDict
from types import ModuleType
from typing import Any, Optional, Dict
import voluptuous as vol
from voluptuous.humanize import humanize_error
import homeassistant.components as core_components
from homeassistant.components import persistent_notification
import homeassistant.config as conf_util
import homeassistant.core as core
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
import homeassistant.loader as loader
import homeassistant.util.package as pkg_util
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe)
from homeassistant.util.logging import AsyncHandler
from homeassistant.util.yaml import clear_secret_cache
from homeassistant.const import EVENT_COMPONENT_LOADED, PLATFORM_FORMAT
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import (
event_decorators, service, config_per_platform, extract_domain_configs)
from homeassistant.helpers.signal import async_register_signal_handling
# Module-level logger for bootstrap messages.
_LOGGER = logging.getLogger(__name__)

# Event-data key naming the component that finished loading.
ATTR_COMPONENT = 'component'

# File name (relative to the config dir) receiving error-level log records.
ERROR_LOG_FILENAME = 'home-assistant.log'

# hass.data key under which failed component/platform names are collected.
DATA_PERSISTENT_ERRORS = 'bootstrap_persistent_errors'

# Markdown link template for a component's documentation page.
HA_COMPONENT_URL = '[{}](https://home-assistant.io/components/{}/)'
def setup_component(hass: core.HomeAssistant, domain: str,
                    config: Optional[Dict]=None) -> bool:
    """Setup a component and all its dependencies.

    Blocking wrapper around async_setup_component; safe to call from
    outside the event loop.
    """
    coro = async_setup_component(hass, domain, config)
    return run_coroutine_threadsafe(coro, loop=hass.loop).result()
@asyncio.coroutine
def async_setup_component(hass: core.HomeAssistant, domain: str,
                          config: Optional[Dict]=None) -> bool:
    """Setup a component and all its dependencies.

    Returns True when the component and every dependency set up
    successfully, False otherwise.

    This method is a coroutine.
    """
    if domain in hass.config.components:
        _LOGGER.debug('Component %s already set up.', domain)
        return True

    if not loader.PREPARED:
        # Populate the component loader cache off the event loop.
        yield from hass.loop.run_in_executor(None, loader.prepare, hass)

    if config is None:
        config = {}

    # Resolve the full dependency-ordered list of components to set up.
    components = loader.load_order_component(domain)

    # OrderedSet is empty if component or dependencies could not be resolved
    if not components:
        _async_persistent_notification(hass, domain, True)
        return False

    # Set up dependencies first; abort on the first failure.
    for component in components:
        res = yield from _async_setup_component(hass, component, config)
        if not res:
            _LOGGER.error('Component %s failed to setup', component)
            _async_persistent_notification(hass, component, True)
            return False

    return True
def _handle_requirements(hass: core.HomeAssistant, component,
                         name: str) -> bool:
    """Install the requirements for a component.

    Returns True when nothing needs installing or every requirement
    installed successfully.

    This method needs to run in an executor.
    """
    if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'):
        return True

    # All requirements are installed into the config dir's 'deps' folder.
    target = hass.config.path('deps')
    for req in component.REQUIREMENTS:
        if pkg_util.install_package(req, target=target):
            continue
        _LOGGER.error('Not initializing %s because could not install '
                      'dependency %s', name, req)
        _async_persistent_notification(hass, name)
        return False

    return True
@asyncio.coroutine
def _async_setup_component(hass: core.HomeAssistant,
                           domain: str, config) -> bool:
    """Setup a component for Home Assistant.

    Validates the config, installs requirements, runs the component's
    (async_)setup and marks the component as loaded on success.

    This method is a coroutine.
    """
    # pylint: disable=too-many-return-statements
    if domain in hass.config.components:
        return True

    # Lazily create the shared setup lock and in-progress list on first use.
    setup_lock = hass.data.get('setup_lock')
    if setup_lock is None:
        setup_lock = hass.data['setup_lock'] = asyncio.Lock(loop=hass.loop)

    setup_progress = hass.data.get('setup_progress')
    if setup_progress is None:
        setup_progress = hass.data['setup_progress'] = []

    # A domain already in setup_progress means we re-entered our own setup,
    # i.e. a circular dependency.
    if domain in setup_progress:
        _LOGGER.error('Attempt made to setup %s during setup of %s',
                      domain, domain)
        _async_persistent_notification(hass, domain, True)
        return False

    try:
        # Used to indicate to discovery that a setup is ongoing and allow it
        # to wait till it is done.
        did_lock = False
        if not setup_lock.locked():
            yield from setup_lock.acquire()
            did_lock = True

        setup_progress.append(domain)
        # Validate + process config and install requirements.
        config = yield from async_prepare_setup_component(hass, config, domain)

        if config is None:
            return False

        component = loader.get_component(domain)
        if component is None:
            _async_persistent_notification(hass, domain)
            return False

        async_comp = hasattr(component, 'async_setup')

        try:
            _LOGGER.info("Setting up %s", domain)
            if async_comp:
                result = yield from component.async_setup(hass, config)
            else:
                # Synchronous setup must not block the event loop.
                result = yield from hass.loop.run_in_executor(
                    None, component.setup, hass, config)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception('Error during setup of component %s', domain)
            _async_persistent_notification(hass, domain, True)
            return False

        if result is False:
            _LOGGER.error('component %s failed to initialize', domain)
            _async_persistent_notification(hass, domain, True)
            return False
        elif result is not True:
            # Anything but an explicit True/False is a component bug.
            _LOGGER.error('component %s did not return boolean if setup '
                          'was successful. Disabling component.', domain)
            _async_persistent_notification(hass, domain, True)
            loader.set_component(domain, None)
            return False

        hass.config.components.add(component.DOMAIN)

        hass.bus.async_fire(
            EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: component.DOMAIN}
        )

        return True
    finally:
        # Always clear the in-progress marker, and release the lock only if
        # this call was the one that acquired it.
        setup_progress.remove(domain)
        if did_lock:
            setup_lock.release()
def prepare_setup_component(hass: core.HomeAssistant, config: dict,
                            domain: str):
    """Prepare setup of a component and return processed config.

    Blocking wrapper around async_prepare_setup_component.
    """
    coro = async_prepare_setup_component(hass, config, domain)
    return run_coroutine_threadsafe(coro, loop=hass.loop).result()
@asyncio.coroutine
def async_prepare_setup_component(hass: core.HomeAssistant, config: dict,
                                  domain: str):
    """Prepare setup of a component and return processed config.

    Returns the (possibly rewritten) config dict, or None when missing
    dependencies, schema validation or requirement installation abort
    the setup.

    This method is a coroutine.
    """
    # pylint: disable=too-many-return-statements
    component = loader.get_component(domain)

    # All DEPENDENCIES must already be in hass.config.components.
    missing_deps = [dep for dep in getattr(component, 'DEPENDENCIES', [])
                    if dep not in hass.config.components]

    if missing_deps:
        _LOGGER.error(
            'Not initializing %s because not all dependencies loaded: %s',
            domain, ", ".join(missing_deps))
        return None

    if hasattr(component, 'CONFIG_SCHEMA'):
        # Component validates its whole config section itself.
        try:
            config = component.CONFIG_SCHEMA(config)
        except vol.Invalid as ex:
            async_log_exception(ex, domain, config, hass)
            return None

    elif hasattr(component, 'PLATFORM_SCHEMA'):
        # Platform-based component: validate each platform entry
        # individually, dropping invalid ones instead of aborting.
        platforms = []
        for p_name, p_config in config_per_platform(config, domain):
            # Validate component specific platform schema
            try:
                p_validated = component.PLATFORM_SCHEMA(p_config)
            except vol.Invalid as ex:
                async_log_exception(ex, domain, config, hass)
                continue

            # Not all platform components follow same pattern for platforms
            # So if p_name is None we are not going to validate platform
            # (the automation component is one of them)
            if p_name is None:
                platforms.append(p_validated)
                continue

            platform = yield from async_prepare_setup_platform(
                hass, config, domain, p_name)

            if platform is None:
                continue

            # Validate platform specific schema
            if hasattr(platform, 'PLATFORM_SCHEMA'):
                try:
                    # pylint: disable=no-member
                    p_validated = platform.PLATFORM_SCHEMA(p_validated)
                except vol.Invalid as ex:
                    async_log_exception(ex, '{}.{}'.format(domain, p_name),
                                        p_validated, hass)
                    continue

            platforms.append(p_validated)

        # Create a copy of the configuration with all config for current
        # component removed and add validated config back in.
        filter_keys = extract_domain_configs(config, domain)
        config = {key: value for key, value in config.items()
                  if key not in filter_keys}
        config[domain] = platforms

    # Install the component's requirements off the event loop.
    res = yield from hass.loop.run_in_executor(
        None, _handle_requirements, hass, component, domain)

    if not res:
        return None

    return config
def prepare_setup_platform(hass: core.HomeAssistant, config, domain: str,
                           platform_name: str) -> Optional[ModuleType]:
    """Load a platform and makes sure dependencies are setup.

    Blocking wrapper around async_prepare_setup_platform.
    """
    coro = async_prepare_setup_platform(hass, config, domain, platform_name)
    return run_coroutine_threadsafe(coro, loop=hass.loop).result()
@asyncio.coroutine
def async_prepare_setup_platform(hass: core.HomeAssistant, config, domain: str,
                                 platform_name: str) \
        -> Optional[ModuleType]:
    """Load a platform and makes sure dependencies are setup.

    Returns the platform module, or None when it cannot be found or its
    dependencies/requirements fail.

    This method is a coroutine.
    """
    if not loader.PREPARED:
        # Populate the loader cache off the event loop.
        yield from hass.loop.run_in_executor(None, loader.prepare, hass)

    platform_path = PLATFORM_FORMAT.format(domain, platform_name)

    platform = loader.get_platform(domain, platform_name)

    # Not found
    if platform is None:
        _LOGGER.error('Unable to find platform %s', platform_path)
        _async_persistent_notification(hass, platform_path)
        return None

    # Already loaded
    elif platform_path in hass.config.components:
        return platform

    # Load dependencies
    for component in getattr(platform, 'DEPENDENCIES', []):
        if component in loader.DEPENDENCY_BLACKLIST:
            raise HomeAssistantError(
                '{} is not allowed to be a dependency.'.format(component))

        res = yield from async_setup_component(hass, component, config)
        if not res:
            _LOGGER.error(
                'Unable to prepare setup for platform %s because '
                'dependency %s could not be initialized', platform_path,
                component)
            _async_persistent_notification(hass, platform_path, True)
            return None

    # Install the platform's requirements off the event loop.
    res = yield from hass.loop.run_in_executor(
        None, _handle_requirements, hass, platform, platform_path)
    if not res:
        return None

    return platform
def from_config_dict(config: Dict[str, Any],
                     hass: Optional[core.HomeAssistant]=None,
                     config_dir: Optional[str]=None,
                     enable_log: bool=True,
                     verbose: bool=False,
                     skip_pip: bool=False,
                     log_rotate_days: Any=None) \
        -> Optional[core.HomeAssistant]:
    """Try to configure Home Assistant from a config dict.

    Synchronous entry point: creates a HomeAssistant instance if needed
    and drives its event loop until async_from_config_dict completes.

    Dynamically loads required components and its dependencies.
    """
    if hass is None:
        hass = core.HomeAssistant()

        # config_dir is only applied to a freshly created instance.
        if config_dir is not None:
            config_dir = os.path.abspath(config_dir)
            hass.config.config_dir = config_dir
            mount_local_lib_path(config_dir)

    @asyncio.coroutine
    def _async_init_from_config_dict(future):
        # Funnel the async result (or exception) into 'future' so the
        # synchronous caller below can retrieve it.
        try:
            re_hass = yield from async_from_config_dict(
                config, hass, config_dir, enable_log, verbose, skip_pip,
                log_rotate_days)
            future.set_result(re_hass)
        # pylint: disable=broad-except
        except Exception as exc:
            future.set_exception(exc)

    # run task
    future = asyncio.Future(loop=hass.loop)
    hass.async_add_job(_async_init_from_config_dict(future))
    hass.loop.run_until_complete(future)

    return future.result()
@asyncio.coroutine
def async_from_config_dict(config: Dict[str, Any],
                           hass: core.HomeAssistant,
                           config_dir: Optional[str]=None,
                           enable_log: bool=True,
                           verbose: bool=False,
                           skip_pip: bool=False,
                           log_rotate_days: Any=None) \
        -> Optional[core.HomeAssistant]:
    """Try to configure Home Assistant from a config dict.

    Dynamically loads required components and its dependencies.

    This method is a coroutine.
    """
    hass.async_track_tasks()

    setup_lock = hass.data.get('setup_lock')
    if setup_lock is None:
        setup_lock = hass.data['setup_lock'] = asyncio.Lock(loop=hass.loop)

    # Hold the shared setup lock for the whole bootstrap.
    yield from setup_lock.acquire()

    core_config = config.get(core.DOMAIN, {})

    try:
        yield from conf_util.async_process_ha_core_config(hass, core_config)
    except vol.Invalid as ex:
        # Invalid core config aborts the entire startup.
        async_log_exception(ex, 'homeassistant', core_config, hass)
        return None

    yield from hass.loop.run_in_executor(
        None, conf_util.process_ha_config_upgrade, hass)

    if enable_log:
        async_enable_logging(hass, verbose, log_rotate_days)

    hass.config.skip_pip = skip_pip
    if skip_pip:
        _LOGGER.warning('Skipping pip installation of required modules. '
                        'This may cause issues.')

    if not loader.PREPARED:
        yield from hass.loop.run_in_executor(None, loader.prepare, hass)

    # Merge packages
    conf_util.merge_packages_config(
        config, core_config.get(conf_util.CONF_PACKAGES, {}))

    # Make a copy because we are mutating it.
    # Use OrderedDict in case original one was one.
    # Convert values to dictionaries if they are None
    new_config = OrderedDict()
    for key, value in config.items():
        new_config[key] = value or {}
    config = new_config

    # Filter out the repeating and common config section [homeassistant]
    components = set(key.split(' ')[0] for key in config.keys()
                     if key != core.DOMAIN)

    # setup components
    # pylint: disable=not-an-iterable
    res = yield from core_components.async_setup(hass, config)
    if not res:
        _LOGGER.error('Home Assistant core failed to initialize. '
                      'Further initialization aborted.')
        return hass

    yield from persistent_notification.async_setup(hass, config)

    _LOGGER.info('Home Assistant core initialized')

    # Give event decorators access to HASS
    event_decorators.HASS = hass
    service.HASS = hass

    # Setup the components
    dependency_blacklist = loader.DEPENDENCY_BLACKLIST - set(components)

    for domain in loader.load_order_components(components):
        if domain in dependency_blacklist:
            raise HomeAssistantError(
                '{} is not allowed to be a dependency'.format(domain))
        yield from _async_setup_component(hass, domain, config)

    setup_lock.release()

    yield from hass.async_stop_track_tasks()

    async_register_signal_handling(hass)
    return hass
def from_config_file(config_path: str,
                     hass: Optional[core.HomeAssistant]=None,
                     verbose: bool=False,
                     skip_pip: bool=True,
                     log_rotate_days: Any=None):
    """Read the configuration file and try to start all the functionality.

    Synchronous entry point: drives the hass event loop until
    async_from_config_file completes.

    Will add functionality to 'hass' parameter if given,
    instantiates a new Home Assistant object if 'hass' is not given.
    """
    if hass is None:
        hass = core.HomeAssistant()

    @asyncio.coroutine
    def _async_init_from_config_file(future):
        # Funnel the async result (or exception) into 'future' so the
        # synchronous caller below can retrieve it.
        try:
            re_hass = yield from async_from_config_file(
                config_path, hass, verbose, skip_pip, log_rotate_days)
            future.set_result(re_hass)
        # pylint: disable=broad-except
        except Exception as exc:
            future.set_exception(exc)

    # run task
    future = asyncio.Future(loop=hass.loop)
    hass.loop.create_task(_async_init_from_config_file(future))
    hass.loop.run_until_complete(future)

    return future.result()
@asyncio.coroutine
def async_from_config_file(config_path: str,
                           hass: core.HomeAssistant,
                           verbose: bool=False,
                           skip_pip: bool=True,
                           log_rotate_days: Any=None):
    """Read the configuration file and try to start all the functionality.

    Will add functionality to 'hass' parameter.

    This method is a coroutine.
    """
    # Set config dir to directory holding config file
    config_dir = os.path.abspath(os.path.dirname(config_path))
    hass.config.config_dir = config_dir
    yield from hass.loop.run_in_executor(
        None, mount_local_lib_path, config_dir)

    async_enable_logging(hass, verbose, log_rotate_days)

    try:
        # YAML parsing is blocking, so run it in the executor.
        config_dict = yield from hass.loop.run_in_executor(
            None, conf_util.load_yaml_config_file, config_path)
    except HomeAssistantError:
        return None
    finally:
        # Secrets are only needed while parsing the YAML file.
        clear_secret_cache()

    hass = yield from async_from_config_dict(
        config_dict, hass, enable_log=False, skip_pip=skip_pip)
    return hass
@core.callback
def async_enable_logging(hass: core.HomeAssistant, verbose: bool=False,
                         log_rotate_days=None) -> None:
    """Setup the logging.

    Configures console logging (colorized when colorlog is available)
    and, if writable, an error log file in the config directory.

    This method must be run in the event loop.
    """
    logging.basicConfig(level=logging.INFO)
    fmt = ("%(asctime)s %(levelname)s (%(threadName)s) "
           "[%(name)s] %(message)s")
    colorfmt = "%(log_color)s{}%(reset)s".format(fmt)
    datefmt = '%y-%m-%d %H:%M:%S'

    # suppress overly verbose logs from libraries that aren't helpful
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)
    logging.getLogger("aiohttp.access").setLevel(logging.WARNING)

    try:
        from colorlog import ColoredFormatter
        logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
            colorfmt,
            datefmt=datefmt,
            reset=True,
            log_colors={
                'DEBUG': 'cyan',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'red',
            }
        ))
    except ImportError:
        # colorlog is optional; fall back to the plain formatter.
        pass

    # Log errors to a file if we have write access to file or config dir
    err_log_path = hass.config.path(ERROR_LOG_FILENAME)
    err_path_exists = os.path.isfile(err_log_path)

    # Check if we can write to the error log if it exists or that
    # we can create files in the containing directory if not.
    if (err_path_exists and os.access(err_log_path, os.W_OK)) or \
       (not err_path_exists and os.access(hass.config.config_dir, os.W_OK)):

        if log_rotate_days:
            err_handler = logging.handlers.TimedRotatingFileHandler(
                err_log_path, when='midnight', backupCount=log_rotate_days)
        else:
            err_handler = logging.FileHandler(
                err_log_path, mode='w', delay=True)

        err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
        err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))

        # Wrap the file handler so writes happen off the event loop.
        async_handler = AsyncHandler(hass.loop, err_handler)

        @asyncio.coroutine
        def async_stop_async_handler(event):
            """Cleanup async handler."""
            logging.getLogger('').removeHandler(async_handler)
            yield from async_handler.async_close(blocking=True)

        hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_CLOSE, async_stop_async_handler)

        logger = logging.getLogger('')
        logger.addHandler(async_handler)
        logger.setLevel(logging.INFO)

    else:
        _LOGGER.error(
            'Unable to setup error log %s (access denied)', err_log_path)
def log_exception(ex, domain, config, hass):
    """Generate log exception for config validation.

    Blocking wrapper around async_log_exception.
    """
    future = run_callback_threadsafe(
        hass.loop, async_log_exception, ex, domain, config, hass)
    future.result()
@core.callback
def _async_persistent_notification(hass: core.HomeAssistant, component: str,
                                   link: Optional[bool]=False):
    """Print a persistent notification.

    Records the failed component in hass.data and (re)creates a single
    notification listing all failures so far.

    This method must be run in the event loop.
    """
    errors = hass.data.get(DATA_PERSISTENT_ERRORS)

    if errors is None:
        errors = hass.data[DATA_PERSISTENT_ERRORS] = {}

    # Once a component was recorded with link=True it stays True.
    errors[component] = errors.get(component) or link

    # NOTE: 'link' inside the comprehension is the per-entry value from
    # 'errors' (comprehensions have their own scope), not the parameter --
    # entries recorded with link=True render as documentation links.
    _lst = [HA_COMPONENT_URL.format(name.replace('_', '-'), name)
            if link else name for name, link in errors.items()]

    message = ('The following components and platforms could not be set up:\n'
               '* ' + '\n* '.join(list(_lst)) + '\nPlease check your config')

    persistent_notification.async_create(
        hass, message, 'Invalid config', 'invalid_config')
@core.callback
def async_log_exception(ex, domain, config, hass):
    """Generate log exception for config validation.

    This method must be run in the event loop.
    """
    # Record the failure for the persistent notification first; the
    # notification does not depend on the message built below.
    if hass is not None:
        _async_persistent_notification(hass, domain, True)

    parts = ['Invalid config for [{}]: '.format(domain)]

    if 'extra keys not allowed' in ex.error_message:
        parts.append('[{}] is an invalid option for [{}]. Check: {}->{}.'
                     .format(ex.path[-1], domain, domain,
                             '->'.join(str(m) for m in ex.path)))
    else:
        parts.append('{}.'.format(humanize_error(config, ex)))

    # Point at the offending file/line when the YAML loader annotated it.
    domain_config = config.get(domain, config)
    parts.append(" (See {}, line {}). ".format(
        getattr(domain_config, '__config_file__', '?'),
        getattr(domain_config, '__line__', '?')))

    if domain != 'homeassistant':
        parts.append('Please check the docs at '
                     'https://home-assistant.io/components/{}/'.format(domain))

    _LOGGER.error(''.join(parts))
def mount_local_lib_path(config_dir: str) -> str:
    """Add local library to Python Path.

    Returns the path to the 'deps' directory inside config_dir, inserting
    it at the front of sys.path if it is not already present.

    Async friendly.
    """
    deps_dir = os.path.join(config_dir, 'deps')
    if deps_dir not in sys.path:
        # Reuse the computed path instead of joining a second time.
        sys.path.insert(0, deps_dir)
    return deps_dir
| |
# -*- coding: utf-8 -*-
import hashlib
import os
import shutil
import requests
import yaml
import tempfile
from jinja2 import FileSystemLoader, Environment
from pykwalify.core import Core
from pykwalify.errors import SchemaError
from dogen.template_helper import TemplateHelper
from dogen.tools import Tools
from dogen import version, DEFAULT_SCRIPT_EXEC, DEFAULT_SCRIPT_USER
from dogen.errors import Error
SUPPORTED_HASH_ALGORITHMS = ['sha256', 'sha1', 'md5']
class Generator(object):
def __init__(self, log, args, plugins=[]):
self.log = log
self.pwd = os.path.realpath(os.path.dirname(os.path.realpath(__file__)))
self.descriptor = os.path.realpath(args.path)
self.without_sources = args.without_sources
self.output = args.output
self.dockerfile = os.path.join(self.output, "Dockerfile")
self.template = args.template
self.scripts_path = args.scripts_path
ssl_verify = None
if args.skip_ssl_verification:
ssl_verify = False
self.ssl_verify = ssl_verify
self.plugins = []
for plugin in plugins:
self.plugins.append(plugin(self, args))
def _fetch_file(self, location, output=None):
"""
Fetches remote file and saves it under output. If no
output path is provided, a temporary file is created
and path to this file is returned.
SSL verification could be disabled by setting
self.ssl_verify to False.
"""
self.log.debug("Fetching '%s' file..." % location)
if not output:
output = tempfile.mktemp("-dogen")
self.log.debug("Fetched file will be saved as '%s'..." % os.path.basename(output))
r = requests.get(location, verify=self.ssl_verify, stream=True)
if r.status_code != 200:
raise Error("Could not download file from %s" % location)
with open(output, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
return output
def _handle_custom_template(self):
"""
Fetches custom template (if provided) and saves as temporary
file. This file is removed later in the process.
"""
if not self.template:
return
self.log.info("Using custom provided template file: '%s'" % self.template)
if Tools.is_url(self.template):
self.template = self._fetch_file(self.template)
if not os.path.exists(self.template):
raise Error("Template file '%s' could not be found. Please make sure you specified correct path or check if the file was successfully fetched." % self.template)
    def configure(self):
        """
        Reads configuration values from the descriptor, if provided.

        Some Dogen configuration values can be set in the YAML
        descriptor file using the 'dogen' section.

        CLI arguments take precedence: the descriptor only fills in
        settings that were not supplied on the command line.
        """
        self._validate_cfg()

        if not self.scripts_path:
            # If scripts directory is not provided, see if there is a "scripts"
            # directory next to the descriptor. If found - assume that's the
            # directory containing scripts.
            scripts = os.path.join(os.path.dirname(self.descriptor), "scripts")
            if os.path.exists(scripts) and os.path.isdir(scripts):
                self.scripts_path = scripts

        # Default to user 0 (root) when the descriptor names no user.
        if 'user' not in self.cfg:
            self.cfg['user'] = 0

        dogen_cfg = self.cfg.get('dogen')

        if not dogen_cfg:
            return

        required_version = dogen_cfg.get('version')

        if required_version:
            # Check if the current running version of Dogen
            # is the one the descriptor is expecting.
            if required_version != version:
                raise Error("You try to parse descriptor that requires Dogen version %s, but you run version %s" % (required_version, version))

        ssl_verify = dogen_cfg.get('ssl_verify')

        # Descriptor may set ssl_verify only when the CLI left it unset (None).
        if self.ssl_verify is None and ssl_verify is not None:
            self.ssl_verify = ssl_verify

        template = dogen_cfg.get('template')

        if template and not self.template:
            self.template = template

        scripts = dogen_cfg.get('scripts_path')

        if scripts and not self.scripts_path:
            self.scripts_path = scripts

        if self.scripts_path and not os.path.exists(self.scripts_path):
            raise Error("Provided scripts directory '%s' does not exist" % self.scripts_path)
def _handle_scripts(self):
if not self.cfg.get('scripts'):
return
for script in self.cfg['scripts']:
package = script['package']
src_path = os.path.join(self.scripts_path, package)
output_path = os.path.join(self.output, "scripts", package)
possible_exec = os.getenv('DOGEN_SCRIPT_EXEC', DEFAULT_SCRIPT_EXEC)
if "exec" not in script and os.path.exists(os.path.join(src_path, possible_exec)):
script['exec'] = possible_exec
if "user" not in script:
script['user'] = os.getenv('DOGEN_SCRIPT_USER', DEFAULT_SCRIPT_USER)
# Poor-man's workaround for not copying multiple times the same thing
if not os.path.exists(output_path):
self.log.info("Copying package '%s'..." % package)
shutil.copytree(src=src_path, dst=output_path)
self.log.debug("Done.")
def _validate_cfg(self):
"""
Open and parse the YAML configuration file and ensure it matches
our Schema for a Dogen configuration.
"""
# Fail early if descriptor file is not found
if not os.path.exists(self.descriptor):
raise Error("Descriptor file '%s' could not be found. Please make sure you specified correct path." % self.descriptor)
schema_path = os.path.join(self.pwd, "schema", "kwalify_schema.yaml")
schema = {}
with open(schema_path, 'r') as fh:
schema = yaml.safe_load(fh)
if schema is None:
raise Error("couldn't read a valid schema at %s" % schema_path)
for plugin in self.plugins:
plugin.extend_schema(schema)
with open(self.descriptor, 'r') as stream:
self.cfg = yaml.safe_load(stream)
c = Core(source_data=self.cfg, schema_data=schema)
try:
c.validate(raise_exception=True)
except SchemaError as e:
raise Error(e)
    def run(self):
        """Generate the image sources: configure, run plugin hooks, copy
        scripts, fetch sources and render the Dockerfile into the output
        directory."""
        # Set Dogen settings if provided in descriptor
        self.configure()

        # Special case for ssl_verify setting. Setting it to None
        # in CLI if --skip-ssl-verification is not set to make it
        # possible to determine which setting should be used.
        # This means that we need to set the ssl_verify to the
        # default value of True if not set.
        if self.ssl_verify is None:
            self.ssl_verify = True

        for plugin in self.plugins:
            plugin.prepare(cfg=self.cfg)

        if self.template:
            self._handle_custom_template()

        # Remove the target scripts directory
        shutil.rmtree(os.path.join(self.output, "scripts"), ignore_errors=True)

        if not os.path.exists(self.output):
            os.makedirs(self.output)

        if self.scripts_path:
            self._handle_scripts()
        else:
            self.log.warn("No scripts will be copied, mistake?")

        for plugin in self.plugins:
            plugin.before_sources(cfg=self.cfg)

        self.handle_sources()
        self.render_from_template()

        for plugin in self.plugins:
            plugin.after_sources(files=self.cfg.get('artifacts'))

        self.log.info("Finished!")
def render_from_template(self):
    """Render the Dockerfile from a Jinja template into ``self.dockerfile``.

    Ensures the 'maintainer', 'description' and 'summary' labels exist
    (mirroring top-level descriptor keys), then renders either the
    user-supplied template or the bundled default one.  A template that was
    downloaded from a URL is removed afterwards.
    """
    if not self.cfg.get('labels'):
        self.cfg['labels'] = []

    # Map label name -> value for the membership checks below.
    labels = {label['name']: label['value'] for label in self.cfg.get('labels')}

    # https://github.com/jboss-dockerfiles/dogen/issues/129
    # https://github.com/jboss-dockerfiles/dogen/issues/137
    for label in ['maintainer', 'description']:
        value = self.cfg.get(label)
        # idiom fix: 'label not in labels' instead of 'not label in labels'
        if value and label not in labels:
            self.cfg['labels'].append({'name': label, 'value': value})
            labels[label] = value

    # https://github.com/jboss-dockerfiles/dogen/issues/195
    if 'summary' not in labels:
        if 'description' in labels:
            self.cfg['labels'].append({'name': 'summary', 'value': labels.get('description')})

    if self.template:
        template_file = self.template
    else:
        self.log.debug("Using dogen provided template file")
        template_file = os.path.join(self.pwd, "templates", "template.jinja")

    self.log.info("Rendering Dockerfile...")
    loader = FileSystemLoader(os.path.dirname(template_file))
    env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
    env.globals['helper'] = TemplateHelper()
    template = env.get_template(os.path.basename(template_file))

    # Write UTF-8 bytes so rendering behaves identically on Python 2 and 3.
    with open(self.dockerfile, 'wb') as f:
        f.write(template.render(self.cfg).encode('utf-8'))
    self.log.debug("Done")

    # A template fetched from a URL was stored in a temporary file.
    if self.template and Tools.is_url(self.template):
        self.log.debug("Removing temporary template file...")
        os.remove(self.template)
def handle_sources(self):
    """Fetch (or validate already-downloaded) source artifacts from the descriptor.

    Populates ``self.cfg['artifacts']`` mapping target file name to
    "<algorithm>:<checksum>" (or None when no checksum was provided).
    Returns [] early when there are no sources or sources are disabled.
    """
    if 'sources' not in self.cfg or self.without_sources:
        return []
    self.log.info("Handling artifacts...")
    self.cfg['artifacts'] = {}
    # Optional cache URL template with #filename#/#hash#/#algorithm# placeholders.
    sources_cache = os.environ.get("DOGEN_SOURCES_CACHE")
    self.log.debug("Source cache will be used for all artifacts")
    for source in self.cfg['sources']:
        url = source.get('url')
        artifact = source.get('artifact')
        if url:
            self.log.warn("The 'url' key is deprecated, please use 'artifact' for specifying the %s artifact location" % url)
            if artifact:
                self.log.warn("You specified both: 'artifact' and 'url' for a source file, 'artifact': will be used: %s" % artifact)
            else:
                # Backward compatibility
                artifact = url
        if not artifact:
            raise Error("Artifact location for one or more sources was not provided, please check your image descriptor!")
        self.log.info("Handling artifact '%s'" % artifact)
        basename = os.path.basename(artifact)
        target = source.get('target')
        # In case we specify target name for the artifact - use it
        if not target:
            target = basename
        filename = ("%s/%s" % (self.output, target))
        # 'passed' becomes True only when a local copy exists and matches
        # every provided checksum.
        passed = False
        algorithms = []
        md5sum = source.get('md5sum')
        if md5sum:
            self.log.warn("The 'md5sum' key is deprecated, please use 'md5' for %s. Or better switch to sha256 or sha1." % artifact)
            # Backwards compatibility for md5sum
            if not source.get('md5'):
                source['md5'] = md5sum
        # Collect every checksum algorithm the descriptor supplies for
        # this source.
        for supported_algorithm in SUPPORTED_HASH_ALGORITHMS:
            if not source.get(supported_algorithm):
                continue
            algorithms.append(supported_algorithm)
        try:
            if os.path.exists(filename):
                if algorithms:
                    for algorithm in algorithms:
                        self.check_sum(filename, source[algorithm], algorithm)
                passed = True
        except Exception as e:
            # check_sum raises on mismatch: force a re-download.
            self.log.debug(str(e))
            self.log.warn("Local file doesn't match provided checksum, artifact '%s' will be downloaded again" % artifact)
            passed = False
        if not passed:
            if sources_cache:
                cached_artifact = sources_cache.replace('#filename#', basename)
                if algorithms:
                    if len(algorithms) > 1:
                        self.log.warn("You specified multiple algorithms for '%s' artifact, but only '%s' will be used to fetch it from cache" % (artifact, algorithms[0]))
                    cached_artifact = cached_artifact.replace('#hash#', source[algorithms[0]]).replace('#algorithm#', algorithms[0])
                try:
                    self._fetch_file(cached_artifact, filename)
                except Exception as e:
                    # Cache miss/failure: fall back to the original location.
                    self.log.warn("Could not download artifact from cached location: '%s': %s. Please make sure you set the correct value for DOGEN_SOURCES_CACHE (currently: '%s')." % (cached_artifact, str(e), sources_cache))
                    self._download_source(artifact, filename, source.get('hint'))
            else:
                self._download_source(artifact, filename, source.get('hint'))
            # Verify the freshly downloaded file as well.
            if algorithms:
                for algorithm in algorithms:
                    self.check_sum(filename, source[algorithm], algorithm)
        if algorithms:
            # Record only the first algorithm's checksum for the artifact map.
            self.cfg['artifacts'][target] = "%s:%s" % (algorithms[0], source[algorithms[0]])
        else:
            self.log.warn("No checksum was specified for artifact '%s'!" % artifact)
            self.cfg['artifacts'][target] = None
def _download_source(self, artifact, filename, hint=None):
    """Fetch a remote artifact, or instruct the user to supply it manually.

    Artifacts that are not URLs cannot be fetched automatically: log the
    optional hint plus download instructions and raise Error.  URL artifacts
    are fetched via _fetch_file; any failure is wrapped in Error.
    """
    if not Tools.is_url(artifact):
        # Nothing we can fetch ourselves -- ask the user to do it.
        if hint:
            self.log.info(hint)
        self.log.info("Please download the '%s' artifact manually and save it as '%s'" % (artifact, filename))
        raise Error("Artifact '%s' could not be fetched!" % artifact)

    self.log.warn("Trying to download the '%s' artifact from original location" % artifact)
    try:
        self._fetch_file(artifact, filename)
    except Exception as e:
        raise Error("Could not download artifact from orignal location, reason: %s" % str(e))
def check_sum(self, filename, checksum, algorithm):
    """Verify that the file's digest matches ``checksum`` (case-insensitive).

    :param filename: path of the file to hash
    :param checksum: expected hex digest
    :param algorithm: a hashlib algorithm name ('md5', 'sha1', 'sha256', ...)
    :raises Error: when the computed digest does not match
    """
    self.log.debug("Checking '%s' %s hash..." % (os.path.basename(filename), algorithm))
    # Renamed local from 'hash' -- it shadowed the builtin of the same name.
    hasher = getattr(hashlib, algorithm)()
    with open(filename, "rb") as f:
        # Read in 64 KiB chunks so large artifacts do not load into memory.
        for chunk in iter(lambda: f.read(65536), b""):
            hasher.update(chunk)
    filesum = hasher.hexdigest()
    if filesum.lower() != checksum.lower():
        raise Error("The %s computed for the '%s' file ('%s') doesn't match the '%s' value" % (algorithm, filename, filesum, checksum))
    self.log.debug("Hash is correct.")
| |
from __future__ import unicode_literals
import os
import sys
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from django.utils import six
from .exceptions import AmbiguityError, BadMigrationError, NodeNotFoundError
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader(object):
    """
    Loads migration files from disk, and their status from the database.
    Migration files are expected to live in the "migrations" directory of
    an app. Their names are entirely unimportant from a code perspective,
    but will probably follow the 1234_name.py convention.
    On initialization, this class will scan those directories, and open and
    read the python files, looking for a class called Migration, which should
    inherit from django.db.migrations.Migration. See
    django.db.migrations.migration for what that looks like.
    Some migrations will be marked as "replacing" another set of migrations.
    These are loaded into a separate set of migrations away from the main ones.
    If all the migrations they replace are either unapplied or missing from
    disk, then they are injected into the main set, replacing the named migrations.
    Any dependency pointers to the replaced migrations are re-pointed to the
    new migration.
    This does mean that this class MUST also talk to the database as well as
    to disk, but this is probably fine. We're already not just operating
    in memory.
    """

    def __init__(self, connection, load=True, ignore_no_migrations=False):
        # connection may be None: then applied_migrations is simply empty.
        self.connection = connection
        self.disk_migrations = None
        self.applied_migrations = None
        self.ignore_no_migrations = ignore_no_migrations
        if load:
            self.build_graph()

    @classmethod
    def migrations_module(cls, app_label):
        # settings.MIGRATION_MODULES can override the module per app label.
        if app_label in settings.MIGRATION_MODULES:
            return settings.MIGRATION_MODULES[app_label]
        else:
            app_package_name = apps.get_app_config(app_label).name
            return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME)

    def load_disk(self):
        """
        Loads the migrations from all INSTALLED_APPS from disk.
        """
        self.disk_migrations = {}
        self.unmigrated_apps = set()
        self.migrated_apps = set()
        for app_config in apps.get_app_configs():
            # Get the migrations module directory
            module_name = self.migrations_module(app_config.label)
            if module_name is None:
                self.unmigrated_apps.add(app_config.label)
                continue
            was_loaded = module_name in sys.modules
            try:
                module = import_module(module_name)
            except ImportError as e:
                # I hate doing this, but I don't want to squash other import errors.
                # Might be better to try a directory check directly.
                if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                raise
            else:
                # PY3 will happily import empty dirs as namespaces.
                if not hasattr(module, '__file__'):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Module is not a package (e.g. migrations.py).
                if not hasattr(module, '__path__'):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Force a reload if it's already loaded (tests need this)
                if was_loaded:
                    six.moves.reload_module(module)
            self.migrated_apps.add(app_config.label)
            directory = os.path.dirname(module.__file__)
            # Scan for .py files
            migration_names = set()
            for name in os.listdir(directory):
                if name.endswith(".py"):
                    import_name = name.rsplit(".", 1)[0]
                    if import_name[0] not in "_.~":
                        migration_names.add(import_name)
            # Load them
            south_style_migrations = False
            for migration_name in migration_names:
                try:
                    migration_module = import_module("%s.%s" % (module_name, migration_name))
                except ImportError as e:
                    # Ignore South import errors, as we're triggering them
                    if "south" in str(e).lower():
                        south_style_migrations = True
                        break
                    raise
                if not hasattr(migration_module, "Migration"):
                    raise BadMigrationError(
                        "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                    )
                # Ignore South-style migrations
                if hasattr(migration_module.Migration, "forwards"):
                    south_style_migrations = True
                    break
                self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                    migration_name,
                    app_config.label,
                )
            if south_style_migrations:
                self.unmigrated_apps.add(app_config.label)

    def get_migration(self, app_label, name_prefix):
        "Gets the migration exactly named, or raises `graph.NodeNotFoundError`"
        return self.graph.nodes[app_label, name_prefix]

    def get_migration_by_prefix(self, app_label, name_prefix):
        "Returns the migration(s) which match the given app label and name _prefix_"
        # Do the search
        results = []
        for l, n in self.disk_migrations:
            if l == app_label and n.startswith(name_prefix):
                results.append((l, n))
        if len(results) > 1:
            raise AmbiguityError(
                "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
            )
        elif len(results) == 0:
            raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
        else:
            return self.disk_migrations[results[0]]

    def check_key(self, key, current_app):
        # Resolve "__first__"/"__latest__" pseudo-references to a concrete
        # migration key; returns None when the reference should be ignored.
        if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
            return key
        # Special-case __first__, which means "the first migration" for
        # migrated apps, and is ignored for unmigrated apps. It allows
        # makemigrations to declare dependencies on apps before they even have
        # migrations.
        if key[0] == current_app:
            # Ignore __first__ references to the same app (#22325)
            return
        if key[0] in self.unmigrated_apps:
            # This app isn't migrated, but something depends on it.
            # The models will get auto-added into the state, though
            # so we're fine.
            return
        if key[0] in self.migrated_apps:
            try:
                if key[1] == "__first__":
                    return list(self.graph.root_nodes(key[0]))[0]
                else:  # "__latest__"
                    return list(self.graph.leaf_nodes(key[0]))[0]
            except IndexError:
                if self.ignore_no_migrations:
                    return None
                else:
                    raise ValueError("Dependency on app with no migrations: %s" % key[0])
        raise ValueError("Dependency on unknown app: %s" % key[0])

    def build_graph(self):
        """
        Builds a migration dependency graph using both the disk and database.
        You'll need to rebuild the graph if you apply migrations. This isn't
        usually a problem as generally migration stuff runs in a one-shot process.
        """
        # Load disk data
        self.load_disk()
        # Load database data
        if self.connection is None:
            self.applied_migrations = set()
        else:
            recorder = MigrationRecorder(self.connection)
            self.applied_migrations = recorder.applied_migrations()
        # Do a first pass to separate out replacing and non-replacing migrations
        normal = {}
        replacing = {}
        for key, migration in self.disk_migrations.items():
            if migration.replaces:
                replacing[key] = migration
            else:
                normal[key] = migration
        # Calculate reverse dependencies - i.e., for each migration, what depends on it?
        # This is just for dependency re-pointing when applying replacements,
        # so we ignore run_before here.
        reverse_dependencies = {}
        for key, migration in normal.items():
            for parent in migration.dependencies:
                reverse_dependencies.setdefault(parent, set()).add(key)
        # Remember the possible replacements to generate more meaningful error
        # messages
        reverse_replacements = {}
        for key, migration in replacing.items():
            for replaced in migration.replaces:
                reverse_replacements.setdefault(replaced, set()).add(key)
        # Carry out replacements if we can - that is, if all replaced migrations
        # are either unapplied or missing.
        for key, migration in replacing.items():
            # Ensure this replacement migration is not in applied_migrations
            self.applied_migrations.discard(key)
            # Do the check. We can replace if all our replace targets are
            # applied, or if all of them are unapplied.
            applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
            can_replace = all(applied_statuses) or (not any(applied_statuses))
            if not can_replace:
                continue
            # Alright, time to replace. Step through the replaced migrations
            # and remove, repointing dependencies if needs be.
            for replaced in migration.replaces:
                if replaced in normal:
                    # We don't care if the replaced migration doesn't exist;
                    # the usage pattern here is to delete things after a while.
                    del normal[replaced]
                for child_key in reverse_dependencies.get(replaced, set()):
                    if child_key in migration.replaces:
                        continue
                    # List of migrations whose dependency on `replaced` needs
                    # to be updated to a dependency on `key`.
                    to_update = []
                    # Child key may itself be replaced, in which case it might
                    # not be in `normal` anymore (depending on whether we've
                    # processed its replacement yet). If it's present, we go
                    # ahead and update it; it may be deleted later on if it is
                    # replaced, but there's no harm in updating it regardless.
                    if child_key in normal:
                        to_update.append(normal[child_key])
                    # If the child key is replaced, we update its replacement's
                    # dependencies too, if necessary. (We don't know if this
                    # replacement will actually take effect or not, but either
                    # way it's OK to update the replacing migration).
                    if child_key in reverse_replacements:
                        for replaces_child_key in reverse_replacements[child_key]:
                            if replaced in replacing[replaces_child_key].dependencies:
                                to_update.append(replacing[replaces_child_key])
                    # Actually perform the dependency update on all migrations
                    # that require it.
                    for migration_needing_update in to_update:
                        migration_needing_update.dependencies.remove(replaced)
                        migration_needing_update.dependencies.append(key)
            normal[key] = migration
            # Mark the replacement as applied if all its replaced ones are
            if all(applied_statuses):
                self.applied_migrations.add(key)
        # Store the replacement migrations for later checks
        self.replacements = replacing
        # Finally, make a graph and load everything into it
        self.graph = MigrationGraph()
        for key, migration in normal.items():
            self.graph.add_node(key, migration)

        def _reraise_missing_dependency(migration, missing, exc):
            """
            Checks if ``missing`` could have been replaced by any squash
            migration but wasn't because the squash migration was partially
            applied before. In that case raise a more understandable exception.
            #23556
            """
            if missing in reverse_replacements:
                candidates = reverse_replacements.get(missing, set())
                is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
                if not is_replaced:
                    tries = ', '.join('%s.%s' % c for c in candidates)
                    exc_value = NodeNotFoundError(
                        "Migration {0} depends on nonexistent node ('{1}', '{2}'). "
                        "Django tried to replace migration {1}.{2} with any of [{3}] "
                        "but wasn't able to because some of the replaced migrations "
                        "are already applied.".format(
                            migration, missing[0], missing[1], tries
                        ),
                        missing)
                    exc_value.__cause__ = exc
                    six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
            raise exc

        # Add all internal dependencies first to ensure __first__ dependencies
        # find the correct root node.
        for key, migration in normal.items():
            for parent in migration.dependencies:
                if parent[0] != key[0] or parent[1] == '__first__':
                    # Ignore __first__ references to the same app (#22325)
                    continue
                try:
                    self.graph.add_dependency(migration, key, parent)
                except NodeNotFoundError as e:
                    # Since we added "key" to the nodes before this implies
                    # "parent" is not in there. To make the raised exception
                    # more understandable we check if parent could have been
                    # replaced but hasn't (eg partially applied squashed
                    # migration)
                    _reraise_missing_dependency(migration, parent, e)
        for key, migration in normal.items():
            for parent in migration.dependencies:
                if parent[0] == key[0]:
                    # Internal dependencies already added.
                    continue
                parent = self.check_key(parent, key[0])
                if parent is not None:
                    try:
                        self.graph.add_dependency(migration, key, parent)
                    except NodeNotFoundError as e:
                        # Since we added "key" to the nodes before this implies
                        # "parent" is not in there.
                        _reraise_missing_dependency(migration, parent, e)
            for child in migration.run_before:
                child = self.check_key(child, key[0])
                if child is not None:
                    try:
                        self.graph.add_dependency(migration, child, key)
                    except NodeNotFoundError as e:
                        # Since we added "key" to the nodes before this implies
                        # "child" is not in there.
                        _reraise_missing_dependency(migration, child, e)

    def detect_conflicts(self):
        """
        Looks through the loaded graph and detects any conflicts - apps
        with more than one leaf migration. Returns a dict of the app labels
        that conflict with the migration names that conflict.
        """
        seen_apps = {}
        conflicting_apps = set()
        for app_label, migration_name in self.graph.leaf_nodes():
            if app_label in seen_apps:
                conflicting_apps.add(app_label)
            seen_apps.setdefault(app_label, set()).add(migration_name)
        return {app_label: seen_apps[app_label] for app_label in conflicting_apps}

    def project_state(self, nodes=None, at_end=True):
        """
        Returns a ProjectState object representing the most recent state
        that the migrations we loaded represent.
        See graph.make_state for the meaning of "nodes" and "at_end"
        """
        return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
| |
#!/usr/local/epd/bin/python
#------------------------------------------------------------------------------------------------------
# Dirac propagator based on:
# Fillion-Gourdeau, Francois, Lorin, Emmanuel, Bandrauk, Andre D.
# Numerical Solution of the Time-Dependent Dirac Equation in Coordinate Space without Fermion-Doubling
#------------------------------------------------------------------------------------------------------
import numpy as np
import scipy.fftpack as fftpack
import h5py
import time
import sys
from scipy.special import laguerre
from scipy.special import genlaguerre
from scipy.special import legendre
#from pyfft.cuda import Plan
import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import cufft_wrapper as cuda_fft
#-------------------------------------------------------------------------------
# CUDA source computing the (pre-reduction) integrand of the A0 potential
# expectation value over the four spinor components.
# Format slots: (constant definitions, A0(t, x, y) expression).
Potential_0_Average_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s; // Constants
__device__ double Potential0(double t, double x, double y)
{
return %s ;
}
//............................................................................................................
__global__ void Kernel( pycuda::complex<double>* preExpectationValue,
pycuda::complex<double>* Psi1, pycuda::complex<double>* Psi2, pycuda::complex<double>* Psi3, pycuda::complex<double>* Psi4,
double t)
{
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
int j = (threadIdx.x + DIM_X/2)%%DIM_X;
int i = (blockIdx.x + DIM_Y/2)%%DIM_Y;
double x = dX*(j - DIM_X/2);
double y = dY*(i - DIM_Y/2);
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x ;
double out;
out = Potential0( t, x, y)* pow( abs( Psi1[indexTotal] ) , 2 );
out += Potential0( t, x, y)* pow( abs( Psi2[indexTotal] ) , 2 );
out += Potential0( t, x, y)* pow( abs( Psi3[indexTotal] ) , 2 );
out += Potential0( t, x, y)* pow( abs( Psi4[indexTotal] ) , 2 );
preExpectationValue[indexTotal] = out;
}
"""
#--------------------------------------------------------------------------------
# CUDA source for the free kinetic step in momentum space: applies
# exp(-i c dt |p| alpha.p/|p|) analytically to the four components.
# Format slot: constant definitions.
# NOTE(review): 'sdt' is used but 'cosdt' is computed and never used
# (cos(c*dt*pp) is re-evaluated inline instead) -- harmless but redundant.
BaseCUDAsource_K = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__global__ void Kernel(
pycuda::complex<double> *Psi1, pycuda::complex<double> *Psi2, pycuda::complex<double> *Psi3, pycuda::complex<double> *Psi4 )
{
pycuda::complex<double> I;
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
int j = (threadIdx.x + DIM_X/2)%%DIM_X;
int i = (blockIdx.x + DIM_Y/2)%%DIM_Y;
pycuda::complex<double> _Psi1, _Psi2, _Psi3, _Psi4;
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x;
double px = dPx*(j - DIM_X/2);
double py = dPy*(i - DIM_Y/2);
double pp = sqrt( px*px + py*py + pow(10.,-12));
double sdt = sin( c*dt*pp )/pp;
double cosdt = cos(c*dt*pp);
pycuda::complex<double> p_plus = pycuda::complex<double>(py,px);
pycuda::complex<double> p_mius = pycuda::complex<double>(py,-px);
_Psi1= cos(c*dt*pp)*Psi1[indexTotal] - p_plus*sdt*Psi4[indexTotal];
_Psi2= cos(c*dt*pp)*Psi2[indexTotal] + p_mius*sdt*Psi3[indexTotal] ;
_Psi3= - p_plus*sdt*Psi2[indexTotal] + cos(c*dt*pp)*Psi3[indexTotal] ;
_Psi4= p_mius*sdt*Psi1[indexTotal] + cos(c*dt*pp)*Psi4[indexTotal];
Psi1[indexTotal] = _Psi1;
Psi2[indexTotal] = _Psi2;
Psi3[indexTotal] = _Psi3;
Psi4[indexTotal] = _Psi4;
}
"""
# CUDA source for the potential (interaction) step.
# Format slots: (constants, A0, A1, A2, A3 expressions).
# NOTE(review): F uses c*VectorPotentialSquareSum while the mass term uses
# (mass*c*c)^2 -- confirm the power of c against the intended units.
DiracPropagatorA_source = """
//
// source code for the Dirac propagator with scalar-vector potential interaction
// and smooth time dependence
//
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s // Define the essential constants
// The vector potential must be supplied with UP indices and: eA
__device__ double A0(double t, double x, double y)
{
return %s ;
}
__device__ double A1(double t, double x, double y)
{
return %s ;
}
__device__ double A2(double t, double x, double y)
{
return %s ;
}
__device__ double A3(double t, double x, double y)
{
return %s ;
}
__device__ double VectorPotentialSquareSum(double t, double x, double y)
{
return pow( A1(t,x,y), 2.) + pow( A2(t,x,y), 2.) + pow( A3(t,x,y), 2.);
}
//-------------------------------------------------------------------------------------------------------------
__global__ void DiracPropagatorA_Kernel(
pycuda::complex<double> *Psi1, pycuda::complex<double> *Psi2, pycuda::complex<double> *Psi3, pycuda::complex<double> *Psi4, double t )
{
pycuda::complex<double> I = pycuda::complex<double>(0.,1.);
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
int j = (threadIdx.x + DIM_X/2)%%DIM_X;
int i = (blockIdx.x + DIM_Y/2)%%DIM_Y;
const int indexTotal = threadIdx.x + DIM_X * blockIdx.x ;
double x = dX*(j - DIM_X/2);
double y = dY*(i - DIM_Y/2);
pycuda::complex<double> _Psi1, _Psi2, _Psi3, _Psi4;
double F;
F = sqrt( pow(mass*c*c,2.) + c*VectorPotentialSquareSum(t,x,y) );
pycuda::complex<double> expV0 = exp( -I*dt*A0(t,x,y) );
pycuda::complex<double> U11 = pycuda::complex<double>( cos(dt*F) , -mass*c*c*sin(dt*F)/F );
pycuda::complex<double> U22 = U11;
pycuda::complex<double> U33 = pycuda::complex<double>( cos(dt*F) , mass*c*c*sin(dt*F)/F );
pycuda::complex<double> U44 = U33;
pycuda::complex<double> U31 = pycuda::complex<double>( 0., A3(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U41 = pycuda::complex<double>( -A2(t,x,y)*sin(dt*F)/F , A1(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U32 = pycuda::complex<double>( A2(t,x,y)*sin(dt*F)/F , A1(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U42 = pycuda::complex<double>( 0., -A3(t,x,y)*sin(dt*F)/F );
pycuda::complex<double> U13 = U31;
pycuda::complex<double> U14 = U32;
pycuda::complex<double> U23 = U41;
pycuda::complex<double> U24 = U42;
_Psi1 = expV0*( U11*Psi1[indexTotal] + U13*Psi3[indexTotal] + U14*Psi4[indexTotal] );
_Psi2 = expV0*( U22*Psi2[indexTotal] + U23*Psi3[indexTotal] + U24*Psi4[indexTotal] );
_Psi3 = expV0*( U31*Psi1[indexTotal] + U32*Psi2[indexTotal] + U33*Psi3[indexTotal] );
_Psi4 = expV0*( U41*Psi1[indexTotal] + U42*Psi2[indexTotal] + U44*Psi4[indexTotal] );
Psi1[indexTotal] = _Psi1;
Psi2[indexTotal] = _Psi2;
Psi3[indexTotal] = _Psi3;
Psi4[indexTotal] = _Psi4;
}
"""
# CUDA source applying a smooth absorbing mask near all four grid edges
# (multiplies each component by 1 - exp(-d^2/w) factors in x and y).
# Not %-formatted, hence the single '%' modulus operators below.
BaseCUDAsource_AbsorbBoundary_xy = """
//............................................................................................
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
__global__ void AbsorbBoundary_Kernel(
pycuda::complex<double> *Psi1, pycuda::complex<double> *Psi2,
pycuda::complex<double> *Psi3 , pycuda::complex<double> *Psi4 )
{
const int DIM_X = blockDim.x;
const int DIM_Y = gridDim.x;
const int j = (threadIdx.x + DIM_X/2)%DIM_X;
const int i = (blockIdx.x + DIM_Y/2)%DIM_Y;
const int indexTotal = threadIdx.x + DIM_X*blockIdx.x + DIM_X*DIM_Y*blockIdx.y;
double wx = pow(3.*double(DIM_X)/100.,2);
double wy = pow(3.*double(DIM_Y)/100.,2);
//--------------------------- boundary in x --------------------------------------
double expB = 1. - exp( -double(j*j)/wx );
Psi1[indexTotal] *= expB;
Psi2[indexTotal] *= expB;
Psi3[indexTotal] *= expB;
Psi4[indexTotal] *= expB;
expB = 1.- exp( -(j - DIM_X+1. )*(j - DIM_X+1.)/wx );
Psi1[indexTotal] *= expB;
Psi2[indexTotal] *= expB;
Psi3[indexTotal] *= expB;
Psi4[indexTotal] *= expB;
//-------------- boundary in y
expB = 1.- exp( -double(i*i)/wy );
Psi1[indexTotal] *= expB;
Psi2[indexTotal] *= expB;
Psi3[indexTotal] *= expB;
Psi4[indexTotal] *= expB;
expB = 1. - exp( -double( (i - DIM_Y + 1)*(i - DIM_Y + 1) )/wy );
Psi1[indexTotal] *= expB;
Psi2[indexTotal] *= expB;
Psi3[indexTotal] *= expB;
Psi4[indexTotal] *= expB;
}
"""
#-----------------------------------------------------------------------------------------------
class GPU_Dirac2D:
"""
Propagator 2D for the Dirac equation
Parameters:
gridDIM_X
gridDIM_Y
min_X
min_Y
timeSteps
skipFrames: Number of frames to be saved
frameSaveMode = 'Density' saves only the density
frameSaveMode = 'Spinor' saves the whole spinor
"""
def __init__(self, gridDIM, amplitude, dt, timeSteps, skipFrames = 1,frameSaveMode='Density'):
    """Build coordinate/momentum grids, bake CUDA constants, compile the
    GPU kernels and create the cuFFT plan.

    Subclasses must define mass, c and the Potential_*_String attributes
    before calling this constructor -- they are substituted into the CUDA
    sources below.
    """
    X_amplitude,Y_amplitude = amplitude
    X_gridDIM, Y_gridDIM = gridDIM
    # NOTE(review): np.float is deprecated/removed in modern NumPy; builtin
    # float() is equivalent here.
    self.dX = 2.*X_amplitude/np.float(X_gridDIM)
    self.dY = 2.*Y_amplitude/np.float(Y_gridDIM)
    self.X_amplitude = X_amplitude
    self.Y_amplitude = Y_amplitude
    self.X_gridDIM = X_gridDIM
    self.Y_gridDIM = Y_gridDIM
    self.min_X = -X_amplitude
    self.min_Y = -Y_amplitude
    self.timeSteps = timeSteps
    self.skipFrames = skipFrames
    self.frameSaveMode = frameSaveMode
    rangeX = np.linspace(-X_amplitude, X_amplitude - self.dX, X_gridDIM )
    rangeY = np.linspace(-Y_amplitude, Y_amplitude - self.dY, Y_gridDIM )
    # fftshift puts the origin first, matching the index convention used by
    # the CUDA kernels ((threadIdx.x + DIM/2) % DIM).
    self.X = fftpack.fftshift(rangeX)[np.newaxis, : ]
    self.Y = fftpack.fftshift(rangeY)[:, np.newaxis ]
    self.X_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.X + 0.*self.Y, dtype = np.complex128) )
    self.Y_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Y + 0.*self.X, dtype = np.complex128) )
    # min_Px = np.pi*self.X_gridDIM/(2*self.min_X)
    Px_amplitude = np.pi/self.dX
    self.dPx = 2*Px_amplitude/self.X_gridDIM
    Px_range = np.linspace( -Px_amplitude, Px_amplitude - self.dPx, self.X_gridDIM )
    Py_amplitude = np.pi/self.dY
    self.dPy = 2*Py_amplitude/self.Y_gridDIM
    Py_range = np.linspace( -Py_amplitude, Py_amplitude - self.dPy, self.Y_gridDIM )
    self.Px = fftpack.fftshift(Px_range)[np.newaxis,:]
    self.Py = fftpack.fftshift(Py_range)[:,np.newaxis]
    self.Px_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Px + 0.*self.Py, dtype = np.complex128) )
    self.Py_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Py + 0.*self.Px, dtype = np.complex128) )
    self.dt = dt
    #................ Strings: mass,c,dt must be defined in children class....................
    self.CUDA_constants_essential = '__constant__ double mass=%f; '%self.mass
    self.CUDA_constants_essential += '__constant__ double c=%f; '%self.c
    self.CUDA_constants_essential += '__constant__ double dt=%f; '%self.dt
    self.CUDA_constants_essential += '__constant__ double dX=%f; '%self.dX
    self.CUDA_constants_essential += '__constant__ double dY=%f; '%self.dY
    self.CUDA_constants_essential += '__constant__ double dPx=%f; '%self.dPx
    self.CUDA_constants_essential += '__constant__ double dPy=%f; '%self.dPy
    self.CUDA_constants = self.CUDA_constants_essential #+ self.CUDA_constants_additional
    #................ CUDA Kernels ...........................................................
    self.DiracPropagatorK = SourceModule(BaseCUDAsource_K%self.CUDA_constants,arch="sm_20").get_function( "Kernel" )
    self.DiracPropagatorA = \
        SourceModule( DiracPropagatorA_source%(
            self.CUDA_constants,
            self.Potential_0_String,
            self.Potential_1_String,
            self.Potential_2_String,
            self.Potential_3_String),arch="sm_20").get_function( "DiracPropagatorA_Kernel" )
    self.Potential_0_Average_Function = \
        SourceModule( Potential_0_Average_source%(
            self.CUDA_constants,self.Potential_0_String) ).get_function("Kernel" )
    self.DiracAbsorbBoundary_xy = \
        SourceModule(BaseCUDAsource_AbsorbBoundary_xy,arch="sm_20").get_function( "AbsorbBoundary_Kernel" )
    #...........................FFT PLAN.................................................
    self.plan_Z2Z_2D = cuda_fft.Plan_Z2Z( (self.X_gridDIM,self.Y_gridDIM) )
def Fourier_X_To_P_GPU(self,W_out_GPU):
    """In-place forward FFT (x -> p representation) of one component on the GPU."""
    cuda_fft.fft_Z2Z( W_out_GPU, W_out_GPU , self.plan_Z2Z_2D )
def Fourier_P_To_X_GPU(self,W_out_GPU):
    """In-place inverse FFT (p -> x representation) of one component on the GPU."""
    cuda_fft.ifft_Z2Z( W_out_GPU, W_out_GPU , self.plan_Z2Z_2D )
    # cuFFT leaves the inverse transform unnormalized: divide by N here.
    W_out_GPU *= 1./float(self.X_gridDIM*self.Y_gridDIM)
def Fourier_4_X_To_P_GPU(self, Psi1, Psi2, Psi3, Psi4):
    """Apply the forward x -> p FFT, in place, to all four spinor components."""
    for component in (Psi1, Psi2, Psi3, Psi4):
        self.Fourier_X_To_P_GPU(component)
def Fourier_4_P_To_X_GPU(self, Psi1, Psi2, Psi3, Psi4):
    """Apply the inverse p -> x FFT, in place, to all four spinor components."""
    for component in (Psi1, Psi2, Psi3, Psi4):
        self.Fourier_P_To_X_GPU(component)
#-------------------------------------------------------------------------------------------------------------------
# Gaussian PARTICLE spinors
def Spinor_Particle_SpinUp(self, p_init, modulation_Function ):
    """Positive-energy (particle) spin-up plane-wave spinor, unnormalized.

    p_init is (px, py); modulation_Function(X, Y) shapes the spatial envelope.
    Returns a (4, ...) array of components on the class grid.
    """
    px, py = p_init
    wave = np.exp(1j*self.X*px + 1j*self.Y*py)
    envelope = wave * modulation_Function(self.X, self.Y)
    energy = np.sqrt(px*px + py*py + (self.mass*self.c)**2)
    zero = envelope * 0.
    upper = envelope * (energy + self.mass*self.c)
    lower = envelope * (px + 1j*py)
    return np.array([upper, zero, zero, lower])
def Spinor_Particle_SpinDown(self, p_init, modulation_Function ):
    """Positive-energy (particle) spin-down plane-wave spinor, unnormalized.

    p_init is (px, py); modulation_Function(X, Y) shapes the spatial envelope.
    """
    px, py = p_init
    envelope = modulation_Function(self.X, self.Y) * np.exp(1j*self.X*px + 1j*self.Y*py)
    energy = np.sqrt(px*px + py*py + (self.mass*self.c)**2)
    zero = 0. * envelope
    return np.array([
        zero,
        envelope * (energy + self.mass*self.c),
        envelope * (px - 1j*py),
        zero,
    ])
def Spinor_AntiParticle_SpinDown(self, p_init, modulation_Function ):
    """Negative-energy (antiparticle) spin-down plane-wave spinor, unnormalized.

    Uses the negative energy branch; the overall -1j factor matches the phase
    convention of the particle spinors above.
    """
    px, py = p_init
    envelope = np.exp(1j*self.X*px + 1j*self.Y*py) * modulation_Function(self.X, self.Y)
    energy = -np.sqrt(px*px + py*py + (self.mass*self.c)**2)
    zero = 0. * envelope
    components = [
        zero,
        envelope * (px + 1j*py),
        envelope * (energy - self.mass*self.c),
        zero,
    ]
    return -1j * np.array(components)
def Spinor_AntiParticle_SpinUp(self, p_init, modulation_Function ):
    """Negative-energy (antiparticle) spin-up plane-wave spinor, unnormalized.

    Uses the negative energy branch; the overall -1j factor matches the phase
    convention of the particle spinors above.
    """
    px, py = p_init
    envelope = np.exp(1j*self.X*px + 1j*self.Y*py) * modulation_Function(self.X, self.Y)
    energy = -np.sqrt(px*px + py*py + (self.mass*self.c)**2)
    zero = 0. * envelope
    components = [
        envelope * (px - 1j*py),
        zero,
        zero,
        envelope * (energy - self.mass*self.c),
    ]
    return -1j * np.array(components)
#.......................................................................
def Boost(self, p1, p2):
    """Return the (unnormalized) 4x4 boost matrix in the Dirac gamma-matrix
    representation for transverse momentum (p1, p2).

    NOTE: the original also computed K = sqrt(2*m*c*(m*c + p0)) -- the usual
    normalization constant -- but never used it; the dead local was removed.
    """
    p0 = np.sqrt( (self.mass*self.c)**2 + p1**2 + p2**2 )
    B00 = self.mass*self.c + p0
    p_Plus = p1 + 1j*p2
    p_Minus = p1 - 1j*p2
    return np.array([ [B00, 0., 0., p_Minus],
                      [0., B00, p_Plus, 0.],
                      [0., p_Minus, B00, 0.],
                      [p_Plus, 0., 0., B00] ])
def LandauLevelSpinor(self, B , n , x , y ,type=1):
    """Landau-level spinor (symmetric gauge) centred at (x, y), normalized
    via self.Norm.

    :param B: magnetic field strength
    :param n: Landau level index (n >= 0)
    :param x, y: centre of the level on the grid
    :param type: 1 or 2, selecting which pair of components is populated
    :raises ValueError: if type is neither 1 nor 2.  (The original code
        printed an error with a Python-2 print statement and then crashed
        with NameError because 'spinor' was never assigned.)
    """
    def energy(n):
        return np.sqrt( (self.mass*self.c**2)**2 + 2*B*self.c*self.hBar*n )
    K = B*( (self.X-x)**2 + (self.Y-y)**2)/( 4.*self.c*self.hBar )
    psi1 = np.exp(-K)*( energy(n) + self.mass*self.c**2 )*laguerre(n)( 2*K )
    psi3 = np.exp(-K)*( energy(n) - self.mass*self.c**2 )*laguerre(n)( 2*K )
    if n > 0:
        psi2 = 1j*np.exp(-K)*( self.X-x + 1j*(self.Y-y) )*genlaguerre(n-1,1)( 2*K )
    else:
        # Ground level has no lower Landau component.
        psi2 = 0.*K
    psi4 = psi2
    if type == 1:
        spinor = np.array([ psi1 , 0*psi2 , 0*psi3 , psi4 ])
    elif type == 2:
        spinor = np.array([ 0*psi1 , psi2 , psi3 , 0*psi4 ])
    else:
        raise ValueError('Error: type spinor must be 1 or 2')
    norm = self.Norm(spinor)
    spinor /= norm
    return spinor
def LandauLevelSpinor_Boosted(self, B , n , x , y , py ):
    """Gaussian spinor with a y-momentum boost, normalized via self.Norm.

    Note: the level index n is accepted but not used by this construction.
    """
    gauss_arg = B*( (self.X - x)**2 + (self.Y - y)**2 )/( 4.*self.c*self.hBar )
    envelope = np.exp(-gauss_arg)
    p0 = np.sqrt( (self.mass*self.c)**2 + py**2 )
    gap = p0 - self.mass*self.c
    zero = 0j*gauss_arg
    spinor = np.array([
        zero,
        1j*self.c*envelope*py*gap,
        self.c*envelope*py*py*gap + 0j,
        zero,
    ])
    return spinor / self.Norm(spinor)
def LandaoLevelSpinor_GaugeX(self, B, n, Py):
    """Landau-level spinor in the Landau gauge (vector potential along x)
    for transverse momentum Py, normalized with self.Norm.

    Bug fixed: the returned spinor previously repeated psi2 in the fourth
    component even though psi4 was computed and never used; it now uses psi4.

    NOTE(review): for n > 0 the first psi2 expression is immediately
    overwritten by the scalar 2j*n*sqrt(B*c*hBar) -- possibly `psi2 *=` was
    intended.  Also `legendre` (rather than Hermite) polynomials are used
    here; confirm against the intended physics.
    """
    def energy(n):
        return np.sqrt((self.mass*self.c**2)**2 + 2*B*self.c*self.hBar*n)
    # Gaussian argument centred on the guiding centre x0 = c*Py/B.
    K = B*(self.X - self.c*Py/B)**2/(2.*self.c*self.hBar)
    psi1 = np.exp(-K)*(self.mass*self.c**2 + energy(n))*legendre(n)(K/np.sqrt(B*self.c*self.hBar))
    psi3 = np.exp(-K)*(self.mass*self.c**2 + energy(n))*legendre(n)(K/np.sqrt(B*self.c*self.hBar))
    if n > 0:
        psi2 = np.exp(-K)*(self.mass*self.c**2 + energy(n))*legendre(n - 1)(K/np.sqrt(B*self.c*self.hBar))
        # NOTE(review): this overwrites (not scales) the line above.
        psi2 = 2*1j*n*np.sqrt(B*self.c*self.hBar)
        psi4 = -psi2
    else:
        psi2 = 0.*K
        psi4 = 0.*K
    # Was np.array([psi1, psi2, psi3, psi2]): psi4 was computed but unused.
    spinor = np.array([psi1, psi2, psi3, psi4])
    norm = self.Norm(spinor)
    spinor /= norm
    return spinor
#.............................................................................................
def _FilterElectrons(self,sign):
    '''
    Routine that uses the Fourier transform to filter positrons/electrons
    Options:
         sign=1   Leaves electrons
         sign=-1  Leaves positrons

    Works in place on self.Psi1_init .. self.Psi4_init.

    NOTE(review): `sign` appears in both the numerators (aa, bb, cc) and in
    sqrtp, so it cancels and the projector is independent of sign -- likely
    why FilterElectrons() re-implements this; confirm before relying on it.
    '''
    print ' '
    print '         Filter Electron routine '
    print ' '
    # Momentum grids matching the FFT ordering of the spatial grids.
    # NOTE(review): min_X is presumably negative, making min_Px negative -- confirm.
    min_Px = np.pi*self.X_gridDIM/(2*self.min_X)
    dPx = 2*np.abs(min_Px)/self.X_gridDIM
    px_Vector = fftpack.fftshift ( np.linspace(min_Px, np.abs(min_Px) - dPx, self.X_gridDIM ))
    min_Py = np.pi*self.Y_gridDIM/(2*self.min_Y)
    dPy = 2*np.abs(min_Py)/self.Y_gridDIM
    py_Vector = fftpack.fftshift ( np.linspace(min_Py, np.abs(min_Py) - dPy, self.Y_gridDIM ))
    # Broadcastable row/column momentum arrays.
    px = px_Vector[np.newaxis,:]
    py = py_Vector[:,np.newaxis]
    # Projector ingredients: 2E in sqrtp, then mc^2/(2E) and p/(2E) terms.
    sqrtp = sign*2*np.sqrt( self.mass*self.mass*self.c**4 + self.c*self.c*px*px + self.c*self.c*py*py )
    aa = sign*self.mass*self.c*self.c/sqrtp
    bb = sign*(px/sqrtp - 1j*py/sqrtp)
    cc = sign*(px/sqrtp + 1j*py/sqrtp)
    # 4x4 energy projector (1/2 +/- H_free/(2E)) in the Dirac basis.
    ElectronProjector = np.matrix([ [0.5+aa , 0. , 0. , bb ],
                                    [0. , 0.5+aa , cc , 0. ],
                                    [0. , bb , 0.5-aa , 0. ],
                                    [cc , 0. , 0. , 0.5-aa] ])
    # Apply the projector in momentum space, component by component.
    psi1_fft = fftpack.fft2( self.Psi1_init )
    psi2_fft = fftpack.fft2( self.Psi2_init )
    psi3_fft = fftpack.fft2( self.Psi3_init )
    psi4_fft = fftpack.fft2( self.Psi4_init )
    psi1_fft_electron = ElectronProjector[0,0]*psi1_fft + ElectronProjector[0,1]*psi2_fft +\
        ElectronProjector[0,2]*psi3_fft + ElectronProjector[0,3]*psi4_fft
    psi2_fft_electron = ElectronProjector[1,0]*psi1_fft + ElectronProjector[1,1]*psi2_fft +\
        ElectronProjector[1,2]*psi3_fft + ElectronProjector[1,3]*psi4_fft
    psi3_fft_electron = ElectronProjector[2,0]*psi1_fft + ElectronProjector[2,1]*psi2_fft +\
        ElectronProjector[2,2]*psi3_fft + ElectronProjector[2,3]*psi4_fft
    psi4_fft_electron = ElectronProjector[3,0]*psi1_fft + ElectronProjector[3,1]*psi2_fft +\
        ElectronProjector[3,2]*psi3_fft + ElectronProjector[3,3]*psi4_fft
    # Back to coordinate space; overwrite the stored initial spinor.
    self.Psi1_init = fftpack.ifft2( psi1_fft_electron )
    self.Psi2_init = fftpack.ifft2( psi2_fft_electron )
    self.Psi3_init = fftpack.ifft2( psi3_fft_electron )
    self.Psi4_init = fftpack.ifft2( psi4_fft_electron )
def FilterElectrons(self,sign, Psi):
    '''
    Routine that uses the Fourier transform to filter positrons/electrons
    Options:
         sign=1   Leaves electrons
         sign=-1  Leaves positrons

    Psi is a 4-component spinor on the (X, Y) grid; a new, filtered spinor
    array is returned (the input is not modified).

    NOTE(review): the matrix applied is 1 + sign*H_free/E, i.e. twice the
    energy projector; the result is not renormalized here -- confirm callers
    normalize afterwards.
    '''
    print ' '
    print '         Filter Electron routine '
    print ' '
    # c*p grids (self.Px/self.Py are the momentum grids of the lattice).
    px = self.c*self.Px
    py = self.c*self.Py
    m = self.mass
    c= self.c
    energy = np.sqrt( (m*c**2)**2 + px**2 + py**2 )
    # Entries of 1 + sign*H_free/E in the Dirac basis.
    EP_11 = 1. + sign*m*c**2/energy
    EP_12 = 0.
    EP_13 = 0.
    EP_14 = sign*(px - 1j*py)/energy
    EP_21 = 0.
    EP_22 = 1. + sign*m*c**2/energy
    EP_23 = sign*(px + 1j*py)/energy
    EP_24 = 0.
    EP_31 = 0.
    EP_32 = sign*(px - 1j*py)/energy
    EP_33 = 1. - sign*m*c**2/energy
    EP_34 = 0.
    EP_41 = sign*(px + 1j*py)/energy
    EP_42 = 0.
    EP_43 = 0.
    EP_44 = 1. - sign*m*c**2/energy
    #Psi1, Psi2, Psi3, Psi4 = Psi
    # Apply the matrix in momentum space, then transform back.
    psi1_fft = fftpack.fft2( Psi[0] )
    psi2_fft = fftpack.fft2( Psi[1] )
    psi3_fft = fftpack.fft2( Psi[2] )
    psi4_fft = fftpack.fft2( Psi[3] )
    psi1_fft_electron = EP_11*psi1_fft + EP_12*psi2_fft + EP_13*psi3_fft + EP_14*psi4_fft
    psi2_fft_electron = EP_21*psi1_fft + EP_22*psi2_fft + EP_23*psi3_fft + EP_24*psi4_fft
    psi3_fft_electron = EP_31*psi1_fft + EP_32*psi2_fft + EP_33*psi3_fft + EP_34*psi4_fft
    psi4_fft_electron = EP_41*psi1_fft + EP_42*psi2_fft + EP_43*psi3_fft + EP_44*psi4_fft
    return np.array([ fftpack.ifft2( psi1_fft_electron ),
                      fftpack.ifft2( psi2_fft_electron ),
                      fftpack.ifft2( psi3_fft_electron ),
                      fftpack.ifft2( psi4_fft_electron ) ])
def save_Spinor(self,f1, t, Psi1_GPU,Psi2_GPU,Psi3_GPU,Psi4_GPU):
    """Write the four GPU spinor components at time step t into the open
    h5py file f1, as real/imag dataset pairs under '<k>/real/<t>' and
    '<k>/imag/<t>' for k = 1..4.  Also prints the loop progress."""
    print ' progress ', 100*t/(self.timeSteps+1), '%'
    # Pull each component from the GPU and split into real/imag datasets.
    PsiTemp = Psi1_GPU.get()
    f1['1/real/'+str(t)] = PsiTemp.real
    f1['1/imag/'+str(t)] = PsiTemp.imag
    PsiTemp = Psi2_GPU.get()
    f1['2/real/'+str(t)] = PsiTemp.real
    f1['2/imag/'+str(t)] = PsiTemp.imag
    PsiTemp = Psi3_GPU.get()
    f1['3/real/'+str(t)] = PsiTemp.real
    f1['3/imag/'+str(t)] = PsiTemp.imag
    PsiTemp = Psi4_GPU.get()
    f1['4/real/'+str(t)] = PsiTemp.real
    f1['4/imag/'+str(t)] = PsiTemp.imag
def save_Density(self,f1,t,Psi1_GPU,Psi2_GPU,Psi3_GPU,Psi4_GPU):
    """Write the probability density at time step t into the open h5py file
    f1 under dataset name str(t), as an fftshift-ed float32 array.
    Also prints the loop progress."""
    print ' progress ', 100*t/(self.timeSteps+1), '%'
    PsiTemp1 = Psi1_GPU.get()
    PsiTemp2 = Psi2_GPU.get()
    PsiTemp3 = Psi3_GPU.get()
    PsiTemp4 = Psi4_GPU.get()
    # rho = sum over the four components of |psi_k|^2.
    rho = np.abs(PsiTemp1)**2
    rho += np.abs(PsiTemp2)**2
    rho += np.abs(PsiTemp3)**2
    rho += np.abs(PsiTemp4)**2
    #print '               Save norm  = ', np.sum(rho)*self.dX*self.dY
    f1[str(t)] = np.ascontiguousarray(fftpack.fftshift(rho).astype(np.float32))
def load_Density(self, n, fileName=None):
    """Load the probability density saved at frame n.

    Parameters:
        n        : frame index (dataset name str(n), as written by save_Density).
        fileName : optional HDF5 path; defaults to self.fileName.

    Returns the density array.  Fixes: identity comparison `is None` instead
    of `== None`, and the file is now closed even if the read raises.
    """
    if fileName is None:
        fileName = self.fileName
    FILE = h5py.File(fileName)
    try:
        probability = FILE['/' + str(n)][...]
    finally:
        FILE.close()
    return probability
def load_Spinor(self, n, fileName=None):
    """Load the 4-component spinor saved at frame n by save_Spinor,
    recombining the real/imag dataset pairs into complex arrays.

    Parameters:
        n        : frame index.
        fileName : optional HDF5 path; defaults to self.fileName.

    Returns np.array([psi1, psi2, psi3, psi4]).  Fixes: identity comparison
    `is None` instead of `== None`, and the file is closed even on error.
    """
    if fileName is None:
        fileName = self.fileName
    FILE = h5py.File(fileName)
    try:
        psi1 = FILE['1/real/'+str(n)][...] + 1j*FILE['1/imag/'+str(n)][...]
        psi2 = FILE['2/real/'+str(n)][...] + 1j*FILE['2/imag/'+str(n)][...]
        psi3 = FILE['3/real/'+str(n)][...] + 1j*FILE['3/imag/'+str(n)][...]
        psi4 = FILE['4/real/'+str(n)][...] + 1j*FILE['4/imag/'+str(n)][...]
    finally:
        FILE.close()
    return np.array([psi1, psi2, psi3, psi4])
def Density_From_Spinor(self, Psi):
    """Probability density: |psi_k|^2 summed over the four spinor components."""
    return sum(np.abs(component)**2 for component in Psi)
def Norm(self, Psi):
    """L2 norm of the 4-component spinor on the grid, using the
    coordinate-space measure dX*dY."""
    total = sum(np.sum(np.abs(component)**2) for component in Psi)
    return np.sqrt(total*self.dX*self.dY)
def Norm_GPU(self, Psi1, Psi2, Psi3, Psi4):
    """L2 norm of the 4-component spinor held on the GPU, with the
    coordinate-space measure dX*dY."""
    total = 0.0
    for component in (Psi1, Psi2, Psi3, Psi4):
        total += gpuarray.sum(component.__abs__()**2).get()
    return np.sqrt(total*self.dX*self.dY)
def Normalize_GPU(self, Psi1, Psi2, Psi3, Psi4):
    """Normalize the four GPU spinor components in place so that the
    coordinate-space norm (self.Norm_GPU) becomes 1."""
    norm = self.Norm_GPU(Psi1, Psi2, Psi3, Psi4)
    for component in (Psi1, Psi2, Psi3, Psi4):
        component /= norm
#........................................................................
def Average_X(self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Expectation value <X>: sum over components of |psi_k|^2 . X,
    scaled by the grid measure dX*dY."""
    total = 0.
    for psi in (Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
        total += gpuarray.dot(psi.__abs__()**2, self.X_GPU).get()
    return total*self.dX*self.dY
def Average_Y(self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Expectation value <Y>: sum over components of |psi_k|^2 . Y,
    scaled by the grid measure dX*dY."""
    total = 0.
    for psi in (Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
        total += gpuarray.dot(psi.__abs__()**2, self.Y_GPU).get()
    return total*self.dX*self.dY
def Average_Px(self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Expectation value <Px> over the momentum grid.

    NOTE(review): scales by dX*dY (mirroring the original), not dPx*dPy --
    confirm against the normalization used in momentum representation.
    """
    total = 0.
    for psi in (Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
        total += gpuarray.dot(psi.__abs__()**2, self.Px_GPU).get()
    return total*self.dX*self.dY
def Average_Py(self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Expectation value <Py> over the momentum grid.

    NOTE(review): scales by dX*dY (mirroring the original), not dPx*dPy --
    confirm against the normalization used in momentum representation.
    """
    total = 0.
    for psi in (Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
        total += gpuarray.dot(psi.__abs__()**2, self.Py_GPU).get()
    return total*self.dX*self.dY
#........................................................................
def _Average_Alpha1( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """<alpha_1> as the full complex sum of the off-diagonal overlaps,
    scaled by the grid measure dX*dY.

    NOTE(review): superseded by Average_Alpha1 below, which keeps only the
    real parts of the two independent overlaps and doubles them.
    """
    average = gpuarray.dot(Psi4_GPU,Psi1_GPU.conj()).get()
    average += gpuarray.dot(Psi3_GPU,Psi2_GPU.conj()).get()
    average += gpuarray.dot(Psi2_GPU,Psi3_GPU.conj()).get()
    average += gpuarray.dot(Psi1_GPU,Psi4_GPU.conj()).get()
    average *= self.dX*self.dY
    return average
def Average_Alpha1(self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """<alpha_1> = 2*Re( <psi2|psi3> + <psi1|psi4> ), scaled by the grid
    measure dX*dY (real by construction)."""
    overlap = gpuarray.dot(Psi2_GPU, Psi3_GPU.conj()).get().real
    overlap += gpuarray.dot(Psi1_GPU, Psi4_GPU.conj()).get().real
    return 2.*self.dX*self.dY*overlap
def Average_Alpha2( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """<alpha_2> = i*( -<psi4 psi1*> + <psi3 psi2*> - <psi2 psi3*> + <psi1 psi4*> )
    scaled by the grid measure dX*dY.

    The result is complex; the imaginary part cancels up to round-off and
    callers (Run) keep only the real part.
    """
    average = - gpuarray.dot(Psi4_GPU,Psi1_GPU.conj()).get()
    average += gpuarray.dot(Psi3_GPU,Psi2_GPU.conj()).get()
    average += - gpuarray.dot(Psi2_GPU,Psi3_GPU.conj()).get()
    average += gpuarray.dot(Psi1_GPU,Psi4_GPU.conj()).get()
    average *= 1j*self.dX*self.dY
    return average
def Average_Beta( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """<beta> with beta = diag(+1, +1, -1, -1):
    (|psi1|^2 + |psi2|^2 - |psi3|^2 - |psi4|^2) summed over the grid and
    scaled by dX*dY.  Callers (Run) keep only the real part."""
    average = gpuarray.dot(Psi1_GPU,Psi1_GPU.conj()).get()
    average += gpuarray.dot(Psi2_GPU,Psi2_GPU.conj()).get()
    average += - gpuarray.dot(Psi3_GPU,Psi3_GPU.conj()).get()
    average += - gpuarray.dot(Psi4_GPU,Psi4_GPU.conj()).get()
    average *= self.dX*self.dY
    return average
def Average_KEnergy( self, temp_GPU, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Free-Dirac (kinetic) energy expectation value.

    Expects the spinor components already in the momentum representation
    (Run calls this after Fourier_4_X_To_P_GPU).  temp_GPU is scratch and
    is left clobbered.  Assembled term by term as
    mc^2*<beta> + c*<alpha_x px> + c*<alpha_y py>, measure dPx*dPy.
    """
    # mc^2 <beta> term: +|psi1|^2 +|psi2|^2 -|psi3|^2 -|psi4|^2.
    energy = gpuarray.sum( Psi1_GPU*Psi1_GPU.conj() ).get()
    energy += gpuarray.sum( Psi2_GPU*Psi2_GPU.conj() ).get()
    energy -= gpuarray.sum( Psi3_GPU*Psi3_GPU.conj() ).get()
    energy -= gpuarray.sum( Psi4_GPU*Psi4_GPU.conj() ).get()
    energy *= self.mass*self.c*self.c*self.dPx*self.dPy
    # c <alpha_x px> term, accumulated in the scratch buffer.
    temp_GPU *= 0.
    temp_GPU += Psi4_GPU * Psi1_GPU.conj()
    temp_GPU += Psi1_GPU * Psi4_GPU.conj()
    temp_GPU += Psi3_GPU * Psi2_GPU.conj()
    temp_GPU += Psi2_GPU * Psi3_GPU.conj()
    temp_GPU *= self.Px_GPU
    #temp_GPU *= self.c
    energy += gpuarray.sum( temp_GPU ).get()*self.dPx*self.dPy*self.c
    # c <alpha_y py> term; the -1j factor is applied after the reduction.
    temp_GPU *= 0.
    temp_GPU += Psi4_GPU * Psi1_GPU.conj()
    temp_GPU -= Psi1_GPU * Psi4_GPU.conj()
    temp_GPU -= Psi3_GPU * Psi2_GPU.conj()
    temp_GPU += Psi2_GPU * Psi3_GPU.conj()
    temp_GPU *= self.Py_GPU
    #temp_GPU *= -1j
    energy += gpuarray.sum( temp_GPU ).get()*self.dPx*self.dPy*self.c*(-1j)
    return energy
def Potential_0_Average(self, temp_GPU, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU ,t):
    """Expectation value of the scalar potential V_0 at time t.

    Launches the CUDA kernel self.Potential_0_Average_Function, which fills
    temp_GPU (scratch, clobbered) with the per-point contribution, then
    reduces it on the GPU and scales by the grid measure dX*dY."""
    self.Potential_0_Average_Function( temp_GPU,
        Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU, t , block=self.blockCUDA, grid=self.gridCUDA )
    return self.dX*self.dY * gpuarray.sum(temp_GPU).get()
def Norm_X_GPU(self, Psi1, Psi2, Psi3, Psi4):
    """L2 norm of the GPU spinor in the coordinate representation
    (measure dX*dY)."""
    total = 0.0
    for component in (Psi1, Psi2, Psi3, Psi4):
        total += gpuarray.sum(component.__abs__()**2).get()
    return np.sqrt(total*self.dX*self.dY)
def Norm_P_GPU(self, Psi1, Psi2, Psi3, Psi4):
    """L2 norm of the GPU spinor in the momentum representation
    (measure dPx*dPy)."""
    total = 0.0
    for component in (Psi1, Psi2, Psi3, Psi4):
        total += gpuarray.sum(component.__abs__()**2).get()
    return np.sqrt(total*self.dPx*self.dPy)
def Normalize_X_GPU(self, Psi1, Psi2, Psi3, Psi4):
    """Normalize the GPU spinor components in place using the
    coordinate-space norm (self.Norm_X_GPU)."""
    norm = self.Norm_X_GPU(Psi1, Psi2, Psi3, Psi4)
    for component in (Psi1, Psi2, Psi3, Psi4):
        component /= norm
def Normalize_P_GPU(self, Psi1, Psi2, Psi3, Psi4):
    """Normalize the GPU spinor components in place using the
    momentum-space norm (self.Norm_P_GPU)."""
    norm = self.Norm_P_GPU(Psi1, Psi2, Psi3, Psi4)
    for component in (Psi1, Psi2, Psi3, Psi4):
        component /= norm
#.....................................................................
def Run(self):
    """Main entry point: propagate self.Psi_init with the split-operator
    scheme (kinetic step in momentum space, potential step in coordinate
    space), recording per-step observables and writing frames to the HDF5
    file self.fileName.  Returns 0 on completion.

    Side effects: deletes any existing self.fileName, and populates
    self.Psi_end, self.Psi_init, self.timeRange and the *_average arrays.
    """
    # Start from a clean output file.
    try :
        import os
        os.remove (self.fileName)
    except OSError:
        pass
    f1 = h5py.File(self.fileName)
    print '--------------------------------------------'
    print '              Dirac Propagator 2D           '
    print '--------------------------------------------'
    print '  save Mode  =  ', self.frameSaveMode
    # Grid metadata for post-processing tools.
    f1['x_gridDIM'] = self.X_gridDIM
    f1['y_gridDIM'] = self.Y_gridDIM
    #f1['x_min'] = self.min_X
    #f1['y_min'] = self.min_Y
    f1['x_amplitude'] = self.X_amplitude
    f1['y_amplitude'] = self.Y_amplitude
    # Redundant information on dx dy dz
    f1['dx'] = self.dX
    f1['dy'] = self.dY
    f1['Potential_0_String'] = self.Potential_0_String
    f1['Potential_1_String'] = self.Potential_1_String
    f1['Potential_2_String'] = self.Potential_2_String
    f1['Potential_3_String'] = self.Potential_3_String
    # Upload the four spinor components to the GPU.
    self.Psi1_init, self.Psi2_init, self.Psi3_init, self.Psi4_init = self.Psi_init
    Psi1_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Psi1_init, dtype=np.complex128) )
    Psi2_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Psi2_init, dtype=np.complex128) )
    Psi3_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Psi3_init, dtype=np.complex128) )
    Psi4_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Psi4_init, dtype=np.complex128) )
    # Scratch buffers; only _Psi1_GPU is actually used below (as scratch).
    _Psi1_GPU = gpuarray.zeros_like(Psi1_GPU)
    _Psi2_GPU = gpuarray.zeros_like(Psi1_GPU)
    _Psi3_GPU = gpuarray.zeros_like(Psi1_GPU)
    _Psi4_GPU = gpuarray.zeros_like(Psi1_GPU)
    #
    print ' '
    print 'number of steps = ', self.timeSteps, ' dt = ',self.dt
    print 'dX = ', self.dX, 'dY = ', self.dY
    print 'dPx = ', self.dPx, 'dPy = ', self.dPy
    print ' '
    print ' '
    # Frame 0: save the initial state.
    if self.frameSaveMode=='Spinor':
        self.save_Spinor(f1, 0 , Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
    if self.frameSaveMode=='Density':
        self.save_Density(f1, 0, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
    # ............................... Main LOOP .....................................
    # CUDA launch configuration: one thread per x-point, one block per y-row.
    self.blockCUDA = (self.X_gridDIM,1,1)
    self.gridCUDA = (self.Y_gridDIM,1)
    timeRange = range(1, self.timeSteps+1)
    initial_time = time.time()
    # Per-step observable histories.
    X_average = []
    Y_average = []
    Alpha1_average = []
    Alpha2_average = []
    Beta_average = []
    KEnergy_average = []
    Potential_0_average = []
    self.Normalize_X_GPU( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
    for t_index in timeRange:
        t_GPU = np.float64(self.dt * t_index )
        # Coordinate-space observables for this step.
        X_average.append( self.Average_X( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
        Y_average.append( self.Average_Y( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
        Alpha1_average.append( self.Average_Alpha1( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
        Alpha2_average.append( self.Average_Alpha2( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
        Beta_average.append( self.Average_Beta( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
        Potential_0_average.append(
            self.Potential_0_Average( _Psi1_GPU, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU ,t_GPU) )
        # To momentum space for the kinetic half of the split-operator step.
        self.Fourier_4_X_To_P_GPU( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
        #..................................................
        #          Kinetic
        #..................................................
        self.Normalize_P_GPU( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
        KEnergy_average.append(
            self.Average_KEnergy( _Psi1_GPU, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU) )
        self.DiracPropagatorK( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
            block=self.blockCUDA, grid=self.gridCUDA )
        # Back to coordinate space for the potential part.
        self.Fourier_4_P_To_X_GPU( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
        #..............................................
        #           Mass potential
        #..............................................
        self.DiracPropagatorA( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
            t_GPU, block=self.blockCUDA, grid=self.gridCUDA )
        #  Absorbing boundary
        self.DiracAbsorbBoundary_xy(
            Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU,
            block=self.blockCUDA, grid=self.gridCUDA )
        #
        #    Normalization
        #
        self.Normalize_X_GPU( Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
        #  Saving files
        if t_index % self.skipFrames == 0:
            if self.frameSaveMode=='Spinor':
                self.save_Spinor( f1,t_index,Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU)
            if self.frameSaveMode=='Density':
                self.save_Density(f1,t_index,Psi1_GPU,Psi2_GPU,Psi3_GPU,Psi4_GPU)
    final_time = time.time()
    print ' computational time  = ', final_time - initial_time
    f1.close()
    # Pull the final state back to the CPU and expose the histories.
    self.Psi_end = np.array( [ Psi1_GPU.get(), Psi2_GPU.get(), Psi3_GPU.get(), Psi4_GPU.get() ] )
    self.Psi_init = np.array( [ self.Psi1_init, self.Psi2_init, self.Psi3_init, self.Psi4_init ] )
    self.timeRange = np.array(timeRange)
    # Observables are real up to round-off; keep the real parts only.
    self.X_average = np.array(X_average).real
    self.Y_average = np.array(Y_average).real
    self.Alpha1_average = np.array(Alpha1_average).real
    self.Alpha2_average = np.array(Alpha2_average).real
    self.Beta_average = np.array(Beta_average).real
    self.KEnergy_average = np.array( KEnergy_average ).real
    self.Potential_0_average = np.array( Potential_0_average ).real
    return 0
| |
import pytest
import os
from glob import glob
from backports.tempfile import TemporaryDirectory
import pickle
import time
from velox import VeloxObject, register_object, load_velox_object
from velox.exceptions import VeloxCreationError, VeloxConstraintError
from velox.tools import timestamp
from velox_test_utils import create_class, RESET
import logging
logging.basicConfig(level=logging.DEBUG)
def test_inconsistent_load_type():
    """load() must raise TypeError when a _load() implementation returns
    something that is not an instance of the registered class."""
    @register_object(registered_name='inconsistent')
    class InconsistentTypedModel(VeloxObject):
        def __init__(self):
            super(InconsistentTypedModel, self).__init__()
            self.foo = 'bar'
        def _save(self, fileobject):
            pickle.dump(self.foo, fileobject)
        @classmethod
        def _load(cls, fileobject):
            # Deliberately returns the wrong type (a str, not an instance).
            return 'this is an inconsistent return type'
    with TemporaryDirectory() as d:
        InconsistentTypedModel().save(prefix=d)
        with pytest.raises(TypeError):
            o = InconsistentTypedModel.load(prefix=d)
    RESET()
def test_missing_super_class_init():
    """Calling any method on an instance whose __init__ skipped the
    VeloxObject constructor must raise VeloxCreationError."""
    @register_object(registered_name='missing')
    class MissingInit(VeloxObject):
        def __init__(self):
            # Deliberately does NOT call the VeloxObject constructor.
            pass
        def _save(self, fileobject):
            pass
        @classmethod
        def _load(cls, fileobject):
            return cls()
        def method(self):
            pass
    with pytest.raises(VeloxCreationError):
        MissingInit().method()
    RESET()
@register_object(
    registered_name='veloxmodel',
    version='0.1.0'
)
class VeloxModel(VeloxObject):
    """Minimal registered VeloxObject used as a module-level fixture:
    wraps an arbitrary payload and pickles the whole instance."""
    def __init__(self, o=None):
        super(VeloxModel, self).__init__()
        # Arbitrary payload exposed to tests via the _o attribute.
        self._o = o
    def _save(self, fileobject):
        pickle.dump(self, fileobject)
    @classmethod
    def _load(cls, fileobject):
        return pickle.load(fileobject)
def test_load_save_self():
    """Round-trip: a saved VeloxModel can be loaded back and still exposes
    its payload."""
    with TemporaryDirectory() as workdir:
        VeloxModel({1: 2}).save(prefix=workdir)
        loaded = VeloxModel.load(prefix=workdir)
        assert loaded._o[1] == 2
    RESET()
def test_correct_definition():
    """A properly registered class can be instantiated without raising."""
    instance = create_class('correctmodel')()
    assert instance is not None
    RESET()
def test_missing_registration():
    """Instantiating a VeloxObject subclass that was never passed through
    register_object must raise VeloxCreationError."""
    class IncorrectModel(VeloxObject):
        def __init__(self, clf=None):
            super(IncorrectModel, self).__init__()
            self._clf = clf
        def _save(self, fileobject):
            pickle.dump(self, fileobject)
        @classmethod
        def _load(cls, fileobject):
            return pickle.load(fileobject)
    with pytest.raises(VeloxCreationError):
        _ = IncorrectModel()
def test_double_registration():
    """Registering the same name twice must raise VeloxCreationError."""
    create_class('foobar')
    with pytest.raises(VeloxCreationError):
        create_class('foobar')
    RESET()
def test_prefix_defaults():
    """_default_prefix falls back to the cwd and honors $VELOX_ROOT.

    Fix: the environment variable is now removed in a finally block, so a
    failing assertion no longer leaks VELOX_ROOT into subsequent tests.
    """
    from velox.obj import _default_prefix
    with TemporaryDirectory() as d:
        # Without VELOX_ROOT the prefix defaults to the current directory.
        assert _default_prefix() == os.path.abspath('.')
        os.environ['VELOX_ROOT'] = d
        try:
            assert _default_prefix() == d
        finally:
            del os.environ['VELOX_ROOT']
def test_basic_saving_loading():
    """Each save adds one file under the prefix; load returns the payload of
    the newest save."""
    Model = create_class('foobar')
    with TemporaryDirectory() as workdir:
        first_path = Model({}).save(prefix=workdir)
        assert len(glob(os.path.join(workdir, '*'))) == 1
        assert os.path.split(first_path)[0] == workdir
        Model({'foo': 'bar'}).save(prefix=workdir)
        assert len(glob(os.path.join(workdir, '*'))) == 2
        assert Model.load(prefix=workdir)._o['foo'] == 'bar'
    RESET()
def test_reloading():
    """Scheduled reload: a background reload every 0.5 s must pick up a
    newly saved artifact (observed through current_sha and obj())."""
    Model = create_class('foobar')
    with TemporaryDirectory() as d:
        m = Model({'foo': 'bar'})
        p = m.save(prefix=d)
        o = Model({})
        # No load has happened yet, so no sha is known.
        assert o.current_sha is None
        o.reload(prefix=d, scheduled=True, seconds=0.5)
        # Let the first scheduled reload fire.
        time.sleep(0.75)
        cur_sha = o.current_sha
        assert cur_sha is not None
        assert o.obj()['foo'] == 'bar'
        Model({'foo': 'baz'}).save(prefix=d)
        # Wait for the next scheduled reload to see the new save.
        time.sleep(1)
        assert cur_sha != o.current_sha
        assert o.obj()['foo'] == 'baz'
        # current_sha is read-only.
        with pytest.raises(ValueError):
            o.current_sha = 'foo'
        o.cancel_scheduled_reload()
        # Cancelling twice has nothing to cancel.
        with pytest.raises(ValueError):
            o.cancel_scheduled_reload()
    RESET()
def test_local_cache_save_on_load():
    """Loading with local_cache_dir must drop a copy of the loaded artifact
    into that cache directory."""
    RESET()
    Model = create_class('foobar')
    with TemporaryDirectory() as prefix_dir:
        with TemporaryDirectory() as cache_dir:
            saved_path = Model({'foo': 'bar'}).save(prefix=prefix_dir)
            Model.load(prefix=prefix_dir, local_cache_dir=cache_dir)
            cached_copy = os.path.join(cache_dir, os.path.basename(saved_path))
            assert os.path.isfile(cached_copy)
    RESET()
def test_local_cache_load():
    """A repeat load with local_cache_dir must come from the cache: corrupt
    the cached copy and the cached load fails, while a cache-less load of
    the pristine artifact still succeeds."""
    Model = create_class('foobar')
    import os
    with TemporaryDirectory() as prefix_dir:
        with TemporaryDirectory() as cache_dir:
            m = Model({'foo': 'bar'})
            p = m.save(prefix=prefix_dir)
            # First load populates the cache.
            _ = Model.load(prefix=prefix_dir, local_cache_dir=cache_dir)
            # _ = Model.load(prefix=prefix_dir, local_cache_dir=cache_dir)
            filename = os.path.basename(p)
            # if the load function is trying to load from the cache, this
            # causes an unpickling error
            with open(os.path.join(cache_dir, filename), 'w+') as fp:
                fp.write('0000000000')
            with pytest.raises(Exception):
                _ = Model.load(prefix=prefix_dir, local_cache_dir=cache_dir)
            # this should be fine, and should not raise an error
            _ = Model.load(prefix=prefix_dir)
    RESET()
def test_version_constraints():
    """Loads must honor the semver constraints declared at registration."""
    ModelA = create_class('foobar', version='0.2.1', constraints='<1.0.0')
    ModelB = create_class('foobar', version='0.3.0')
    ModelC = create_class('foobar', version='1.0.0', constraints='>=0.3.0')
    with TemporaryDirectory() as workdir:
        ModelA({'foo': 'bar'}).save(prefix=workdir)
        # Unconstrained ModelB may load the 0.2.1 artifact.
        ModelB.load(prefix=workdir)
        with pytest.raises(VeloxConstraintError):
            # 0.2.1 violates ModelC's '>=0.3.0' constraint.
            ModelC.load(prefix=workdir)
        ModelB({'foo': 'baz'}).save(prefix=workdir)
        # ModelA's '<1.0.0' constraint admits the new 0.3.0 artifact.
        assert ModelA.load(prefix=workdir).obj()['foo'] == 'baz'
    RESET()
def test_nothing_to_reload():
    """Loading from an empty prefix raises VeloxConstraintError, and a
    scheduled reload over an empty prefix must not blow up in the
    background thread."""
    Model = create_class('foobar')
    with TemporaryDirectory() as empty_dir:
        with pytest.raises(VeloxConstraintError):
            Model.load(prefix=empty_dir)
    with TemporaryDirectory() as empty_dir:
        instance = Model()
        instance.reload(prefix=empty_dir, scheduled=True, seconds=1)
        # Give the scheduled reload a chance to fire (and fail quietly).
        time.sleep(1.2)
    RESET()
def test_raises_on_empty_file(tmpdir):
    """A truncated (empty) artifact on disk must surface as a
    VeloxConstraintError on load."""
    Model = create_class('foobar')
    prefix = str(tmpdir.mkdir('sub'))
    Model({'foo': 'bar'}).save(prefix=prefix)
    saved = glob(os.path.join(prefix, '*'))[0]
    # Opening in 'w' mode truncates the saved artifact to zero bytes.
    with open(saved, 'w'):
        pass
    with pytest.raises(VeloxConstraintError):
        Model.load(prefix=prefix)
    RESET()
| |
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test elastic search using the synchronizer, i.e. as it would be used by an
user
"""
import time
import os
import sys
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
sys.path[0:0] = [""]
from elasticsearch import Elasticsearch
from pymongo import MongoClient
from tests import elastic_pair, mongo_host, STRESS_COUNT
from tests.setup_cluster import (start_replica_set,
kill_replica_set,
restart_mongo_proc,
kill_mongo_proc)
from mongo_connector.doc_managers.elastic_doc_manager import DocManager
from mongo_connector.connector import Connector
from mongo_connector.util import retry_until_ok
from pymongo.errors import OperationFailure, AutoReconnect
from tests.util import assert_soon
class ElasticsearchTestCase(unittest.TestCase):
    """Base class for all ES TestCases."""

    @classmethod
    def setUpClass(cls):
        # One shared ES client and DocManager for the whole class;
        # auto_commit_interval=0 makes writes visible immediately.
        cls.elastic_conn = Elasticsearch(hosts=[elastic_pair])
        cls.elastic_doc = DocManager(elastic_pair,
                                     auto_commit_interval=0)

    def setUp(self):
        # Create target index in elasticsearch
        self.elastic_conn.indices.create(index='test.test')
        self.elastic_conn.cluster.health(wait_for_status='yellow',
                                         index='test.test')

    def tearDown(self):
        # ignore=404: the index may already have been removed by the test.
        self.elastic_conn.indices.delete(index='test.test', ignore=404)

    def _search(self):
        """Stream every document currently in the test.test index."""
        return self.elastic_doc._stream_search(
            index="test.test",
            body={"query": {"match_all": {}}}
        )

    def _count(self):
        """Number of documents currently in the test.test index."""
        return self.elastic_conn.count(index='test.test')['count']

    def _remove(self):
        """Drop all docs of our doc_type and refresh so reads see it."""
        self.elastic_conn.indices.delete_mapping(
            index="test.test",
            doc_type=self.elastic_doc.doc_type
        )
        self.elastic_conn.indices.refresh(index="test.test")
class TestElastic(ElasticsearchTestCase):
    """ Tests the Elastic instance
    """

    @classmethod
    def setUpClass(cls):
        """ Starts the cluster
        """
        super(TestElastic, cls).setUpClass()
        # Three-member replica set; keep the ports so tests can kill/restart
        # the primary and secondary to force rollbacks.
        _, cls.secondary_p, cls.primary_p = start_replica_set('test-elastic')
        cls.conn = MongoClient(mongo_host, cls.primary_p,
                               replicaSet='test-elastic')

    @classmethod
    def tearDownClass(cls):
        """ Kills cluster instance
        """
        kill_replica_set('test-elastic')

    def tearDown(self):
        """ Ends the connector
        """
        super(TestElastic, self).tearDown()
        self.connector.join()

    def setUp(self):
        """ Starts a new connector for every test
        """
        super(TestElastic, self).setUp()
        # Fresh oplog checkpoint file for every test.
        try:
            os.unlink("config.txt")
        except OSError:
            pass
        open("config.txt", "w").close()
        self.connector = Connector(
            address='%s:%s' % (mongo_host, self.primary_p),
            oplog_checkpoint='config.txt',
            target_url=elastic_pair,
            ns_set=['test.test'],
            u_key='_id',
            auth_key=None,
            doc_manager='mongo_connector/doc_managers/elastic_doc_manager.py',
            auto_commit_interval=0
        )
        self.conn.test.test.drop()
        self.connector.start()
        # Wait until the connector has discovered the replica set and
        # the target index is empty before the test body runs.
        assert_soon(lambda: len(self.connector.shard_set) > 0)
        assert_soon(lambda: self._count() == 0)

    def test_shard_length(self):
        """Tests the shard_length to see if the shard set was recognized
            properly
        """
        self.assertEqual(len(self.connector.shard_set), 1)

    def test_insert(self):
        """Tests insert
        """
        self.conn['test']['test'].insert({'name': 'paulie'})
        assert_soon(lambda: self._count() > 0)
        result_set_1 = list(self._search())
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        # ES stores the Mongo _id as its string representation.
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])

    def test_remove(self):
        """Tests remove
        """
        self.conn['test']['test'].insert({'name': 'paulie'})
        assert_soon(lambda: self._count() == 1)
        self.conn['test']['test'].remove({'name': 'paulie'})
        assert_soon(lambda: self._count() != 1)
        self.assertEqual(self._count(), 0)

    def test_rollback(self):
        """Tests rollback. We force a rollback by adding a doc, killing the
            primary, adding another doc, killing the new primary, and then
            restarting both.
        """
        primary_conn = MongoClient(mongo_host, self.primary_p)
        self.conn['test']['test'].insert({'name': 'paul'})
        condition1 = lambda: self.conn['test']['test'].find(
            {'name': 'paul'}).count() == 1
        condition2 = lambda: self._count() == 1
        assert_soon(condition1)
        assert_soon(condition2)
        # Kill the primary; the secondary is elected the new primary.
        kill_mongo_proc(self.primary_p, destroy=False)
        new_primary_conn = MongoClient(mongo_host, self.secondary_p)
        admin = new_primary_conn['admin']
        assert_soon(lambda: admin.command("isMaster")['ismaster'])
        time.sleep(5)
        # This write reaches only the new primary; it will be rolled back.
        retry_until_ok(self.conn.test.test.insert,
                       {'name': 'pauline'})
        assert_soon(lambda: self._count() == 2)
        result_set_1 = list(self._search())
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 2)
        #make sure pauline is there
        for item in result_set_1:
            if item['name'] == 'pauline':
                self.assertEqual(item['_id'], str(result_set_2['_id']))
        # Kill the new primary and bring the original one back first, so
        # the 'pauline' write is rolled back.
        kill_mongo_proc(self.secondary_p, destroy=False)
        restart_mongo_proc(self.primary_p)
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        restart_mongo_proc(self.secondary_p)
        time.sleep(2)
        result_set_1 = list(self._search())
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['name'], 'paul')
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), 1)

    def test_stress(self):
        """Test stress by inserting and removing a large number of documents"""
        for i in range(0, STRESS_COUNT):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        condition = lambda: self._count() == STRESS_COUNT
        assert_soon(condition)
        self.assertEqual(
            set('Paul ' + str(i) for i in range(STRESS_COUNT)),
            set(item['name'] for item in self._search())
        )

    def test_stressed_rollback(self):
        """Test stressed rollback with number of documents equal to specified
            in global variable. Strategy for rollback is the same as before.
        """
        for i in range(0, STRESS_COUNT):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        condition = lambda: self._count() == STRESS_COUNT
        assert_soon(condition)
        primary_conn = MongoClient(mongo_host, self.primary_p)
        # Same kill/failover sequence as test_rollback, at volume.
        kill_mongo_proc(self.primary_p, destroy=False)
        new_primary_conn = MongoClient(mongo_host, self.secondary_p)
        admin = new_primary_conn['admin']
        assert_soon(lambda: admin.command("isMaster")['ismaster'])
        time.sleep(5)
        count = -1
        while count + 1 < STRESS_COUNT:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)})
            except (OperationFailure, AutoReconnect):
                # Retry the same document after a failover hiccup.
                time.sleep(1)
        assert_soon(lambda: self._count()
                    == self.conn['test']['test'].find().count())
        result_set_1 = self._search()
        for item in result_set_1:
            if 'Pauline' in item['name']:
                result_set_2 = self.conn['test']['test'].find_one(
                    {'name': item['name']})
                self.assertEqual(item['_id'], str(result_set_2['_id']))
        # Roll the 'Pauline' writes back by restoring the old primary first.
        kill_mongo_proc(self.secondary_p, destroy=False)
        restart_mongo_proc(self.primary_p)
        db_admin = primary_conn["admin"]
        assert_soon(lambda: db_admin.command("isMaster")['ismaster'])
        restart_mongo_proc(self.secondary_p)
        search = self._search
        condition = lambda: sum(1 for _ in search()) == STRESS_COUNT
        assert_soon(condition)
        result_set_1 = list(self._search())
        self.assertEqual(len(result_set_1), STRESS_COUNT)
        for item in result_set_1:
            self.assertTrue('Paul' in item['name'])
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), STRESS_COUNT)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
"""Undocumented Module"""
__all__ = ['Messenger']
from PythonUtil import *
from direct.directnotify import DirectNotifyGlobal
import types
from panda3d.core import ConfigVariableBool
# If using the Toontown ActiveX launcher, this must be set true.
# Also, Panda must be compiled with SIMPLE_THREADS or no HAVE_THREADS
# at all. In the normal Panda case, this should be set false.
# Select the Lock implementation used by Messenger below: a delayed,
# bootstrap-safe shim when 'delay-messenger-lock' is set, else the real one.
if ConfigVariableBool('delay-messenger-lock', False).getValue():
    class Lock:
        """ This is a cheesy delayed implementation of Lock, designed to
        support the Toontown ActiveX launch, which must import Messenger
        before it has downloaded the rest of Panda.  Note that this
        cheesy lock isn't thread-safe if the application starts any
        threads before acquiring the Messenger lock the first time.
        (However, it's mostly thread-safe if Panda is compiled with
        SIMPLE_THREADS.) """

        notify = DirectNotifyGlobal.directNotify.newCategory("Messenger.Lock")

        def __init__(self):
            # Counter-based "lock" used until the real Mutex is available.
            self.locked = 0

        def acquire(self):
            # Before we download Panda, we can't use any threading
            # interfaces.  So don't, until we observe that we have some
            # actual contention on the lock.
            if self.locked:
                # We have contention.
                return self.__getLock()

            # This relies on the fact that any individual Python statement
            # is atomic.
            self.locked += 1
            if self.locked > 1:
                # Whoops, we have contention.
                self.locked -= 1
                return self.__getLock()

        def release(self):
            if self.locked:
                # Still using the old, cheesy lock.
                self.locked -= 1
                return

            # The new lock must have been put in place.  Rebind release so
            # future calls go straight to the Mutex.
            self.release = self.lock.release
            return self.lock.release()

        def __getLock(self):
            # Now that we've started Panda, it's safe to import the Mutex
            # class, which becomes our actual lock.

            # From now on, this lock will be used.
            self.notify.info("Acquiring Panda lock for the first time.")
            from pandac.PandaModules import Thread, Mutex
            # setdefault guards against two threads racing to install it.
            self.__dict__.setdefault('lock', Mutex('Messenger'))
            self.lock.acquire()
            self.acquire = self.lock.acquire

            # Wait for the cheesy lock to be released before we return.
            self.notify.info("Waiting for cheesy lock to be released.")
            while self.locked:
                Thread.forceYield()
            self.notify.info("Got cheesy lock.")

            # We return with the lock acquired.

else:
    # In the normal case, there's no reason not to import all of
    # libpanda right away, and so we can just use Lock directly.  This
    # is perfectly thread-safe.
    from direct.stdpy.threading import Lock
class Messenger:
    """Global event dispatcher.  Objects register interest in a named
    event via accept(); when anyone (Python or C++) send()s that event,
    every registered callback runs, with optional extra arguments.
    All shared tables are guarded by self.lock, so events may be sent
    and accepted from multiple threads."""
    notify = DirectNotifyGlobal.directNotify.newCategory("Messenger")
    def __init__(self):
        """
        One is keyed off the event name. It has the following structure:
        {event1: {object1: [method, extraArgs, persistent],
                  object2: [method, extraArgs, persistent]},
         event2: {object1: [method, extraArgs, persistent],
                  object2: [method, extraArgs, persistent]}}
        This dictionary allow for efficient callbacks when the messenger
        hears an event.
        A second dictionary remembers which objects are accepting which
        events. This allows for efficient ignoreAll commands.
        Or, for an example with more real data:
        {'mouseDown': {avatar: [avatar.jump, [2.0], 1]}}
        """
        # eventName->objMsgrId->callbackInfo
        self.__callbacks = {}
        # objMsgrId->set(eventName)
        self.__objectEvents = {}
        # Monotonically increasing serial used by _getMessengerId.
        self._messengerIdGen = 0
        # objMsgrId->listenerObject (reference-counted; see _storeObject)
        self._id2object = {}
        # A mapping of taskChain -> eventList, used for sending events
        # across task chains (and therefore across threads).
        self._eventQueuesByTaskChain = {}
        # This protects the data structures within this object from
        # multithreaded access.
        self.lock = Lock()
        if __debug__:
            self.__isWatching=0
            self.__watching={}
        # I'd like this to be in the __debug__, but I fear that someone will
        # want this in a release build. If you're sure that that will not be
        # then please remove this comment and put the quiet/verbose stuff
        # under __debug__.
        self.quieting={"NewFrame":1,
                       "avatarMoving":1,
                       "event-loop-done":1,
                       'collisionLoopFinished':1,
                       } # see def quiet()
    def _getMessengerId(self, object):
        """Return the hashable id that keys *object* in the messenger
        tables: a (className, serial) tuple cached on the object itself."""
        # TODO: allocate this id in DirectObject.__init__ and get derived
        # classes to call down (speed optimization, assuming objects
        # accept/ignore more than once over their lifetime)
        # get unique messenger id for this object
        # assumes lock is held.
        if not hasattr(object, '_MSGRmessengerId'):
            object._MSGRmessengerId = (object.__class__.__name__, self._messengerIdGen)
            self._messengerIdGen += 1
        return object._MSGRmessengerId
    def _storeObject(self, object):
        # store reference-counted reference to object in case we need to
        # retrieve it later. assumes lock is held.
        id = self._getMessengerId(object)
        if id not in self._id2object:
            self._id2object[id] = [1, object]
        else:
            self._id2object[id][0] += 1
    def _getObject(self, id):
        # Return the live object registered under this messenger id.
        return self._id2object[id][1]
    def _getObjects(self):
        # Return a snapshot list (taken under the lock) of every object
        # currently registered with the messenger.
        self.lock.acquire()
        try:
            objs = []
            for refCount, obj in self._id2object.itervalues():
                objs.append(obj)
            return objs
        finally:
            self.lock.release()
    def _getNumListeners(self, event):
        # Number of objects currently accepting this event.
        return len(self.__callbacks.get(event, {}))
    def _getEvents(self):
        # All event names that currently have at least one listener.
        return self.__callbacks.keys()
    def _releaseObject(self, object):
        # Decrement the refcount taken by _storeObject, dropping the
        # object from _id2object when it reaches zero.
        # assumes lock is held.
        id = self._getMessengerId(object)
        if id in self._id2object:
            record = self._id2object[id]
            record[0] -= 1
            if record[0] <= 0:
                del self._id2object[id]
    def accept(self, event, object, method, extraArgs=[], persistent=1):
        """ accept(self, string, DirectObject, Function, List, Boolean)
        Make this object accept this event. When the event is
        sent (using Messenger.send or from C++), method will be executed,
        optionally passing in extraArgs.
        If the persistent flag is set, it will continue to respond
        to this event, otherwise it will respond only once.
        """
        # NOTE(review): extraArgs=[] is a shared mutable default; the list
        # is only stored and concatenated (never mutated) below, so it is
        # harmless here, but callers should still pass their own list.
        notifyDebug = Messenger.notify.getDebug()
        if notifyDebug:
            Messenger.notify.debug(
                "object: %s (%s)\n accepting: %s\n method: %s\n extraArgs: %s\n persistent: %s" %
                (safeRepr(object), self._getMessengerId(object), event, safeRepr(method),
                 safeRepr(extraArgs), persistent))
        # Make sure that the method is callable
        assert hasattr(method, '__call__'), (
            "method not callable in accept (ignoring): %s %s"%
            (safeRepr(method), safeRepr(extraArgs)))
        # Make sure extraArgs is a list or tuple
        if not (isinstance(extraArgs, list) or isinstance(extraArgs, tuple) or isinstance(extraArgs, set)):
            raise TypeError, "A list is required as extraArgs argument"
        self.lock.acquire()
        try:
            acceptorDict = self.__callbacks.setdefault(event, {})
            id = self._getMessengerId(object)
            # Make sure we are not inadvertently overwriting an existing event
            # on this particular object.
            if id in acceptorDict:
                # TODO: we're replacing the existing callback. should this be an error?
                if notifyDebug:
                    oldMethod = acceptorDict[id][0]
                    if oldMethod == method:
                        self.notify.warning(
                            "object: %s was already accepting: \"%s\" with same callback: %s()" %
                            (object.__class__.__name__, safeRepr(event), method.__name__))
                    else:
                        self.notify.warning(
                            "object: %s accept: \"%s\" new callback: %s() supplanting old callback: %s()" %
                            (object.__class__.__name__, safeRepr(event), method.__name__, oldMethod.__name__))
            acceptorDict[id] = [method, extraArgs, persistent]
            # Remember that this object is listening for this event
            eventDict = self.__objectEvents.setdefault(id, {})
            if event not in eventDict:
                # First time this object accepts this event: take a
                # reference so _getObject can find it later.
                self._storeObject(object)
                eventDict[event] = None
        finally:
            self.lock.release()
    def ignore(self, event, object):
        """ ignore(self, string, DirectObject)
        Make this object no longer respond to this event.
        It is safe to call even if it was not already accepting
        """
        if Messenger.notify.getDebug():
            Messenger.notify.debug(
                safeRepr(object) + ' (%s)\n now ignoring: ' % (self._getMessengerId(object), ) + safeRepr(event))
        self.lock.acquire()
        try:
            id = self._getMessengerId(object)
            # Find the dictionary of all the objects accepting this event
            acceptorDict = self.__callbacks.get(event)
            # If this object is there, delete it from the dictionary
            if acceptorDict and id in acceptorDict:
                del acceptorDict[id]
                # If this dictionary is now empty, remove the event
                # entry from the Messenger altogether
                if (len(acceptorDict) == 0):
                    del self.__callbacks[event]
            # This object is no longer listening for this event
            eventDict = self.__objectEvents.get(id)
            if eventDict and event in eventDict:
                del eventDict[event]
                if (len(eventDict) == 0):
                    del self.__objectEvents[id]
                # Drop the reference taken in accept().
                self._releaseObject(object)
        finally:
            self.lock.release()
    def ignoreAll(self, object):
        """
        Make this object no longer respond to any events it was accepting
        Useful for cleanup
        """
        if Messenger.notify.getDebug():
            Messenger.notify.debug(
                safeRepr(object) + ' (%s)\n now ignoring all events' % (self._getMessengerId(object), ))
        self.lock.acquire()
        try:
            id = self._getMessengerId(object)
            # Get the list of events this object is listening to
            eventDict = self.__objectEvents.get(id)
            if eventDict:
                for event in eventDict.keys():
                    # Find the dictionary of all the objects accepting this event
                    acceptorDict = self.__callbacks.get(event)
                    # If this object is there, delete it from the dictionary
                    if acceptorDict and id in acceptorDict:
                        del acceptorDict[id]
                        # If this dictionary is now empty, remove the event
                        # entry from the Messenger altogether
                        if (len(acceptorDict) == 0):
                            del self.__callbacks[event]
                    # Release once per accepted event, mirroring the
                    # per-event reference taken in accept().
                    self._releaseObject(object)
                del self.__objectEvents[id]
        finally:
            self.lock.release()
    def getAllAccepting(self, object):
        """
        Returns the list of all events accepted by the indicated object.
        """
        self.lock.acquire()
        try:
            id = self._getMessengerId(object)
            # Get the list of events this object is listening to
            eventDict = self.__objectEvents.get(id)
            if eventDict:
                return eventDict.keys()
            return []
        finally:
            self.lock.release()
    def isAccepting(self, event, object):
        """ isAccepting(self, string, DirectOject)
        Is this object accepting this event?
        """
        self.lock.acquire()
        try:
            acceptorDict = self.__callbacks.get(event)
            id = self._getMessengerId(object)
            if acceptorDict and id in acceptorDict:
                # Found it, return true
                return 1
            # If we looked in both dictionaries and made it here
            # that object must not be accepting that event.
            return 0
        finally:
            self.lock.release()
    def whoAccepts(self, event):
        """
        Return objects accepting the given event
        """
        # NOTE(review): returns the internal acceptor dict itself (not a
        # copy) and does not take the lock; treat the result as read-only.
        return self.__callbacks.get(event)
    def isIgnoring(self, event, object):
        """ isIgnorning(self, string, DirectObject)
        Is this object ignoring this event?
        """
        return (not self.isAccepting(event, object))
    def send(self, event, sentArgs=[], taskChain = None):
        """
        Send this event, optionally passing in arguments
        event is usually a string.
        sentArgs is a list of any data that you want passed along to the
        handlers listening to this event.
        If taskChain is not None, it is the name of the task chain
        which should receive the event. If taskChain is None, the
        event is handled immediately. Setting a non-None taskChain
        will defer the event (possibly till next frame or even later)
        and create a new, temporary task within the named taskChain,
        but this is the only way to send an event across threads.
        """
        if Messenger.notify.getDebug() and not self.quieting.get(event):
            assert Messenger.notify.debug(
                'sent event: %s sentArgs = %s, taskChain = %s' % (
                event, sentArgs, taskChain))
        self.lock.acquire()
        try:
            foundWatch=0
            if __debug__:
                # Watch patterns are substring matches against the event
                # name; a hit makes __dispatch print the delivery.
                if self.__isWatching:
                    for i in self.__watching.keys():
                        if str(event).find(i) >= 0:
                            foundWatch=1
                            break
            acceptorDict = self.__callbacks.get(event)
            if not acceptorDict:
                if __debug__:
                    if foundWatch:
                        print "Messenger: \"%s\" was sent, but no function in Python listened."%(event,)
                return
            if taskChain:
                # Queue the event onto the indicated task chain.
                from direct.task.TaskManagerGlobal import taskMgr
                queue = self._eventQueuesByTaskChain.setdefault(taskChain, [])
                queue.append((acceptorDict, event, sentArgs, foundWatch))
                if len(queue) == 1:
                    # If this is the first (only) item on the queue,
                    # spawn the task to empty it.
                    taskMgr.add(self.__taskChainDispatch, name = 'Messenger-%s' % (taskChain),
                                extraArgs = [taskChain], taskChain = taskChain,
                                appendTask = True)
            else:
                # Handle the event immediately.
                self.__dispatch(acceptorDict, event, sentArgs, foundWatch)
        finally:
            self.lock.release()
    def __taskChainDispatch(self, taskChain, task):
        """ This task is spawned each time an event is sent across
        task chains. Its job is to empty the task events on the queue
        for this particular task chain. This guarantees that events
        are still delivered in the same order they were sent. """
        while True:
            eventTuple = None
            self.lock.acquire()
            try:
                # Pop one queued event (if any) under the lock.
                queue = self._eventQueuesByTaskChain.get(taskChain, None)
                if queue:
                    eventTuple = queue[0]
                    del queue[0]
                if not queue:
                    # The queue is empty, we're done.
                    if queue is not None:
                        del self._eventQueuesByTaskChain[taskChain]
                if not eventTuple:
                    # No event; we're done.
                    return task.done
                self.__dispatch(*eventTuple)
            finally:
                self.lock.release()
        return task.done
    def __dispatch(self, acceptorDict, event, sentArgs, foundWatch):
        # Deliver one event to every acceptor. Called with self.lock
        # held; the lock is released around each callback (see below).
        for id in acceptorDict.keys():
            # We have to make this apparently redundant check, because
            # it is possible that one object removes its own hooks
            # in response to a handler called by a previous object.
            #
            # NOTE: there is no danger of skipping over objects due to
            # modifications to acceptorDict, since the for..in above
            # iterates over a list of objects that is created once at
            # the start
            callInfo = acceptorDict.get(id)
            if callInfo:
                method, extraArgs, persistent = callInfo
                # If this object was only accepting this event once,
                # remove it from the dictionary
                if not persistent:
                    # This object is no longer listening for this event
                    eventDict = self.__objectEvents.get(id)
                    if eventDict and event in eventDict:
                        del eventDict[event]
                        if (len(eventDict) == 0):
                            del self.__objectEvents[id]
                        self._releaseObject(self._getObject(id))
                    del acceptorDict[id]
                    # If the dictionary at this event is now empty, remove
                    # the event entry from the Messenger altogether
                    if (event in self.__callbacks \
                            and (len(self.__callbacks[event]) == 0)):
                        del self.__callbacks[event]
                if __debug__:
                    if foundWatch:
                        print "Messenger: \"%s\" --> %s%s"%(
                            event,
                            self.__methodRepr(method),
                            tuple(extraArgs + sentArgs))
                #print "Messenger: \"%s\" --> %s%s"%(
                #            event,
                #            self.__methodRepr(method),
                #            tuple(extraArgs + sentArgs))
                # It is important to make the actual call here, after
                # we have cleaned up the accept hook, because the
                # method itself might call accept() or acceptOnce()
                # again.
                assert hasattr(method, '__call__')
                # Release the lock temporarily while we call the method.
                method (*(extraArgs + sentArgs))
                    self.lock.acquire()
    def clear(self):
        """
        Start fresh with a clear dict
        """
        self.lock.acquire()
        try:
            self.__callbacks.clear()
            self.__objectEvents.clear()
            self._id2object.clear()
        finally:
            self.lock.release()
    def isEmpty(self):
        # True when no events have any listeners at all.
        return (len(self.__callbacks) == 0)
    def getEvents(self):
        # Public accessor for the list of events with listeners.
        return self.__callbacks.keys()
    def replaceMethod(self, oldMethod, newFunction):
        """
        This is only used by Finder.py - the module that lets
        you redefine functions with Control-c-Control-v
        """
        # NOTE(review): relies on a module-level ``import types`` that is
        # not visible in this chunk -- confirm it exists at file scope
        # (detailedRepr below imports it locally instead).
        retFlag = 0
        for entry in self.__callbacks.items():
            event, objectDict = entry
            for objectEntry in objectDict.items():
                object, params = objectEntry
                method = params[0]
                if (type(method) == types.MethodType):
                    function = method.im_func
                else:
                    function = method
                #print ('function: ' + repr(function) + '\n' +
                #       'method: ' + repr(method) + '\n' +
                #       'oldMethod: ' + repr(oldMethod) + '\n' +
                #       'newFunction: ' + repr(newFunction) + '\n')
                if (function == oldMethod):
                    newMethod = types.MethodType(
                        newFunction, method.im_self, method.im_class)
                    params[0] = newMethod
                    # Found it, return true
                    retFlag += 1
        # didn't find that method, return false
        return retFlag
    def toggleVerbose(self):
        # Flip the debug flag on the notify category; when turning it on,
        # remind the user which events are suppressed by quiet().
        isVerbose = 1 - Messenger.notify.getDebug()
        Messenger.notify.setDebug(isVerbose)
        if isVerbose:
            print "Verbose mode true. quiet list = %s"%(
                self.quieting.keys(),)
    if __debug__:
        def watch(self, needle):
            """
            return a matching event (needle) if found (in haystack).
            This is primarily a debugging tool.
            This is intended for debugging use only.
            This function is not defined if python is ran with -O (optimize).
            See Also: unwatch
            """
            if not self.__watching.get(needle):
                self.__isWatching += 1
                self.__watching[needle]=1
        def unwatch(self, needle):
            """
            return a matching event (needle) if found (in haystack).
            This is primarily a debugging tool.
            This is intended for debugging use only.
            This function is not defined if python is ran with -O (optimize).
            See Also: watch
            """
            if self.__watching.get(needle):
                self.__isWatching -= 1
                del self.__watching[needle]
        def quiet(self, message):
            """
            When verbose mode is on, don't spam the output with messages
            marked as quiet.
            This is primarily a debugging tool.
            This is intended for debugging use only.
            This function is not defined if python is ran with -O (optimize).
            See Also: unquiet
            """
            if not self.quieting.get(message):
                self.quieting[message]=1
        def unquiet(self, message):
            """
            Remove a message from the list of messages that are not reported
            in verbose mode.
            This is primarily a debugging tool.
            This is intended for debugging use only.
            This function is not defined if python is ran with -O (optimize).
            See Also: quiet
            """
            if self.quieting.get(message):
                del self.quieting[message]
    def find(self, needle):
        """
        return a matching event (needle) if found (in haystack).
        This is primarily a debugging tool.
        """
        keys = self.__callbacks.keys()
        keys.sort()
        for event in keys:
            if repr(event).find(needle) >= 0:
                print self.__eventRepr(event),
                # Returns only the first match.
                return {event: self.__callbacks[event]}
    def findAll(self, needle, limit=None):
        """
        return a dict of events (needle) if found (in haystack).
        limit may be None or an integer (e.g. 1).
        This is primarily a debugging tool.
        """
        matches = {}
        keys = self.__callbacks.keys()
        keys.sort()
        for event in keys:
            if repr(event).find(needle) >= 0:
                print self.__eventRepr(event),
                matches[event] = self.__callbacks[event]
                # if the limit is not None, decrement and
                # check for break:
                if limit > 0:
                    limit -= 1
                    if limit == 0:
                        break
        return matches
    def __methodRepr(self, method):
        """
        return string version of class.method or method.
        """
        if (type(method) == types.MethodType):
            functionName = method.im_class.__name__ + '.' + \
                method.im_func.__name__
        else:
            if hasattr(method, "__name__"):
                functionName = method.__name__
            else:
                return ""
        return functionName
    def __eventRepr(self, event):
        """
        Compact version of event, acceptor pairs
        """
        # NOTE: local name ``str`` shadows the builtin for the rest of
        # this method (pre-existing style in this file).
        str = event.ljust(32) + '\t'
        acceptorDict = self.__callbacks[event]
        for key, (method, extraArgs, persistent) in acceptorDict.items():
            str = str + self.__methodRepr(method) + ' '
        str = str + '\n'
        return str
    def __repr__(self):
        """
        Compact version of event, acceptor pairs
        """
        str = "The messenger is currently handling:\n" + "="*64 + "\n"
        keys = self.__callbacks.keys()
        keys.sort()
        for event in keys:
            str += self.__eventRepr(event)
        # Print out the object: event dictionary too
        str += "="*64 + "\n"
        for key, eventDict in self.__objectEvents.items():
            object = self._getObject(key)
            str += "%s:\n" % repr(object)
            for event in eventDict.keys():
                str += "  %s\n" % repr(event)
        str += "="*64 + "\n" + "End of messenger info.\n"
        return str
    def detailedRepr(self):
        """
        Print out the table in a detailed readable format
        """
        import types
        str = 'Messenger\n'
        str = str + '='*50 + '\n'
        keys = self.__callbacks.keys()
        keys.sort()
        for event in keys:
            acceptorDict = self.__callbacks[event]
            str = str + 'Event: ' + event + '\n'
            for key in acceptorDict.keys():
                function, extraArgs, persistent = acceptorDict[key]
                object = self._getObject(key)
                if (type(object) == types.InstanceType):
                    className = object.__class__.__name__
                else:
                    className = "Not a class"
                functionName = function.__name__
                str = (str + '\t' +
                       'Acceptor: ' + className + ' instance' + '\n\t' +
                       'Function name:' + functionName + '\n\t' +
                       'Extra Args: ' + repr(extraArgs) + '\n\t' +
                       'Persistent: ' + repr(persistent) + '\n')
                # If this is a class method, get its actual function
                if (type(function) == types.MethodType):
                    str = (str + '\t' +
                           'Method: ' + repr(function) + '\n\t' +
                           'Function: ' + repr(function.im_func) + '\n')
                else:
                    str = (str + '\t' +
                           'Function: ' + repr(function) + '\n')
        str = str + '='*50 + '\n'
        return str
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VaultsOperations(object):
"""VaultsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.keyvault.v2021_04_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        vault_name,  # type: str
        parameters,  # type: "_models.VaultCreateOrUpdateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Vault"
        """Issue the initial PUT of the create-or-update long-running
        operation and deserialize the 200/201 response into a Vault.
        begin_create_or_update drives polling from this raw response."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Vault"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-04-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and send the PUT.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'VaultCreateOrUpdateParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both success codes carry a Vault payload.
        if response.status_code == 200:
            deserialized = self._deserialize('Vault', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('Vault', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        vault_name,  # type: str
        parameters,  # type: "_models.VaultCreateOrUpdateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.Vault"]
        """Create or update a key vault in the specified subscription.
        :param resource_group_name: The name of the Resource Group to which the server belongs.
        :type resource_group_name: str
        :param vault_name: Name of the vault.
        :type vault_name: str
        :param parameters: Parameters to create or update the vault.
        :type parameters: ~azure.mgmt.keyvault.v2021_04_01_preview.models.VaultCreateOrUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either Vault or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.keyvault.v2021_04_01_preview.models.Vault]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Vault"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: kick off the operation with the initial PUT.
            # cls=lambda x,y,z: x makes the helper hand back the raw
            # PipelineResponse, which the poller needs to track the LRO.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                vault_name=vault_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; the polling
        # method must not see them.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into a Vault (or cls(...)).
            deserialized = self._deserialize('Vault', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from the saved continuation token; no
            # initial request was made in this case.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}'}  # type: ignore
def update(
self,
resource_group_name, # type: str
vault_name, # type: str
parameters, # type: "_models.VaultPatchParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.Vault"
"""Update a key vault in the specified subscription.
:param resource_group_name: The name of the Resource Group to which the server belongs.
:type resource_group_name: str
:param vault_name: Name of the vault.
:type vault_name: str
:param parameters: Parameters to patch the vault.
:type parameters: ~azure.mgmt.keyvault.v2021_04_01_preview.models.VaultPatchParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Vault, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_04_01_preview.models.Vault
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Vault"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VaultPatchParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Vault', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Vault', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
vault_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the specified Azure key vault.
:param resource_group_name: The name of the Resource Group to which the vault belongs.
:type resource_group_name: str
:param vault_name: The name of the vault to delete.
:type vault_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
vault_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Vault"
"""Gets the specified Azure key vault.
:param resource_group_name: The name of the Resource Group to which the vault belongs.
:type resource_group_name: str
:param vault_name: The name of the vault.
:type vault_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Vault, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_04_01_preview.models.Vault
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Vault"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Vault', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}'} # type: ignore
def update_access_policy(
self,
resource_group_name, # type: str
vault_name, # type: str
operation_kind, # type: Union[str, "_models.AccessPolicyUpdateKind"]
parameters, # type: "_models.VaultAccessPolicyParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.VaultAccessPolicyParameters"
"""Update access policies in a key vault in the specified subscription.
:param resource_group_name: The name of the Resource Group to which the vault belongs.
:type resource_group_name: str
:param vault_name: Name of the vault.
:type vault_name: str
:param operation_kind: Name of the operation.
:type operation_kind: str or ~azure.mgmt.keyvault.v2021_04_01_preview.models.AccessPolicyUpdateKind
:param parameters: Access policy to merge into the vault.
:type parameters: ~azure.mgmt.keyvault.v2021_04_01_preview.models.VaultAccessPolicyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VaultAccessPolicyParameters, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_04_01_preview.models.VaultAccessPolicyParameters
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VaultAccessPolicyParameters"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_access_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'operationKind': self._serialize.url("operation_kind", operation_kind, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VaultAccessPolicyParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VaultAccessPolicyParameters', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VaultAccessPolicyParameters', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_access_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/accessPolicies/{operationKind}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VaultListResult"]
"""The List operation gets information about the vaults associated with the subscription and
within the specified resource group.
:param resource_group_name: The name of the Resource Group to which the vault belongs.
:type resource_group_name: str
:param top: Maximum number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VaultListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_04_01_preview.models.VaultListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VaultListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VaultListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults'} # type: ignore
def list_by_subscription(
self,
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VaultListResult"]
"""The List operation gets information about the vaults associated with the subscription.
:param top: Maximum number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VaultListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_04_01_preview.models.VaultListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VaultListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VaultListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/vaults'} # type: ignore
def list_deleted(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DeletedVaultListResult"]
"""Gets information about the deleted vaults in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeletedVaultListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_04_01_preview.models.DeletedVaultListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedVaultListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_deleted.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DeletedVaultListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_deleted.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/deletedVaults'} # type: ignore
def get_deleted(
self,
vault_name, # type: str
location, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DeletedVault"
"""Gets the deleted Azure key vault.
:param vault_name: The name of the vault.
:type vault_name: str
:param location: The location of the deleted vault.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedVault, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_04_01_preview.models.DeletedVault
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedVault"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
# Construct URL
url = self.get_deleted.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeletedVault', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_deleted.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/locations/{location}/deletedVaults/{vaultName}'} # type: ignore
def _purge_deleted_initial(
self,
vault_name, # type: str
location, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
# Construct URL
url = self._purge_deleted_initial.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_purge_deleted_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/locations/{location}/deletedVaults/{vaultName}/purge'} # type: ignore
    def begin_purge_deleted(
        self,
        vault_name, # type: str
        location, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Permanently deletes the specified vault. aka Purges the deleted Azure key vault.

        :param vault_name: The name of the soft-deleted vault.
        :type vault_name: str
        :param location: The location of the soft-deleted vault.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial POST when starting fresh; a saved
        # continuation token means the operation is already in flight.
        if cont_token is None:
            raw_result = self._purge_deleted_initial(
                vault_name=vault_name,
                location=location,
                cls=lambda x,y,z: x, # keep the raw pipeline response for the poller
                **kwargs
            )
        # Consumed by the initial call; must not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Purge has no response body: apply the custom deserializer (if
            # any); otherwise the poller result is None.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Select the polling strategy: True -> ARM default, False -> fire and
        # forget, anything else -> caller-supplied polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_purge_deleted.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/locations/{location}/deletedVaults/{vaultName}/purge'} # type: ignore
def list(
self,
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceListResult"]
"""The List operation gets information about the vaults associated with the subscription.
:param top: Maximum number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_04_01_preview.models.ResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
filter = "resourceType eq 'Microsoft.KeyVault/vaults'"
api_version = "2015-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resources'} # type: ignore
def check_name_availability(
self,
vault_name, # type: "_models.VaultCheckNameAvailabilityParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.CheckNameAvailabilityResult"
"""Checks that the vault name is valid and is not already in use.
:param vault_name: The name of the vault.
:type vault_name: ~azure.mgmt.keyvault.v2021_04_01_preview.models.VaultCheckNameAvailabilityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckNameAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_04_01_preview.models.CheckNameAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckNameAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.check_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vault_name, 'VaultCheckNameAvailabilityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckNameAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/checkNameAvailability'} # type: ignore
| |
from __future__ import absolute_import
import logging
import os
import shutil
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.download import (url_to_path, unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.index import Link
from pip.locations import (PIP_DELETE_MARKER_FILENAME, build_prefix)
from pip.req.req_install import InstallRequirement
from pip.utils import (display_path, rmtree, dist_in_usersite, call_subprocess,
_make_build_dir, normalize_path)
from pip.utils.logging import indent_log
from pip.vcs import vcs
from pip.wheel import wheel_ext
logger = logging.getLogger(__name__)
class Requirements(object):
    """An insertion-ordered mapping of requirement name -> requirement.

    A side list of keys preserves the order in which requirements were
    first added (this code pre-dates reliance on ordered dicts).
    """

    def __init__(self):
        self._keys = []
        self._dict = {}

    def keys(self):
        # NOTE: returns the internal list itself (not a copy), matching
        # historical behavior; callers should not mutate it.
        return self._keys

    def values(self):
        return [self._dict[k] for k in self._keys]

    def __contains__(self, item):
        return item in self._keys

    def __setitem__(self, key, value):
        # First insertion fixes the key's position; re-assignment only
        # replaces the stored value.
        if key not in self._keys:
            self._keys.append(key)
        self._dict[key] = value

    def __getitem__(self, key):
        return self._dict[key]

    def __repr__(self):
        pairs = ('%s: %s' % (repr(k), repr(self[k])) for k in self.keys())
        return 'Requirements({%s})' % ', '.join(pairs)
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False,
use_user_site=False, session=None, pycompile=True,
wheel_download_dir=None):
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
self.download_dir = download_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def add_requirement(self, install_req):
if not install_req.match_markers():
logger.debug("Ignore %s: markers %r don't match",
install_req.name, install_req.markers)
return
name = install_req.name
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
else:
if self.has_requirement(name):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, self.get_requirement(name), name))
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
def has_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements or name in self.requirement_aliases:
return True
return False
@property
def has_requirements(self):
return list(self.requirements.values()) or self.unnamed_requirements
@property
def has_editables(self):
if any(req.editable for req in self.requirements.values()):
return True
if any(req.editable for req in self.unnamed_requirements):
return True
return False
@property
def is_download(self):
if self.download_dir:
self.download_dir = os.path.expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
    def locate_files(self):
        """Assign a source directory to every known requirement.

        Walks unnamed requirements first, then named ones.  For each
        requirement that is not already satisfied (or is being upgraded),
        picks a build location; editables get a checkout under
        ``self.src_dir``.  Raises InstallationError when a requirement's
        source directory is set but missing on disk.
        """
        # FIXME: duplicates code from prepare_files; relevant code should
        # probably be factored out into a separate method
        unnamed = list(self.unnamed_requirements)
        reqs = list(self.requirements.values())
        while reqs or unnamed:
            # Drain the unnamed queue before the named one.
            if unnamed:
                req_to_install = unnamed.pop(0)
            else:
                req_to_install = reqs.pop(0)
            install_needed = True
            if not self.ignore_installed and not req_to_install.editable:
                req_to_install.check_if_exists()
                if req_to_install.satisfied_by:
                    if self.upgrade:
                        # don't uninstall conflict if user install and
                        # conflict is not user install
                        if not (self.use_user_site
                                and not dist_in_usersite(
                                    req_to_install.satisfied_by
                                )):
                            req_to_install.conflicts_with = \
                                req_to_install.satisfied_by
                        req_to_install.satisfied_by = None
                    else:
                        install_needed = False
                # Still satisfied after the upgrade check -> nothing to do.
                if req_to_install.satisfied_by:
                    logger.info(
                        'Requirement already satisfied (use --upgrade to '
                        'upgrade): %s',
                        req_to_install,
                    )
            if req_to_install.editable:
                # Editables check out into the source dir, not the build dir.
                if req_to_install.source_dir is None:
                    req_to_install.source_dir = req_to_install.build_location(
                        self.src_dir
                    )
            elif install_needed:
                req_to_install.source_dir = req_to_install.build_location(
                    self.build_dir,
                    not self.is_download,
                )
            # A source dir that was chosen but does not exist means a prior
            # step (download/unpack) was skipped.
            if (req_to_install.source_dir is not None
                    and not os.path.isdir(req_to_install.source_dir)):
                raise InstallationError(
                    'Could not install requirement %s because source folder %s'
                    ' does not exist (perhaps --no-download was used without '
                    'first running an equivalent install with --no-install?)' %
                    (req_to_install, req_to_install.source_dir)
                )
def prepare_files(self, finder):
    """
    Prepare process. Create temp directories, download and/or unpack files.

    Walks the (growing) worklist of requirements: for each one it decides
    whether an install is needed, obtains the source (vcs checkout, archive
    download, or local file), then parses the distribution's dependencies
    and appends any new sub-requirements to the worklist.

    :param finder: package finder used to locate candidate archives.
    """
    unnamed = list(self.unnamed_requirements)
    reqs = list(self.requirements.values())
    while reqs or unnamed:
        # Unnamed requirements (plain URLs/paths) are handled first.
        if unnamed:
            req_to_install = unnamed.pop(0)
        else:
            req_to_install = reqs.pop(0)
        install = True
        best_installed = False
        not_found = None
        # ############################################# #
        # # Search for archive to fulfill requirement # #
        # ############################################# #
        if not self.ignore_installed and not req_to_install.editable:
            req_to_install.check_if_exists()
            if req_to_install.satisfied_by:
                if self.upgrade:
                    if not self.force_reinstall and not req_to_install.url:
                        try:
                            url = finder.find_requirement(
                                req_to_install, self.upgrade)
                        except BestVersionAlreadyInstalled:
                            best_installed = True
                            install = False
                        except DistributionNotFound as exc:
                            # Deferred: only fatal if we actually need to
                            # fetch this requirement later on.
                            not_found = exc
                        else:
                            # Avoid the need to call find_requirement again
                            req_to_install.url = url.url
                    if not best_installed:
                        # don't uninstall conflict if user install and
                        # conflict is not user install
                        if not (self.use_user_site
                                and not dist_in_usersite(
                                    req_to_install.satisfied_by
                                )):
                            req_to_install.conflicts_with = \
                                req_to_install.satisfied_by
                        req_to_install.satisfied_by = None
                else:
                    install = False
            if req_to_install.satisfied_by:
                if best_installed:
                    logger.info(
                        'Requirement already up-to-date: %s',
                        req_to_install,
                    )
                else:
                    logger.info(
                        'Requirement already satisfied (use --upgrade to '
                        'upgrade): %s',
                        req_to_install,
                    )
        if req_to_install.editable:
            logger.info('Obtaining %s', req_to_install)
        elif install:
            if (req_to_install.url
                    and req_to_install.url.lower().startswith('file:')):
                logger.info(
                    'Unpacking %s',
                    display_path(url_to_path(req_to_install.url)),
                )
            else:
                logger.info('Downloading/unpacking %s', req_to_install)
        with indent_log():
            # ################################ #
            # # vcs update or unpack archive # #
            # ################################ #
            is_wheel = False
            if req_to_install.editable:
                if req_to_install.source_dir is None:
                    location = req_to_install.build_location(self.src_dir)
                    req_to_install.source_dir = location
                else:
                    location = req_to_install.source_dir
                if not os.path.exists(self.build_dir):
                    _make_build_dir(self.build_dir)
                req_to_install.update_editable(not self.is_download)
                if self.is_download:
                    req_to_install.run_egg_info()
                    req_to_install.archive(self.download_dir)
                else:
                    req_to_install.run_egg_info()
            elif install:
                # @@ if filesystem packages are not marked
                # editable in a req, a non deterministic error
                # occurs when the script attempts to unpack the
                # build directory
                # NB: This call can result in the creation of a temporary
                # build directory
                location = req_to_install.build_location(
                    self.build_dir,
                    not self.is_download,
                )
                unpack = True
                url = None
                # If a checkout exists, it's unwise to keep going. version
                # inconsistencies are logged later, but do not fail the
                # installation.
                if os.path.exists(os.path.join(location, 'setup.py')):
                    raise PreviousBuildDirError(
                        "pip can't proceed with requirements '%s' due to a"
                        " pre-existing build directory (%s). This is "
                        "likely due to a previous installation that failed"
                        ". pip is being responsible and not assuming it "
                        "can delete this. Please delete it and try again."
                        % (req_to_install, location)
                    )
                else:
                    # FIXME: this won't upgrade when there's an existing
                    # package unpacked in `location`
                    if req_to_install.url is None:
                        if not_found:
                            # The earlier deferred lookup failure is fatal
                            # now that we must fetch this requirement.
                            raise not_found
                        url = finder.find_requirement(
                            req_to_install,
                            upgrade=self.upgrade,
                        )
                    else:
                        # FIXME: should req_to_install.url already be a
                        # link?
                        url = Link(req_to_install.url)
                    assert url
                    if url:
                        try:
                            if (
                                url.filename.endswith(wheel_ext)
                                and self.wheel_download_dir
                            ):
                                # when doing 'pip wheel`
                                download_dir = self.wheel_download_dir
                                do_download = True
                            else:
                                download_dir = self.download_dir
                                do_download = self.is_download
                            unpack_url(
                                url, location, download_dir,
                                do_download, session=self.session,
                            )
                        except requests.HTTPError as exc:
                            logger.critical(
                                'Could not install requirement %s because '
                                'of error %s',
                                req_to_install,
                                exc,
                            )
                            raise InstallationError(
                                'Could not install requirement %s because '
                                'of HTTP error %s for URL %s' %
                                (req_to_install, exc, url)
                            )
                    else:
                        unpack = False
                if unpack:
                    is_wheel = url and url.filename.endswith(wheel_ext)
                    if self.is_download:
                        req_to_install.source_dir = location
                        if not is_wheel:
                            # FIXME:https://github.com/pypa/pip/issues/1112
                            req_to_install.run_egg_info()
                        if url and url.scheme in vcs.all_schemes:
                            req_to_install.archive(self.download_dir)
                    elif is_wheel:
                        # Wheels carry their own metadata; no egg_info run.
                        req_to_install.source_dir = location
                        req_to_install.url = url.url
                    else:
                        req_to_install.source_dir = location
                        req_to_install.run_egg_info()
                        req_to_install.assert_source_matches_version()
                    # req_to_install.req is only avail after unpack for URL
                    # pkgs repeat check_if_exists to uninstall-on-upgrade
                    # (#14)
                    if not self.ignore_installed:
                        req_to_install.check_if_exists()
                    if req_to_install.satisfied_by:
                        if self.upgrade or self.ignore_installed:
                            # don't uninstall conflict if user install and
                            # conflict is not user install
                            if not (self.use_user_site
                                    and not dist_in_usersite(
                                        req_to_install.satisfied_by)):
                                req_to_install.conflicts_with = \
                                    req_to_install.satisfied_by
                            req_to_install.satisfied_by = None
                        else:
                            logger.info(
                                'Requirement already satisfied (use '
                                '--upgrade to upgrade): %s',
                                req_to_install,
                            )
                            install = False
            # ###################### #
            # # parse dependencies # #
            # ###################### #
            if is_wheel:
                dist = list(
                    pkg_resources.find_distributions(location)
                )[0]
                if not req_to_install.req:
                    req_to_install.req = dist.as_requirement()
                    self.add_requirement(req_to_install)
                if not self.ignore_dependencies:
                    for subreq in dist.requires(
                            req_to_install.extras):
                        if self.has_requirement(
                                subreq.project_name):
                            continue
                        subreq = InstallRequirement(str(subreq),
                                                    req_to_install)
                        # New sub-requirements extend the worklist.
                        reqs.append(subreq)
                        self.add_requirement(subreq)
            # sdists
            else:
                # FIXME: shouldn't be globally added:
                finder.add_dependency_links(
                    req_to_install.dependency_links
                )
                if (req_to_install.extras):
                    logger.info(
                        "Installing extra requirements: %r",
                        ','.join(req_to_install.extras),
                    )
                if not self.ignore_dependencies:
                    for req in req_to_install.requirements(
                            req_to_install.extras):
                        try:
                            name = pkg_resources.Requirement.parse(
                                req
                            ).project_name
                        except ValueError as exc:
                            # FIXME: proper warning
                            logger.error(
                                'Invalid requirement: %r (%s) in '
                                'requirement %s',
                                req, exc, req_to_install,
                            )
                            continue
                        if self.has_requirement(name):
                            # FIXME: check for conflict
                            continue
                        subreq = InstallRequirement(req, req_to_install)
                        reqs.append(subreq)
                        self.add_requirement(subreq)
            if not self.has_requirement(req_to_install.name):
                # 'unnamed' requirements will get added here
                self.add_requirement(req_to_install)
            # cleanup tmp src
            if (self.is_download or
                    req_to_install._temp_build_dir is not None):
                self.reqs_to_cleanup.append(req_to_install)
            if install:
                self.successfully_downloaded.append(req_to_install)
def cleanup_files(self):
    """Clean up files, remove builds."""
    logger.info('Cleaning up...')
    with indent_log():
        for requirement in self.reqs_to_cleanup:
            requirement.remove_temporary_source()
        # Only delete the build dir when pip itself created it.
        doomed = [self.build_dir] if self._pip_has_created_build_dir() else []
        for directory in doomed:
            if os.path.exists(directory):
                logger.debug('Removing temporary dir %s...', directory)
                rmtree(directory)
def _pip_has_created_build_dir(self):
    """True when build_dir is pip's own prefix and carries the delete marker."""
    if self.build_dir != build_prefix:
        return False
    marker = os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME)
    return os.path.exists(marker)
def copy_to_build_dir(self, req_to_install):
    """Copy a requirement's source tree into its target directory.

    Editable requirements go under ``src_dir``; everything else under
    ``build_dir``.  After copying, ``setup.py clean`` is run in the
    destination so stale build artifacts are removed from the new tree.

    :param req_to_install: the InstallRequirement whose ``source_dir``
        is copied.
    """
    # Conditional expression replaces the old ``cond and a or b`` idiom,
    # which silently fell through to build_dir whenever src_dir was falsy.
    target_dir = self.src_dir if req_to_install.editable else self.build_dir
    logger.debug("Copying %s to %s", req_to_install.name, target_dir)
    dest = os.path.join(target_dir, req_to_install.name)
    shutil.copytree(req_to_install.source_dir, dest)
    call_subprocess(["python", "%s/setup.py" % dest, "clean"], cwd=dest,
                    command_desc='python setup.py clean')
def install(self, install_options, global_options=(), *args, **kwargs):
    """
    Install everything in this set (after having downloaded and unpacked
    the packages)

    For each pending requirement: uninstall any conflicting installation,
    attempt the install, and on failure roll the uninstall back before
    re-raising.  Extra ``*args``/``**kwargs`` are forwarded verbatim to
    ``InstallRequirement.install``.
    """
    to_install = [r for r in self.requirements.values()
                  if not r.satisfied_by]
    # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
    # move the distribute-0.7.X wrapper to the end because it does not
    # install a setuptools package. by moving it to the end, we ensure it's
    # setuptools dependency is handled first, which will provide the
    # setuptools package
    # TODO: take this out later
    distribute_req = pkg_resources.Requirement.parse("distribute>=0.7")
    for req in to_install:
        if (req.name == 'distribute'
                and req.installed_version in distribute_req):
            to_install.remove(req)
            to_install.append(req)
    if to_install:
        logger.info(
            'Installing collected packages: %s',
            ', '.join([req.name for req in to_install]),
        )
    with indent_log():
        for requirement in to_install:
            # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
            # when upgrading from distribute-0.6.X to the new merged
            # setuptools in py2, we need to force setuptools to uninstall
            # distribute. In py3, which is always using distribute, this
            # conversion is already happening in distribute's
            # pkg_resources. It's ok *not* to check if setuptools>=0.7
            # because if someone were actually trying to ugrade from
            # distribute to setuptools 0.6.X, then all this could do is
            # actually help, although that upgade path was certainly never
            # "supported"
            # TODO: remove this later
            if requirement.name == 'setuptools':
                try:
                    # only uninstall distribute<0.7. For >=0.7, setuptools
                    # will also be present, and that's what we need to
                    # uninstall
                    distribute_requirement = \
                        pkg_resources.Requirement.parse("distribute<0.7")
                    existing_distribute = \
                        pkg_resources.get_distribution("distribute")
                    if existing_distribute in distribute_requirement:
                        requirement.conflicts_with = existing_distribute
                except pkg_resources.DistributionNotFound:
                    # distribute wasn't installed, so nothing to do
                    pass
            if requirement.conflicts_with:
                logger.info(
                    'Found existing installation: %s',
                    requirement.conflicts_with,
                )
                with indent_log():
                    requirement.uninstall(auto_confirm=True)
            try:
                requirement.install(
                    install_options,
                    global_options,
                    *args,
                    **kwargs
                )
            except:
                # NOTE: the bare except is deliberate -- any exception
                # (including KeyboardInterrupt) must trigger the rollback
                # before being re-raised.
                # if install did not succeed, rollback previous uninstall
                if (requirement.conflicts_with
                        and not requirement.install_succeeded):
                    requirement.rollback_uninstall()
                raise
            else:
                if (requirement.conflicts_with
                        and requirement.install_succeeded):
                    requirement.commit_uninstall()
            requirement.remove_temporary_source()
    self.successfully_installed = to_install
| |
#!/usr/bin/python
import os
from copy import copy
import shapefile
from opensextant import get_country, Country
from opensextant.gazetteer import DataSource, get_default_db, normalize_name, load_stopterms, parse_admin_code, PlaceHeuristics
from opensextant.utility import is_code, is_abbreviation, get_list
#
# Stopword terms used by the wider gazetteer pipeline.
stopterms = load_stopterms()
# Name groups whose scripts cannot be compared lexically with Latin text.
NOT_LEXICAL_COMPARABLE_SCRIPT = {"ar", "cjk"}
# RESULT OF ADM1 discovery in pycountry and natural earth.
# Map these descriptive names to standard feature coding in Geonames
admin_features = dict()
with open('etc/gazetteer/feature_adhoc_descriptions.csv', 'r', encoding="UTF-8") as fh:
    for featrow in fh:
        # Each CSV row: "<description>,<FEATCLASS/FEATCODE>"
        feature = get_list(featrow, delim=",")
        desc = feature[0].lower()
        featcode = feature[1].upper()
        admin_features[desc] = featcode
# Base offset for generated row IDs, keeping them clear of other sources.
GENERATED_BLOCK = 30000000
# Template gazetteer entry for a NaturalEarth subdivision; copied per record.
SUBDIV_GAZ_TEMPLATE = {
    "id": "",
    "place_id": None,
    "name": None,
    "feat_class": None,
    "feat_code": None,
    "FIPS_cc": None,
    "cc": None,
    "source": "NE",
    # Default bias tuning
    "name_bias": 0.10,
    "id_bias": 0.10,
    "name_type": "N",
    "name_group": ""
}
def parse_feature_type(r, alt_names, debug=False):
    """
    Derive a Geonames-style feature coding for a NaturalEarth admin row.

    :param r: shapefile row
    :param alt_names: list of names.
    :param debug: debug
    :return: tuple Feature Class and Feature Code e.g., "A", "ADM2" for a county
    """
    adm_type = r["type_en"]
    if not adm_type:
        # No English type given -- guess from keywords in the name variants.
        for nm in alt_names:
            tokens = nm.lower().split()
            for t in tokens:
                if t == "state":
                    return "A", "ADM1"
                if t in {"governorate", "territory", "territories"}:
                    return "P", "PPLA"
                if t in {"zone"}:
                    return "L", "ZONE"
        print("Unknown feature", r["name"], r["ne_id"])
        return "P", "PPLA"
    adm_type = adm_type.lower()
    feat_type = admin_features.get(adm_type)
    # BUGFIX: parenthesize the disjunction.  The original condition
    #   `not feat_type and "unitary district" in adm_type or "unitary authority" in adm_type`
    # parsed as `(not feat_type and ...) or ("unitary authority" in adm_type)`,
    # which clobbered a successful admin_features lookup whenever the type
    # mentioned "unitary authority".  The ADM2 fallback should apply only
    # when no mapping was found.
    if not feat_type and ("unitary district" in adm_type or "unitary authority" in adm_type):
        feat_type = "A/ADM2"
    if feat_type:
        if debug: print("\tFeature", feat_type, f"({adm_type})")
        fc, ft = feat_type.split("/", maxsplit=1)
        return fc, ft
    print("ADD FEATURE: ", adm_type)
    return "A", "UNK"
def derive_abbreviations(nameset):
    """Add acronym variants for short dotted names, mutating *nameset*.

    Any name under 10 characters containing "." whose dot-separated
    tokens are each at most one character (e.g. "N.S.W.") contributes
    its upper-cased acronym ("NSW") to the set.

    :param nameset: a set
    :return:
    """
    for candidate in copy(nameset):
        if "." not in candidate or len(candidate) >= 10:
            continue
        pieces = candidate.replace(" ", "").split(".")
        if all(len(piece) <= 1 for piece in pieces):
            nameset.add("".join(pieces).upper())
def _approximate_bias(b, name):
"""
Find a reasonable match for the given name when we have existing biases in gazetteer entry.
Otherwise if the name is just long enough it should be rather unique and return a high bias ~ 0.3-0.5
If a name varies in length by a third, we'll approximate the name bias to be similar.
:param b: dict of name:bias
:param name: normalize name
:return:
"""
if name in b:
return b.get(name)
nmlen = len(name)
diff = int(nmlen / 3)
for n in b:
nlen = len(n)
if abs(nmlen - nlen) < diff:
return b.get(n)
if nmlen >= 20:
return 0.40
return 0.05
def dump_features():
    """Write the admin_features mapping back to its CSV file, sorted by description."""
    import csv
    with open("etc/gazetteer/feature_adhoc_descriptions.csv", "w", encoding="UTF-8") as featfile:
        out = csv.writer(featfile)
        for desc in sorted(admin_features):
            out.writerow([desc, admin_features[desc]])
def _schema(shp):
    """Print the shapefile's DBF schema: one line per field (name, type)."""
    print("Schema")
    for field in shp.fields:
        print(field[0], field[1])
def assign_admin_levels(geo, country: Country, adm1: str, alt_adm1: str):
    """Populate country code and ADM1/ADM2 levels on the *geo* entry.

    NaturalEarth has some odd codings: for UK/GB the top-level provinces
    (WLS, ENG, SCT, NIR) appear as alternate codes rather than ADM1, so
    for GB the alternate code is promoted to ADM1 and the given adm1
    shifts down to ADM2.

    :param geo: gazetteer entry dict to mutate
    :param country: resolved Country object
    :param adm1: primary admin-1 code
    :param alt_adm1: alternate admin code, if any
    :return:
    """
    geo["cc"] = country.cc_iso2
    geo["adm1"] = adm1
    geo["adm2"] = ""
    if alt_adm1 and country.cc_iso2 == "GB":
        geo["adm1"] = alt_adm1
        geo["adm2"] = adm1
class NatEarthAdminGazetteer(DataSource):
    """
    Objective: retrieve useful ALT names and abbreviations for province and administrative boundaries.
    """

    def __init__(self, dbf, **kwargs):
        # dbf: path to the target gazetteer database.
        DataSource.__init__(self, dbf, **kwargs)
        # Source key/name identify rows loaded by this source in the db.
        self.source_keys = ["NE"]
        self.rate = 1000
        self.source_name = "NaturalEarth"
        # Heuristics used to score id/name biases per entry.
        self.estimator = PlaceHeuristics(self.db)

    def process_source(self, sourcefile, limit=-1):
        """
        Generator: yields one gazetteer entry (dict) per distinct name
        variant per NaturalEarth admin record.

        :param sourcefile: Shapefile from Natural Earth
        :param limit: NOTE(review): currently unused here; row limiting
            presumably happens in DataSource.normalize -- confirm.
        :return:
        """
        if not os.path.exists(sourcefile):
            print("Shapefile not found:", sourcefile)
            return
        with shapefile.Reader(sourcefile) as adm_gaz:
            print(adm_gaz)
            # Grab Shapefile DBF schema
            flds = adm_gaz.fields
            # _schema(adm_gaz)
            # Purge previous records for this source.
            self.purge()
            # Separate available name variants by - Anglo, Chinese/Japanese/Korean, Arabic or General (all else)
            anglo_script = set([])
            arabic_script = set([])
            cjk_script = set([])
            general_script = set([])
            all_script = set([])  # Every possible name.
            count = 0
            for row in adm_gaz.records():
                self.rowcount += 1
                # Sets are reused across rows -- reset them for each record.
                anglo_script.clear()
                all_script.clear()
                general_script.clear()
                arabic_script.clear()
                cjk_script.clear()
                # Retrieve country object -- just easier to work with than to infer from schema.
                cc = row["iso_a2"]
                if cc == "-1":
                    print("Country or area is not clearly addressable")
                    continue
                C = get_country(cc)
                if not C:
                    print("What Country?", cc)
                    continue
                # For a row emit an array of Place objects for each name/code + name type.
                #    Arizona  N (name)
                #    Ariz.    A (abbrev)
                #    AZ       A (abbrev/postal code)
                # Metadata across each place is the same as derived from a single row here.
                # name_ar, name_cjk are populated separately if source is from those scripts.
                #
                # Name arrays are used later. Geodetic/geographic data is constant -- but the names vary.
                for field_tuple in flds:
                    f = field_tuple[0]
                    if f.startswith("name_") and f != "name_len":
                        nm = row[f]
                        if not nm:
                            continue
                        lang = f.split("_")[1]
                        # A field may pack several variants separated by "|".
                        for possible_nm in nm.split("|"):
                            nm2 = normalize_name(possible_nm)
                            all_script.add(nm2)
                            if lang == "ar":
                                arabic_script.add(nm2)
                            elif lang in {"zh", "ko", "ja"}:
                                cjk_script.add(nm2)
                            elif lang == "en":
                                anglo_script.add(nm2)
                            else:
                                general_script.add(nm2)
                # Primary names and alternates
                alt_names = [row["name"], row["name_alt"], row["gn_name"], row["abbrev"]]
                names = set([])
                for variant in alt_names:
                    if variant:
                        for alt_nm in variant.split("|"):
                            names.add(normalize_name(alt_nm))
                derive_abbreviations(names)
                anglo_script.update(names)
                # Postal code if given is a "name" but coded as (A)abbreviation
                postal = row["postal"]
                if postal:
                    if not postal.isdigit():
                        anglo_script.add(postal)
                all_script.update(anglo_script)
                # ADMIN or other code.
                gu_a3 = row["gu_a3"]
                adm1 = parse_admin_code(row["gn_a1_code"])
                if self.debug: print(names, "/", cc, "ADM1=", adm1)
                # Geographic codings: Features, location, IDs
                labels = {row["woe_label"], row["woe_name"]}
                labels.update(all_script)
                fc, ft = parse_feature_type(row, labels, debug=self.debug)
                plid = row["gns_id"]
                if plid == "-1":
                    plid = None
                if plid:
                    # "N" prefix marks a GNS-derived place id.
                    plid = f"N{plid}"
                else:
                    # Fall back to the NaturalEarth id when GNS id is absent.
                    plid = f"NE{row['ne_id']}"
                    if self.debug: print("Backfill missing GNS ID", names)
                # Create template for new entry from this row -- metadata here is constant
                geo = copy(SUBDIV_GAZ_TEMPLATE)
                self.add_location(geo, row["latitude"], row["longitude"])
                geo["place_id"] = plid
                assign_admin_levels(geo, C, adm1, alt_adm1=gu_a3)
                geo["feat_class"] = fc
                geo["feat_code"] = ft
                geo["FIPS_cc"] = C.cc_fips
                geo["id_bias"] = self.estimator.location_bias(geo)
                # Name data here is variable -- so create a new entry for each distinct name.
                distinct_names = set([])
                for lang, nameset in [("", anglo_script),
                                      ("xx", general_script),
                                      ("ar", arabic_script),
                                      ("cjk", cjk_script)]:
                    for nm in nameset:
                        # Case-insensitive de-duplication across scripts.
                        if nm.lower() in distinct_names:
                            continue
                        distinct_names.add(nm.lower())
                        g = geo.copy()
                        count += 1
                        g["id"] = GENERATED_BLOCK + count
                        g["name"] = nm
                        name_grp = lang
                        if lang == "xx":
                            name_grp = ""
                        # NOTE(review): the template defines key "name_group"
                        # but this writes "name_grp" -- one of the two looks
                        # stale; verify against the gazetteer schema.
                        g["name_grp"] = name_grp
                        if is_code(nm):
                            g["name_type"] = "C"
                        elif is_abbreviation(nm):
                            g["name_type"] = "A"
                        g["name_bias"] = self.estimator.name_bias(nm, fc, ft,
                                                                  name_group=name_grp, name_type=g["name_type"])
                        yield g
if __name__ == "__main__":
    from argparse import ArgumentParser
    ap = ArgumentParser()
    # Positional: path to the NaturalEarth admin shapefile to load.
    ap.add_argument("shapefile")
    ap.add_argument("--db", default=get_default_db())
    ap.add_argument("--debug", action="store_true", default=False)
    ap.add_argument("--max", help="maximum rows to process for testing", default=-1)
    args = ap.parse_args()
    source = NatEarthAdminGazetteer(args.db, debug=args.debug)
    # normalize() (inherited from DataSource) drives process_source().
    source.normalize(args.shapefile, limit=int(args.max))
| |
"""
Quantilization functions and related stuff
"""
from functools import partial
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas.core.dtypes.common import (
_NS_DTYPE, ensure_int64, is_categorical_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_integer,
is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.missing import isna
from pandas import (
Categorical, Index, Interval, IntervalIndex, Series, Timedelta, Timestamp,
to_datetime, to_timedelta)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
        include_lowest=False, duplicates='raise'):
    """
    Bin values into discrete intervals.

    Use `cut` when you need to segment and sort data values into bins. This
    function is also useful for going from a continuous variable to a
    categorical variable. For example, `cut` could convert ages to groups of
    age ranges. Supports binning into an equal number of bins, or a
    pre-specified array of bins.

    Parameters
    ----------
    x : array-like
        The input array to be binned. Must be 1-dimensional.
    bins : int, sequence of scalars, or IntervalIndex
        The criteria to bin by.

        * int : Defines the number of equal-width bins in the range of `x`. The
          range of `x` is extended by .1% on each side to include the minimum
          and maximum values of `x`.
        * sequence of scalars : Defines the bin edges allowing for non-uniform
          width. No extension of the range of `x` is done.
        * IntervalIndex : Defines the exact bins to be used. Note that
          IntervalIndex for `bins` must be non-overlapping.

    right : bool, default True
        Indicates whether `bins` includes the rightmost edge or not. If
        ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
        indicate (1,2], (2,3], (3,4]. This argument is ignored when
        `bins` is an IntervalIndex.
    labels : array or bool, optional
        Specifies the labels for the returned bins. Must be the same length as
        the resulting bins. If False, returns only integer indicators of the
        bins. This affects the type of the output container (see below).
        This argument is ignored when `bins` is an IntervalIndex.
    retbins : bool, default False
        Whether to return the bins or not. Useful when bins is provided
        as a scalar.
    precision : int, default 3
        The precision at which to store and display the bins labels.
    include_lowest : bool, default False
        Whether the first interval should be left-inclusive or not.
    duplicates : {default 'raise', 'drop'}, optional
        If bin edges are not unique, raise ValueError or drop non-uniques.

        .. versionadded:: 0.23.0

    Returns
    -------
    out : Categorical, Series, or ndarray
        An array-like object representing the respective bin for each value
        of `x`. The type depends on the value of `labels`.

        * True (default) : returns a Series for Series `x` or a
          Categorical for all other inputs. The values stored within
          are Interval dtype.
        * sequence of scalars : returns a Series for Series `x` or a
          Categorical for all other inputs. The values stored within
          are whatever the type in the sequence is.
        * False : returns an ndarray of integers.

    bins : numpy.ndarray or IntervalIndex.
        The computed or specified bins. Only returned when `retbins=True`.
        For scalar or sequence `bins`, this is an ndarray with the computed
        bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For
        an IntervalIndex `bins`, this is equal to `bins`.

    See Also
    --------
    qcut : Discretize variable into equal-sized buckets based on rank
        or based on sample quantiles.
    Categorical : Array type for storing data that come from a
        fixed set of values.
    Series : One-dimensional array with axis labels (including time series).
    IntervalIndex : Immutable Index implementing an ordered, sliceable set.

    Notes
    -----
    Any NA values will be NA in the result. Out of bounds values will be NA in
    the resulting Series or Categorical object.

    Examples
    --------
    Discretize into three equal-sized bins.

    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
    ... # doctest: +ELLIPSIS
    [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
    Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...

    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
    ... # doctest: +ELLIPSIS
    ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
    Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
    array([0.994, 3.   , 5.   , 7.   ]))

    Discovers the same bins, but assign them specific labels. Notice that
    the returned Categorical's categories are `labels` and is ordered.

    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
    ...        3, labels=["bad", "medium", "good"])
    [bad, good, medium, medium, good, bad]
    Categories (3, object): [bad < medium < good]

    ``labels=False`` implies you just want the bins back.

    >>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
    array([0, 1, 1, 3])

    Passing a Series as an input returns a Series with categorical dtype:

    >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
    ...               index=['a', 'b', 'c', 'd', 'e'])
    >>> pd.cut(s, 3)
    ... # doctest: +ELLIPSIS
    a    (1.992, 4.667]
    b    (1.992, 4.667]
    c    (4.667, 7.333]
    d     (7.333, 10.0]
    e     (7.333, 10.0]
    dtype: category
    Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...

    Passing a Series as an input returns a Series with mapping value.
    It is used to map numerically to intervals based on bins.

    >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
    ...               index=['a', 'b', 'c', 'd', 'e'])
    >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
    ... # doctest: +ELLIPSIS
    (a    0.0
     b    1.0
     c    2.0
     d    3.0
     e    4.0
     dtype: float64, array([0, 2, 4, 6, 8]))

    Use `drop` optional when bins is not unique

    >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
    ...        right=False, duplicates='drop')
    ... # doctest: +ELLIPSIS
    (a    0.0
     b    1.0
     c    2.0
     d    3.0
     e    3.0
     dtype: float64, array([0, 2, 4, 6, 8]))

    Passing an IntervalIndex for `bins` results in those categories exactly.
    Notice that values not covered by the IntervalIndex are set to NaN. 0
    is to the left of the first bin (which is closed on the right), and 1.5
    falls between two bins.

    >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
    >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
    [NaN, (0, 1], NaN, (2, 3], (4, 5]]
    Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
    """
    # NOTE: this binning code is changed a bit from histogram for var(x) == 0
    # for handling the cut for datetime and timedelta objects
    x_is_series, series_index, name, x = _preprocess_for_cut(x)
    x, dtype = _coerce_to_type(x)
    if not np.iterable(bins):
        # Integer `bins`: compute equal-width edges over the data range.
        if is_scalar(bins) and bins < 1:
            raise ValueError("`bins` should be a positive integer.")
        try:  # for array-like
            sz = x.size
        except AttributeError:
            x = np.asarray(x)
            sz = x.size
        if sz == 0:
            raise ValueError('Cannot cut empty array')
        rng = (nanops.nanmin(x), nanops.nanmax(x))
        # `+ 0.0` coerces the endpoints to float.
        mn, mx = [mi + 0.0 for mi in rng]
        if np.isinf(mn) or np.isinf(mx):
            # GH 24314
            raise ValueError('cannot specify integer `bins` when input data '
                             'contains infinity')
        elif mn == mx:  # adjust end points before binning
            mn -= .001 * abs(mn) if mn != 0 else .001
            mx += .001 * abs(mx) if mx != 0 else .001
            bins = np.linspace(mn, mx, bins + 1, endpoint=True)
        else:  # adjust end points after binning
            bins = np.linspace(mn, mx, bins + 1, endpoint=True)
            adj = (mx - mn) * 0.001  # 0.1% of the range
            if right:
                bins[0] -= adj
            else:
                bins[-1] += adj
    elif isinstance(bins, IntervalIndex):
        if bins.is_overlapping:
            raise ValueError('Overlapping IntervalIndex is not accepted.')
    else:
        # Explicit edge sequence: validate monotonicity after any
        # datetime/timedelta conversion to i8.
        if is_datetime64tz_dtype(bins):
            bins = np.asarray(bins, dtype=_NS_DTYPE)
        else:
            bins = np.asarray(bins)
        bins = _convert_bin_to_numeric_type(bins, dtype)
        if (np.diff(bins) < 0).any():
            raise ValueError('bins must increase monotonically.')
    fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
                              precision=precision,
                              include_lowest=include_lowest,
                              dtype=dtype,
                              duplicates=duplicates)
    return _postprocess_for_cut(fac, bins, retbins, x_is_series,
                                series_index, name, dtype)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
    """
    Quantile-based discretization function. Discretize variable into
    equal-sized buckets based on rank or based on sample quantiles. For example
    1000 values for 10 quantiles would produce a Categorical object indicating
    quantile membership for each data point.

    Parameters
    ----------
    x : 1d ndarray or Series
    q : integer or array of quantiles
        Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
        array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
    labels : array or boolean, default None
        Used as labels for the resulting bins. Must be of the same length as
        the resulting bins. If False, return only integer indicators of the
        bins.
    retbins : bool, optional
        Whether to return the (bins, labels) or not. Can be useful if bins
        is given as a scalar.
    precision : int, optional
        The precision at which to store and display the bins labels
    duplicates : {default 'raise', 'drop'}, optional
        If bin edges are not unique, raise ValueError or drop non-uniques.

        .. versionadded:: 0.20.0

    Returns
    -------
    out : Categorical or Series or array of integers if labels is False
        The return type (Categorical or Series) depends on the input: a Series
        of type category if input is a Series else Categorical. Bins are
        represented as categories when categorical data is returned.
    bins : ndarray of floats
        Returned only if `retbins` is True.

    Notes
    -----
    Out of bounds values will be NA in the resulting Categorical object

    Examples
    --------
    >>> pd.qcut(range(5), 4)
    ... # doctest: +ELLIPSIS
    [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
    Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...

    >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
    ... # doctest: +SKIP
    [good, good, medium, bad, bad]
    Categories (3, object): [good < medium < bad]

    >>> pd.qcut(range(5), 4, labels=False)
    array([0, 0, 1, 2, 3])
    """
    x_is_series, series_index, name, x = _preprocess_for_cut(x)
    x, dtype = _coerce_to_type(x)
    if is_integer(q):
        # q quantiles -> q + 1 evenly spaced probabilities over [0, 1].
        quantiles = np.linspace(0, 1, q + 1)
    else:
        quantiles = q
    bins = algos.quantile(x, quantiles)
    # include_lowest=True: the minimum value always lands in the first bucket.
    fac, bins = _bins_to_cuts(x, bins, labels=labels,
                              precision=precision, include_lowest=True,
                              dtype=dtype, duplicates=duplicates)
    return _postprocess_for_cut(fac, bins, retbins, x_is_series,
                                series_index, name, dtype)
def _bins_to_cuts(x, bins, right=True, labels=None,
                  precision=3, include_lowest=False,
                  dtype=None, duplicates='raise'):
    """
    Assign each value of `x` to a bin and build the labelled result.

    Returns ``(result, bins)`` where ``result`` is a Categorical (or raw
    integer codes when ``labels=False``) and ``bins`` may have had
    duplicate edges dropped when ``duplicates='drop'``.
    """
    if duplicates not in ['raise', 'drop']:
        raise ValueError("invalid value for 'duplicates' parameter, "
                         "valid options are: raise, drop")
    if isinstance(bins, IntervalIndex):
        # we have a fast-path here
        ids = bins.get_indexer(x)
        result = algos.take_nd(bins, ids)
        result = Categorical(result, categories=bins, ordered=True)
        return result, bins
    unique_bins = algos.unique(bins)
    if len(unique_bins) < len(bins) and len(bins) != 2:
        if duplicates == 'raise':
            raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
                             "can drop duplicate edges by setting "
                             "the 'duplicates' kwarg".format(bins=bins))
        else:
            bins = unique_bins
    # The searchsorted side determines which interval edge is inclusive.
    side = 'left' if right else 'right'
    ids = ensure_int64(bins.searchsorted(x, side=side))
    if include_lowest:
        # Values equal to the lowest edge belong to the first bin.
        ids[x == bins[0]] = 1
    # id 0 means "below the first edge"; id len(bins) means "above the last".
    na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
    has_nas = na_mask.any()
    if labels is not False:
        if labels is None:
            labels = _format_labels(bins, precision, right=right,
                                    include_lowest=include_lowest,
                                    dtype=dtype)
        else:
            if len(labels) != len(bins) - 1:
                raise ValueError('Bin labels must be one fewer than '
                                 'the number of bin edges')
        if not is_categorical_dtype(labels):
            labels = Categorical(labels, categories=labels, ordered=True)
        np.putmask(ids, na_mask, 0)
        result = algos.take_nd(labels, ids - 1)
    else:
        result = ids - 1
        if has_nas:
            # Integer codes can't hold NaN; upcast to float first.
            result = result.astype(np.float64)
            np.putmask(result, na_mask, np.nan)
    return result, bins
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to numeric so that cut method can
handle it
"""
dtype = None
if is_datetime64tz_dtype(x):
dtype = x.dtype
elif is_datetime64_dtype(x):
x = to_datetime(x)
dtype = np.dtype('datetime64[ns]')
elif is_timedelta64_dtype(x):
x = to_timedelta(x)
dtype = np.dtype('timedelta64[ns]')
if dtype is not None:
# GH 19768: force NaT to NaN during integer conversion
x = np.where(x.notna(), x.view(np.int64), np.nan)
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a compat dtype to dtype
"""
bins_dtype = infer_dtype(bins, skipna=False)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _convert_bin_to_datelike_type(bins, dtype):
"""
Convert bins to a DatetimeIndex or TimedeltaIndex if the orginal dtype is
datelike
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Returns
-------
bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
datelike
"""
if is_datetime64tz_dtype(dtype):
bins = to_datetime(bins.astype(np.int64),
utc=True).tz_convert(dtype.tz)
elif is_datetime_or_timedelta_dtype(dtype):
bins = Index(bins.astype(np.int64), dtype=dtype)
return bins
def _format_labels(bins, precision, right=True,
                   include_lowest=False, dtype=None):
    """Build the IntervalIndex of labels for the given bin edges.

    The formatter applied to each edge, and the adjustment used for the
    lowest edge, both depend on the dtype of the data.
    """
    closed = 'right' if right else 'left'

    if is_datetime64tz_dtype(dtype):
        formatter = partial(Timestamp, tz=dtype.tz)
        adjust = lambda x: x - Timedelta('1ns')
    elif is_datetime64_dtype(dtype):
        formatter = Timestamp
        adjust = lambda x: x - Timedelta('1ns')
    elif is_timedelta64_dtype(dtype):
        formatter = Timedelta
        adjust = lambda x: x - Timedelta('1ns')
    else:
        precision = _infer_precision(precision, bins)
        formatter = lambda x: _round_frac(x, precision)
        adjust = lambda x: x - 10 ** (-precision)

    labels = IntervalIndex.from_breaks([formatter(edge) for edge in bins],
                                       closed=closed)

    if right and include_lowest:
        # we will adjust the left hand side by precision to
        # account that we are all right closed
        lowest = adjust(labels[0].left)
        head = IntervalIndex([Interval(lowest, labels[0].right,
                                       closed='right')])
        labels = head.append(labels[1:])

    return labels
def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
separately
"""
x_is_series = isinstance(x, Series)
series_index = None
name = None
if x_is_series:
series_index = x.index
name = x.name
# Check that the passed array is a Pandas or Numpy object
# We don't want to strip away a Pandas data-type here (e.g. datetimetz)
ndim = getattr(x, 'ndim', None)
if ndim is None:
x = np.asarray(x)
if x.ndim != 1:
raise ValueError("Input array must be 1 dimensional")
return x_is_series, series_index, name, x
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name, dtype):
"""
handles post processing for the cut method where
we combine the index information if the originally passed
datatype was a series
"""
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
bins = _convert_bin_to_datelike_type(bins, dtype)
return fac, bins
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
def _infer_precision(base_precision, bins):
    """Infer the smallest precision >= base_precision at which all
    rounded bin edges remain distinct.
    """
    for precision in range(base_precision, 20):
        edges = [_round_frac(edge, precision) for edge in bins]
        if algos.unique(edges).size == bins.size:
            return precision
    return base_precision  # default
| |
# Copyright (c) 2013 Zelin.io
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import mock
from oslo_concurrency import processutils
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder.backup import driver as backup_driver
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_volume
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import sheepdog
SHEEP_ADDR = '127.0.0.1'
SHEEP_PORT = 7000
class SheepdogDriverTestDataGenerator(object):
    """Shared fixture data for the sheepdog driver tests.

    Provides a fake volume object, helpers that build the exact 'dog'
    command tuples the driver is expected to execute, and canned
    stdout/stderr strings matching real sheepdog CLI output.  The string
    constants are compared byte-for-byte by the tests, so their exact
    contents (including line continuations) are significant.
    """

    def __init__(self):
        self.TEST_VOLUME = self._make_fake_volume(self.TEST_VOL_DATA)

    def sheepdog_cmd_error(self, cmd, exit_code, stdout, stderr):
        # Mirrors the message format of exception.SheepdogCmdError, with
        # newlines escaped the same way the driver does.
        return (('(Command: %(cmd)s) '
                 '(Return Code: %(exit_code)s) '
                 '(Stdout: %(stdout)s) '
                 '(Stderr: %(stderr)s)') %
                {'cmd': cmd,
                 'exit_code': exit_code,
                 'stdout': stdout.replace('\n', '\\n'),
                 'stderr': stderr.replace('\n', '\\n')})

    def _make_fake_volume(self, volume_data):
        # Build an OVO volume object from the raw dict of attributes.
        return fake_volume.fake_volume_obj(context.get_admin_context(),
                                           **volume_data)

    def cmd_dog_vdi_create(self, name, size):
        # Expected command line for 'dog vdi create'.
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'create', name,
                '%sG' % size, '-a', SHEEP_ADDR, '-p', str(SHEEP_PORT))

    def cmd_dog_vdi_delete(self, name):
        # Expected command line for 'dog vdi delete'.
        return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'delete', name,
                '-a', SHEEP_ADDR, '-p', str(SHEEP_PORT))

    # Expected command line for 'dog cluster info'.
    CMD_DOG_CLUSTER_INFO = ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'cluster',
                            'info', '-a', SHEEP_ADDR, '-p', str(SHEEP_PORT))

    # Raw attributes used to build TEST_VOLUME.
    TEST_VOL_DATA = {
        'size': 1,
        'id': '00000000-0000-0000-0000-000000000001',
        'provider_auth': None,
        'host': 'host@backendsec#unit_test_pool',
        'project_id': 'project',
        'provider_location': 'location',
        'display_name': 'vol1',
        'display_description': 'unit test volume',
        'volume_type_id': None,
        'consistencygroup_id': None,
    }

    # Canned 'collie node info' output used by the stats tests.
    COLLIE_NODE_INFO = """
0 107287605248 3623897354 3%
Total 107287605248 3623897354 3% 54760833024
"""

    # Canned cluster-info output for older sheepdog releases.
    COLLIE_CLUSTER_INFO_0_5 = """\
Cluster status: running
Cluster created at Tue Jun 25 19:51:41 2013
Epoch Time Version
2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002]
"""

    COLLIE_CLUSTER_INFO_0_6 = """\
Cluster status: running, auto-recovery enabled
Cluster created at Tue Jun 25 19:51:41 2013
Epoch Time Version
2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002]
"""

    # Canned 'dog cluster info' outputs for the various cluster states.
    DOG_CLUSTER_RUNNING = """\
Cluster status: running, auto-recovery enabled
Cluster created at Thu Jun 18 17:24:56 2015
Epoch Time Version [Host:Port:V-Nodes,,,]
2015-06-18 17:24:56 1 [127.0.0.1:7000:128, 127.0.0.1:7001:128,\
 127.0.0.1:7002:128]
"""

    DOG_CLUSTER_INFO_TO_BE_FORMATTED = """\
Cluster status: Waiting for cluster to be formatted
"""

    DOG_CLUSTER_INFO_WAITING_OTHER_NODES = """\
Cluster status: Waiting for other nodes to join cluster
Cluster created at Thu Jun 18 17:24:56 2015
Epoch Time Version [Host:Port:V-Nodes,,,]
2015-06-18 17:24:56 1 [127.0.0.1:7000:128, 127.0.0.1:7001:128]
"""

    DOG_CLUSTER_INFO_SHUTTING_DOWN = """\
Cluster status: System is shutting down
"""

    # Canned error output; '%(vdiname)s' is filled in by the tests.
    DOG_VDI_CREATE_VDI_ALREADY_EXISTS = """\
Failed to create VDI %(vdiname)s: VDI exists already
"""

    DOG_VDI_DELETE_VDI_NOT_EXISTS = """\
Failed to open VDI %(vdiname)s (snapshot id: 0 snapshot tag: ): No VDI found
"""

    DOG_COMMAND_ERROR_FAIL_TO_CONNECT = """\
failed to connect to 127.0.0.1:7000: Connection refused
failed to connect to 127.0.0.1:7000: Connection refused
Failed to get node list
"""
class FakeImageService(object):
    """Minimal image-service stub used where a download is expected."""

    def download(self, context, image_id, path):
        # Nothing to fetch in unit tests; pretend the download succeeded.
        pass
class SheepdogIOWrapperTestCase(test.TestCase):
    """Tests for SheepdogIOWrapper, the file-like interface over a VDI.

    Two wrappers are built in setUp: one for the plain volume and one for
    a snapshot of it.  processutils.execute is replaced with a MagicMock
    so the exact 'dog vdi read/write' command lines can be asserted.
    """

    def setUp(self):
        super(SheepdogIOWrapperTestCase, self).setUp()
        self.volume = {'name': 'volume-2f9b2ff5-987b-4412-a91c-23caaf0d5aff'}
        self.snapshot_name = 'snapshot-bf452d80-068a-43d7-ba9f-196cf47bd0be'
        self.vdi_wrapper = sheepdog.SheepdogIOWrapper(
            self.volume)
        self.snapshot_wrapper = sheepdog.SheepdogIOWrapper(
            self.volume, self.snapshot_name)
        self.execute = mock.MagicMock()
        self.mock_object(processutils, 'execute', self.execute)

    def test_init(self):
        self.assertEqual(self.volume['name'], self.vdi_wrapper._vdiname)
        self.assertIsNone(self.vdi_wrapper._snapshot_name)
        self.assertEqual(0, self.vdi_wrapper._offset)
        self.assertEqual(self.snapshot_name,
                         self.snapshot_wrapper._snapshot_name)

    def test_execute(self):
        cmd = ('cmd1', 'arg1')
        data = 'data1'
        self.vdi_wrapper._execute(cmd, data)
        self.execute.assert_called_once_with(*cmd, process_input=data)

    def test_execute_error(self):
        cmd = ('cmd1', 'arg1')
        data = 'data1'
        # Replace the happy-path mock with one that raises OSError; the
        # wrapper is expected to translate it to VolumeDriverException.
        self.mock_object(processutils, 'execute',
                         mock.MagicMock(side_effect=OSError))
        args = (cmd, data)
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper._execute,
                          *args)

    def test_read_vdi(self):
        self.vdi_wrapper.read()
        self.execute.assert_called_once_with(
            'dog', 'vdi', 'read', self.volume['name'], 0, process_input=None)

    def test_read_vdi_invalid(self):
        # An invalidated wrapper must refuse further I/O.
        self.vdi_wrapper._valid = False
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper.read)

    def test_write_vdi(self):
        data = 'data1'
        self.vdi_wrapper.write(data)
        self.execute.assert_called_once_with(
            'dog', 'vdi', 'write',
            self.volume['name'], 0, len(data),
            process_input=data)
        self.assertEqual(len(data), self.vdi_wrapper.tell())

    def test_write_vdi_invalid(self):
        self.vdi_wrapper._valid = False
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper.write, 'dummy_data')

    def test_read_snapshot(self):
        # Reads through the snapshot wrapper add the '-s <snapshot>' args.
        self.snapshot_wrapper.read()
        self.execute.assert_called_once_with(
            'dog', 'vdi', 'read', '-s', self.snapshot_name,
            self.volume['name'], 0,
            process_input=None)

    def test_seek(self):
        self.vdi_wrapper.seek(12345)
        self.assertEqual(12345, self.vdi_wrapper.tell())
        self.vdi_wrapper.seek(-2345, whence=1)
        self.assertEqual(10000, self.vdi_wrapper.tell())
        # This results in negative offset.
        self.assertRaises(IOError, self.vdi_wrapper.seek, -20000, whence=1)

    def test_seek_invalid(self):
        seek_num = 12345
        self.vdi_wrapper._valid = False
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper.seek, seek_num)

    def test_flush(self):
        # flush does nothing.
        self.vdi_wrapper.flush()
        self.assertFalse(self.execute.called)

    def test_fileno(self):
        self.assertRaises(IOError, self.vdi_wrapper.fileno)
class SheepdogClientTestCase(test.TestCase):
    """Tests for SheepdogClient, the wrapper around the 'dog' CLI.

    Each test mocks either utils.execute (for _run_dog itself) or
    SheepdogClient._run_dog (for the higher-level operations) and checks
    the command issued, the parsing of canned CLI output, and the
    exception/logging behavior on the various failure modes.
    """

    def setUp(self):
        super(SheepdogClientTestCase, self).setUp()
        self._cfg = conf.Configuration(None)
        self._cfg.sheepdog_store_address = SHEEP_ADDR
        self._cfg.sheepdog_store_port = SHEEP_PORT
        self.driver = sheepdog.SheepdogDriver(configuration=self._cfg)
        db_driver = self.driver.configuration.db_driver
        self.db = importutils.import_module(db_driver)
        self.driver.db = self.db
        self.driver.do_setup(None)
        self.test_data = SheepdogDriverTestDataGenerator()
        self.client = self.driver.client
        self._vdiname = self.test_data.TEST_VOLUME.name
        self._vdisize = self.test_data.TEST_VOLUME.size

    @mock.patch.object(utils, 'execute')
    def test_run_dog_success(self, fake_execute):
        args = ('cluster', 'info')
        expected_cmd = self.test_data.CMD_DOG_CLUSTER_INFO
        fake_execute.return_value = ('', '')
        self.client._run_dog(*args)
        fake_execute.assert_called_once_with(*expected_cmd)

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(sheepdog, 'LOG')
    def test_run_dog_command_not_found(self, fake_logger, fake_execute):
        args = ('cluster', 'info')
        expected_msg = 'No such file or directory'
        expected_errno = errno.ENOENT
        fake_execute.side_effect = OSError(expected_errno, expected_msg)
        self.assertRaises(OSError, self.client._run_dog, *args)
        self.assertTrue(fake_logger.error.called)

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(sheepdog, 'LOG')
    def test_run_dog_operation_not_permitted(self, fake_logger, fake_execute):
        args = ('cluster', 'info')
        expected_msg = 'Operation not permitted'
        expected_errno = errno.EPERM
        fake_execute.side_effect = OSError(expected_errno, expected_msg)
        self.assertRaises(OSError, self.client._run_dog, *args)
        self.assertTrue(fake_logger.error.called)

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(sheepdog, 'LOG')
    def test_run_dog_unknown_error(self, fake_logger, fake_execute):
        # A ProcessExecutionError must be re-raised as SheepdogCmdError
        # carrying the full command/exit-code/stdout/stderr detail.
        args = ('cluster', 'info')
        cmd = self.test_data.CMD_DOG_CLUSTER_INFO
        exit_code = 1
        stdout = 'stdout dummy'
        stderr = 'stderr dummy'
        expected_msg = self.test_data.sheepdog_cmd_error(
            cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
        fake_execute.side_effect = processutils.ProcessExecutionError(
            cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
        ex = self.assertRaises(exception.SheepdogCmdError,
                               self.client._run_dog, *args)
        self.assertEqual(expected_msg, ex.msg)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_check_cluster_status_success(self, fake_logger, fake_execute):
        stdout = self.test_data.DOG_CLUSTER_RUNNING
        stderr = ''
        expected_cmd = ('cluster', 'info')
        fake_execute.return_value = (stdout, stderr)
        self.client.check_cluster_status()
        fake_execute.assert_called_once_with(*expected_cmd)
        self.assertTrue(fake_logger.debug.called)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    def test_check_cluster_status_v0_5(self, fake_execute):
        # Older (0.5) cluster-info output must also be accepted.
        stdout = self.test_data.COLLIE_CLUSTER_INFO_0_5
        stderr = ''
        fake_execute.return_value = (stdout, stderr)
        self.client.check_cluster_status()

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    def test_check_cluster_status_v0_6(self, fake_execute):
        stdout = self.test_data.COLLIE_CLUSTER_INFO_0_6
        stderr = ''
        fake_execute.return_value = (stdout, stderr)
        self.client.check_cluster_status()

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_check_cluster_status_not_formatted(self, fake_logger,
                                                fake_execute):
        stdout = self.test_data.DOG_CLUSTER_INFO_TO_BE_FORMATTED
        stderr = ''
        expected_reason = _('Cluster is not formatted. '
                            'You should probably perform '
                            '"dog cluster format".')
        fake_execute.return_value = (stdout, stderr)
        ex = self.assertRaises(exception.SheepdogError,
                               self.client.check_cluster_status)
        self.assertEqual(expected_reason, ex.kwargs['reason'])

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_check_cluster_status_waiting_to_join_cluster(self, fake_logger,
                                                          fake_execute):
        stdout = self.test_data.DOG_CLUSTER_INFO_WAITING_OTHER_NODES
        stderr = ''
        expected_reason = _('Waiting for all nodes to join cluster. '
                            'Ensure all sheep daemons are running.')
        fake_execute.return_value = (stdout, stderr)
        ex = self.assertRaises(exception.SheepdogError,
                               self.client.check_cluster_status)
        self.assertEqual(expected_reason, ex.kwargs['reason'])

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_check_cluster_status_shutting_down(self, fake_logger,
                                                fake_execute):
        stdout = self.test_data.DOG_CLUSTER_INFO_SHUTTING_DOWN
        stderr = ''
        expected_reason = _('Invalid sheepdog cluster status.')
        fake_execute.return_value = (stdout, stderr)
        ex = self.assertRaises(exception.SheepdogError,
                               self.client.check_cluster_status)
        self.assertEqual(expected_reason, ex.kwargs['reason'])

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_check_cluster_status_fail_to_connect(self, fake_logger,
                                                  fake_execute):
        cmd = self.test_data.CMD_DOG_CLUSTER_INFO
        exit_code = 2
        stdout = 'stdout_dummy'
        stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT
        expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                         exit_code=exit_code,
                                                         stdout=stdout,
                                                         stderr=stderr)
        fake_execute.side_effect = exception.SheepdogCmdError(
            cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
            stderr=stderr.replace('\n', '\\n'))
        ex = self.assertRaises(exception.SheepdogCmdError,
                               self.client.check_cluster_status)
        self.assertEqual(expected_msg, ex.msg)
        self.assertTrue(fake_logger.error.called)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_check_cluster_status_unknown_error(self, fake_logger,
                                                fake_execute):
        cmd = self.test_data.CMD_DOG_CLUSTER_INFO
        exit_code = 2
        stdout = 'stdout_dummy'
        stderr = 'stdout_dummy'
        expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                         exit_code=exit_code,
                                                         stdout=stdout,
                                                         stderr=stderr)
        fake_execute.side_effect = exception.SheepdogCmdError(
            cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
        ex = self.assertRaises(exception.SheepdogCmdError,
                               self.client.check_cluster_status)
        self.assertEqual(expected_msg, ex.msg)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    def test_create_success(self, fake_execute):
        expected_cmd = ('vdi', 'create', self._vdiname, '%sG' % self._vdisize)
        fake_execute.return_value = ('', '')
        self.client.create(self._vdiname, self._vdisize)
        fake_execute.assert_called_once_with(*expected_cmd)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_create_fail_to_connect(self, fake_logger, fake_execute):
        cmd = self.test_data.cmd_dog_vdi_create(self._vdiname, self._vdisize)
        exit_code = 2
        stdout = ''
        stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT
        expected_msg = self.test_data.sheepdog_cmd_error(
            cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
        fake_execute.side_effect = exception.SheepdogCmdError(
            cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
            stderr=stderr.replace('\n', '\\n'))
        ex = self.assertRaises(exception.SheepdogCmdError, self.client.create,
                               self._vdiname, self._vdisize)
        self.assertTrue(fake_logger.error.called)
        self.assertEqual(expected_msg, ex.msg)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_create_vdi_already_exists(self, fake_logger, fake_execute):
        cmd = self.test_data.cmd_dog_vdi_create(self._vdiname, self._vdisize)
        exit_code = 1
        stdout = ''
        stderr = (self.test_data.DOG_VDI_CREATE_VDI_ALREADY_EXISTS %
                  {'vdiname': self._vdiname})
        expected_msg = self.test_data.sheepdog_cmd_error(
            cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
        fake_execute.side_effect = exception.SheepdogCmdError(
            cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
            stderr=stderr.replace('\n', '\\n'))
        ex = self.assertRaises(exception.SheepdogCmdError, self.client.create,
                               self._vdiname, self._vdisize)
        self.assertTrue(fake_logger.error.called)
        self.assertEqual(expected_msg, ex.msg)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_create_unknown_error(self, fake_logger, fake_execute):
        cmd = self.test_data.cmd_dog_vdi_create(self._vdiname, self._vdisize)
        exit_code = 1
        stdout = 'stdout_dummy'
        stderr = 'stderr_dummy'
        expected_msg = self.test_data.sheepdog_cmd_error(
            cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr)
        fake_execute.side_effect = exception.SheepdogCmdError(
            cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
            stderr=stderr.replace('\n', '\\n'))
        ex = self.assertRaises(exception.SheepdogCmdError, self.client.create,
                               self._vdiname, self._vdisize)
        self.assertTrue(fake_logger.error.called)
        self.assertEqual(expected_msg, ex.msg)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    def test_delete_success(self, fake_execute):
        expected_cmd = ('vdi', 'delete', self._vdiname)
        fake_execute.return_value = ('', '')
        self.client.delete(self._vdiname)
        fake_execute.assert_called_once_with(*expected_cmd)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_delete_vdi_not_found(self, fake_logger, fake_execute):
        # Deleting a missing VDI only warns; it must not raise.
        stdout = ''
        stderr = (self.test_data.DOG_VDI_DELETE_VDI_NOT_EXISTS %
                  {'vdiname': self._vdiname})
        fake_execute.return_value = (stdout, stderr)
        self.client.delete(self._vdiname)
        self.assertTrue(fake_logger.warning.called)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    def test_delete_fail_to_connect_bugcase(self, fake_execute):
        # NOTE(tishizaki): Sheepdog's bug case.
        # details are written to Sheepdog driver code.
        stdout = ''
        stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT
        expected_reason = (_('Failed to connect to sheep daemon. '
                           'addr: %(addr)s, port: %(port)s'),
                           {'addr': SHEEP_ADDR, 'port': SHEEP_PORT})
        fake_execute.return_value = (stdout, stderr)
        ex = self.assertRaises(exception.SheepdogError,
                               self.client.delete, self._vdiname)
        self.assertEqual(expected_reason, ex.kwargs['reason'])

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_delete_fail_to_connect(self, fake_logger, fake_execute):
        cmd = self.test_data.cmd_dog_vdi_delete(self._vdiname)
        exit_code = 2
        stdout = 'stdout_dummy'
        stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT
        expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                         exit_code=exit_code,
                                                         stdout=stdout,
                                                         stderr=stderr)
        fake_execute.side_effect = exception.SheepdogCmdError(
            cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
            stderr=stderr.replace('\n', '\\n'))
        ex = self.assertRaises(exception.SheepdogCmdError,
                               self.client.delete, self._vdiname)
        self.assertTrue(fake_logger.error.called)
        self.assertEqual(expected_msg, ex.msg)

    @mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
    @mock.patch.object(sheepdog, 'LOG')
    def test_delete_unknown_error(self, fake_logger, fake_execute):
        cmd = self.test_data.cmd_dog_vdi_delete(self._vdiname)
        exit_code = 2
        stdout = 'stdout_dummy'
        stderr = 'stderr_dummy'
        expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd,
                                                         exit_code=exit_code,
                                                         stdout=stdout,
                                                         stderr=stderr)
        fake_execute.side_effect = exception.SheepdogCmdError(
            cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'),
            stderr=stderr.replace('\n', '\\n'))
        ex = self.assertRaises(exception.SheepdogCmdError,
                               self.client.delete, self._vdiname)
        self.assertTrue(fake_logger.error.called)
        self.assertEqual(expected_msg, ex.msg)
class SheepdogDriverTestCase(test.TestCase):
def setUp(self):
super(SheepdogDriverTestCase, self).setUp()
self._cfg = conf.Configuration(None)
self._cfg.sheepdog_store_address = SHEEP_ADDR
self._cfg.sheepdog_store_port = SHEEP_PORT
self.driver = sheepdog.SheepdogDriver(configuration=self._cfg)
db_driver = self.driver.configuration.db_driver
self.db = importutils.import_module(db_driver)
self.driver.db = self.db
self.driver.do_setup(None)
self.test_data = SheepdogDriverTestDataGenerator()
self.client = self.driver.client
self._vdiname = self.test_data.TEST_VOLUME.name
self._vdisize = self.test_data.TEST_VOLUME.size
@mock.patch.object(sheepdog.SheepdogClient, 'check_cluster_status')
def test_check_for_setup_error(self, fake_execute):
self.driver.check_for_setup_error()
fake_execute.assert_called_once_with()
@mock.patch.object(sheepdog.SheepdogClient, 'create')
def test_create_volume(self, fake_execute):
self.driver.create_volume(self.test_data.TEST_VOLUME)
fake_execute.assert_called_once_with(self._vdiname, self._vdisize)
@mock.patch.object(sheepdog.SheepdogClient, 'delete')
def test_delete_volume(self, fake_execute):
self.driver.delete_volume(self.test_data.TEST_VOLUME)
fake_execute.assert_called_once_with(self._vdiname)
def test_update_volume_stats(self):
def fake_stats(*args):
return self.test_data.COLLIE_NODE_INFO, ''
self.stubs.Set(self.driver, '_execute', fake_stats)
expected = dict(
volume_backend_name='sheepdog',
vendor_name='Open Source',
driver_version=self.driver.VERSION,
storage_protocol='sheepdog',
total_capacity_gb=float(107287605248) / units.Gi,
free_capacity_gb=float(107287605248 - 3623897354) / units.Gi,
reserved_percentage=0,
QoS_support=False)
actual = self.driver.get_volume_stats(True)
self.assertDictMatch(expected, actual)
def test_update_volume_stats_error(self):
def fake_stats(*args):
raise processutils.ProcessExecutionError()
self.stubs.Set(self.driver, '_execute', fake_stats)
expected = dict(
volume_backend_name='sheepdog',
vendor_name='Open Source',
driver_version=self.driver.VERSION,
storage_protocol='sheepdog',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0,
QoS_support=False)
actual = self.driver.get_volume_stats(True)
self.assertDictMatch(expected, actual)
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog')
def test_copy_image_to_volume(self, fake_run_dog):
@contextlib.contextmanager
def fake_temp_file():
class FakeTmp(object):
def __init__(self, name):
self.name = name
yield FakeTmp('test').name
def fake_try_execute(obj, *command, **kwargs):
return True
self.stubs.Set(image_utils, 'temporary_file', fake_temp_file)
self.stubs.Set(image_utils, 'fetch_verify_image',
lambda w, x, y, z: None)
self.stubs.Set(image_utils, 'convert_image',
lambda x, y, z: None)
self.stubs.Set(sheepdog.SheepdogDriver,
'_try_execute',
fake_try_execute)
fake_run_dog.return_value = ('fake_stdout', 'fake_stderr')
self.driver.copy_image_to_volume(None, self.test_data.TEST_VOLUME,
FakeImageService(), None)
def test_copy_volume_to_image(self):
fake_context = {}
fake_volume = {'name': 'volume-00000001'}
fake_image_service = mock.Mock()
fake_image_service_update = mock.Mock()
fake_image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
patch = mock.patch.object
with patch(self.driver, '_try_execute') as fake_try_execute:
with patch(fake_image_service,
'update') as fake_image_service_update:
self.driver.copy_volume_to_image(fake_context,
fake_volume,
fake_image_service,
fake_image_meta)
expected_cmd = ('qemu-img',
'convert',
'-f', 'raw',
'-t', 'none',
'-O', 'raw',
'sheepdog:%s' % fake_volume['name'],
mock.ANY)
fake_try_execute.assert_called_once_with(*expected_cmd)
fake_image_service_update.assert_called_once_with(
fake_context, fake_image_meta['id'], mock.ANY, mock.ANY)
def test_copy_volume_to_image_nonexistent_volume(self):
fake_context = {}
fake_volume = {
'name': 'nonexistent-volume-82c4539e-c2a5-11e4-a293-0aa186c60fe0'}
fake_image_service = mock.Mock()
fake_image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
# The command is expected to fail, so we don't want to retry it.
self.driver._try_execute = self.driver._execute
args = (fake_context, fake_volume, fake_image_service, fake_image_meta)
expected_errors = (processutils.ProcessExecutionError, OSError)
self.assertRaises(expected_errors,
self.driver.copy_volume_to_image,
*args)
def test_create_cloned_volume(self):
src_vol = {
'project_id': 'testprjid',
'name': six.text_type('volume-00000001'),
'size': '20',
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
}
target_vol = {
'project_id': 'testprjid',
'name': six.text_type('volume-00000002'),
'size': '20',
'id': '582a1efa-be6a-11e4-a73b-0aa186c60fe0',
}
with mock.patch.object(self.driver,
'_try_execute') as mock_exe:
self.driver.create_cloned_volume(target_vol, src_vol)
snapshot_name = src_vol['name'] + '-temp-snapshot'
qemu_src_volume_name = "sheepdog:%s" % src_vol['name']
qemu_snapshot_name = '%s:%s' % (qemu_src_volume_name,
snapshot_name)
qemu_target_volume_name = "sheepdog:%s" % target_vol['name']
calls = [
mock.call('qemu-img', 'snapshot', '-c',
snapshot_name, qemu_src_volume_name),
mock.call('qemu-img', 'create', '-b',
qemu_snapshot_name,
qemu_target_volume_name,
'%sG' % target_vol['size']),
]
mock_exe.assert_has_calls(calls)
def test_create_cloned_volume_failure(self):
fake_name = six.text_type('volume-00000001')
fake_size = '20'
fake_vol = {'project_id': 'testprjid', 'name': fake_name,
'size': fake_size,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
src_vol = fake_vol
patch = mock.patch.object
with patch(self.driver, '_try_execute',
side_effect=processutils.ProcessExecutionError):
with patch(self.driver, 'create_snapshot'):
with patch(self.driver, 'delete_snapshot'):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
fake_vol,
src_vol)
def test_clone_image_success(self):
context = {}
fake_name = six.text_type('volume-00000001')
fake_size = '2'
fake_vol = {'project_id': 'testprjid', 'name': fake_name,
'size': fake_size,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
image_location = ('sheepdog:192.168.1.111:7000:Alice', None)
image_id = "caa4ffd0-fake-fake-fake-f8631a807f5a"
image_meta = {'id': image_id, 'size': 1, 'disk_format': 'raw'}
image_service = ''
patch = mock.patch.object
with patch(self.driver, '_try_execute', return_value=True):
with patch(self.driver, 'create_cloned_volume'):
with patch(self.driver, '_resize'):
model_updated, cloned = self.driver.clone_image(
context, fake_vol, image_location,
image_meta, image_service)
self.assertTrue(cloned)
self.assertEqual("sheepdog:%s" % fake_name,
model_updated['provider_location'])
def test_clone_image_failure(self):
context = {}
fake_vol = {}
image_location = ('image_location', None)
image_meta = {}
image_service = ''
with mock.patch.object(self.driver, '_is_cloneable',
lambda *args: False):
result = self.driver.clone_image(
context, fake_vol, image_location, image_meta, image_service)
self.assertEqual(({}, False), result)
def test_is_cloneable(self):
uuid = '87f1b01c-f46c-4537-bd5d-23962f5f4316'
location = 'sheepdog:ip:port:%s' % uuid
image_meta = {'id': uuid, 'size': 1, 'disk_format': 'raw'}
invalid_image_meta = {'id': uuid, 'size': 1, 'disk_format': 'iso'}
with mock.patch.object(self.driver, '_try_execute') as try_execute:
self.assertTrue(
self.driver._is_cloneable(location, image_meta))
expected_cmd = ('collie', 'vdi', 'list',
'--address', 'ip',
'--port', 'port',
uuid)
try_execute.assert_called_once_with(*expected_cmd)
# check returning False without executing a command
self.assertFalse(
self.driver._is_cloneable('invalid-location', image_meta))
self.assertFalse(
self.driver._is_cloneable(location, invalid_image_meta))
self.assertEqual(1, try_execute.call_count)
error = processutils.ProcessExecutionError
with mock.patch.object(self.driver, '_try_execute',
side_effect=error) as fail_try_execute:
self.assertFalse(
self.driver._is_cloneable(location, image_meta))
fail_try_execute.assert_called_once_with(*expected_cmd)
def test_extend_volume(self):
fake_name = u'volume-00000001'
fake_size = '20'
fake_vol = {'project_id': 'testprjid', 'name': fake_name,
'size': fake_size,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
self.mox.StubOutWithMock(self.driver, '_resize')
size = int(fake_size) * units.Gi
self.driver._resize(fake_vol, size=size)
self.mox.ReplayAll()
self.driver.extend_volume(fake_vol, fake_size)
self.mox.VerifyAll()
def test_create_volume_from_snapshot(self):
fake_name = u'volume-00000001'
fake_size = '10'
fake_vol = {'project_id': 'testprjid', 'name': fake_name,
'size': fake_size,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
ss_uuid = '00000000-0000-0000-0000-c3aa7ee01536'
fake_snapshot = {'volume_name': fake_name,
'name': 'volume-%s' % ss_uuid,
'id': ss_uuid,
'size': fake_size}
with mock.patch.object(sheepdog.SheepdogDriver,
'_try_execute') as mock_exe:
self.driver.create_volume_from_snapshot(fake_vol, fake_snapshot)
args = ['qemu-img', 'create', '-b',
"sheepdog:%s:%s" % (fake_snapshot['volume_name'],
fake_snapshot['name']),
"sheepdog:%s" % fake_vol['name'],
"%sG" % fake_vol['size']]
mock_exe.assert_called_once_with(*args)
@mock.patch.object(db, 'volume_get')
@mock.patch.object(sheepdog.SheepdogDriver, '_try_execute')
@mock.patch.object(sheepdog.SheepdogDriver, 'create_snapshot')
@mock.patch.object(backup_driver, 'BackupDriver')
@mock.patch.object(sheepdog.SheepdogDriver, 'delete_snapshot')
def test_backup_volume_success(self, fake_delete_snapshot,
fake_backup_service, fake_create_snapshot,
fake_execute, fake_volume_get):
fake_context = {}
fake_backup = {'volume_id': '2926efe0-24ab-45b7-95e1-ff66e0646a33'}
fake_volume = {'id': '2926efe0-24ab-45b7-95e1-ff66e0646a33',
'name': 'volume-2926efe0-24ab-45b7-95e1-ff66e0646a33'}
fake_volume_get.return_value = fake_volume
self.driver.backup_volume(fake_context,
fake_backup,
fake_backup_service)
self.assertEqual(1, fake_create_snapshot.call_count)
self.assertEqual(2, fake_delete_snapshot.call_count)
self.assertEqual(fake_create_snapshot.call_args,
fake_delete_snapshot.call_args)
call_args, call_kwargs = fake_backup_service.backup.call_args
call_backup, call_sheepdog_fd = call_args
self.assertEqual(fake_backup, call_backup)
self.assertIsInstance(call_sheepdog_fd, sheepdog.SheepdogIOWrapper)
@mock.patch.object(db, 'volume_get')
@mock.patch.object(sheepdog.SheepdogDriver, '_try_execute')
@mock.patch.object(sheepdog.SheepdogDriver, 'create_snapshot')
@mock.patch.object(backup_driver, 'BackupDriver')
@mock.patch.object(sheepdog.SheepdogDriver, 'delete_snapshot')
def test_backup_volume_fail_to_create_snap(self, fake_delete_snapshot,
fake_backup_service,
fake_create_snapshot,
fake_execute, fake_volume_get):
fake_context = {}
fake_backup = {'volume_id': '2926efe0-24ab-45b7-95e1-ff66e0646a33'}
fake_volume = {'id': '2926efe0-24ab-45b7-95e1-ff66e0646a33',
'name': 'volume-2926efe0-24ab-45b7-95e1-ff66e0646a33'}
fake_volume_get.return_value = fake_volume
fake_create_snapshot.side_effect = processutils.ProcessExecutionError(
cmd='dummy', exit_code=1, stdout='dummy', stderr='dummy')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.backup_volume,
fake_context,
fake_backup,
fake_backup_service)
self.assertEqual(1, fake_create_snapshot.call_count)
self.assertEqual(1, fake_delete_snapshot.call_count)
self.assertEqual(fake_create_snapshot.call_args,
fake_delete_snapshot.call_args)
@mock.patch.object(db, 'volume_get')
@mock.patch.object(sheepdog.SheepdogDriver, '_try_execute')
@mock.patch.object(sheepdog.SheepdogDriver, 'create_snapshot')
@mock.patch.object(backup_driver, 'BackupDriver')
@mock.patch.object(sheepdog.SheepdogDriver, 'delete_snapshot')
def test_backup_volume_fail_to_backup_vol(self, fake_delete_snapshot,
                                          fake_backup_service,
                                          fake_create_snapshot,
                                          fake_execute, fake_volume_get):
    """An error raised by the backup service propagates unchanged while the
    temporary snapshot is still cleaned up."""

    class BackupError(Exception):
        pass

    ctx = {}
    backup = {'volume_id': '2926efe0-24ab-45b7-95e1-ff66e0646a33'}
    volume = {'id': '2926efe0-24ab-45b7-95e1-ff66e0646a33',
              'name': 'volume-2926efe0-24ab-45b7-95e1-ff66e0646a33'}
    fake_volume_get.return_value = volume
    fake_backup_service.backup.side_effect = BackupError()
    self.assertRaises(BackupError,
                      self.driver.backup_volume,
                      ctx,
                      backup,
                      fake_backup_service)
    self.assertEqual(1, fake_create_snapshot.call_count)
    # Two delete calls expected — presumably one pre-create cleanup and one
    # final cleanup; the last one targets the snapshot that was created.
    self.assertEqual(2, fake_delete_snapshot.call_count)
    self.assertEqual(fake_create_snapshot.call_args,
                     fake_delete_snapshot.call_args)
@mock.patch.object(backup_driver, 'BackupDriver')
def test_restore_backup(self, fake_backup_service):
    """restore_backup hands the backup service a SheepdogIOWrapper for the
    target volume id."""
    ctx = {}
    backup = {}
    volume = {'id': '2926efe0-24ab-45b7-95e1-ff66e0646a33',
              'name': 'volume-2926efe0-24ab-45b7-95e1-ff66e0646a33'}
    self.driver.restore_backup(ctx, backup, volume, fake_backup_service)
    (restored_backup, restored_volume_id,
     restored_fd), _ = fake_backup_service.restore.call_args
    self.assertEqual(backup, restored_backup)
    self.assertEqual(volume['id'], restored_volume_id)
    self.assertIsInstance(restored_fd, sheepdog.SheepdogIOWrapper)
| |
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# ext4slower Trace slow ext4 operations.
# For Linux, uses BCC, eBPF.
#
# USAGE: ext4slower [-h] [-j] [-p PID] [min_ms]
#
# This script traces common ext4 file operations: reads, writes, opens, and
# syncs. It measures the time spent in these operations, and prints details
# for each that exceeded a threshold.
#
# WARNING: This adds low-overhead instrumentation to these ext4 operations,
# including reads and writes from the file system cache. Such reads and writes
# can be very frequent (depending on the workload; eg, 1M/sec), at which
# point the overhead of this tool (even if it prints no "slower" events) can
# begin to become significant.
#
# By default, a minimum millisecond threshold of 10 is used.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 11-Feb-2016 Brendan Gregg Created this.
# 15-Oct-2016 Dina Goldshtein -p to filter by process ID.
# 13-Jun-2018 Joe Yin modify generic_file_read_iter to ext4_file_read_iter.
from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime
# symbols
# kallsyms is scanned later to resolve the address of ext4_file_operations,
# which the BPF program uses to filter generic reads down to ext4 files.
kallsyms = "/proc/kallsyms"
# arguments
examples = """examples:
./ext4slower # trace operations slower than 10 ms (default)
./ext4slower 1 # trace operations slower than 1 ms
./ext4slower -j 1 # ... 1 ms, parsable output (csv)
./ext4slower 0 # trace all operations (warning: verbose)
./ext4slower -p 185 # trace PID 185 only
"""
parser = argparse.ArgumentParser(
    description="Trace common ext4 file operations slower than a threshold",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-j", "--csv", action="store_true",
    help="just print fields: comma-separated values")
parser.add_argument("-p", "--pid",
    help="trace this PID only")
# nargs="?" keeps the threshold optional; default is a string so that a
# user-supplied value and the default are parsed identically below.
parser.add_argument("min_ms", nargs="?", default='10',
    help="minimum I/O duration to trace, in ms (default 10)")
# --ebpf dumps the generated C program and exits; hidden from --help.
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
min_ms = int(args.min_ms)
pid = args.pid
csv = args.csv
debug = 0  # set to 1 to print the generated BPF program before loading
# define BPF program
# The C program below instruments ext4 read/write/open/fsync entry points,
# stores a timestamp (and file pointer/offset) per thread id on entry, and
# on return computes the latency and emits one perf event per operation
# that passed the filters. The FILTER_PID / FILTER_US /
# EXT4_FILE_OPERATIONS placeholders are patched with concrete values below
# before the program is compiled.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/dcache.h>
// XXX: switch these to char's when supported
#define TRACE_READ 0
#define TRACE_WRITE 1
#define TRACE_OPEN 2
#define TRACE_FSYNC 3
struct val_t {
u64 ts;
u64 offset;
struct file *fp;
};
struct data_t {
// XXX: switch some to u32's when supported
u64 ts_us;
u64 type;
u64 size;
u64 offset;
u64 delta_us;
u64 pid;
char task[TASK_COMM_LEN];
char file[DNAME_INLINE_LEN];
};
BPF_HASH(entryinfo, u64, struct val_t);
BPF_PERF_OUTPUT(events);
//
// Store timestamp and size on entry
//
// The current ext4 (Linux 4.5) uses generic_file_read_iter(), instead of it's
// own function, for reads. So we need to trace that and then filter on ext4,
// which I do by checking file->f_op.
// The new Linux version (since form 4.10) uses ext4_file_read_iter(), And if the 'CONFIG_FS_DAX'
// is not set ,then ext4_file_read_iter() will call generic_file_read_iter(), else it will call
// ext4_dax_read_iter(), and trace generic_file_read_iter() will fail.
int trace_read_entry(struct pt_regs *ctx, struct kiocb *iocb)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// ext4 filter on file->f_op == ext4_file_operations
struct file *fp = iocb->ki_filp;
if ((u64)fp->f_op != EXT4_FILE_OPERATIONS)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = fp;
val.offset = iocb->ki_pos;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
// ext4_file_write_iter():
int trace_write_entry(struct pt_regs *ctx, struct kiocb *iocb)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = iocb->ki_filp;
val.offset = iocb->ki_pos;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
// ext4_file_open():
int trace_open_entry(struct pt_regs *ctx, struct inode *inode,
struct file *file)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = file;
val.offset = 0;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
// ext4_sync_file():
int trace_fsync_entry(struct pt_regs *ctx, struct file *file)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if (FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = file;
val.offset = 0;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
//
// Output
//
static int trace_return(struct pt_regs *ctx, int type)
{
struct val_t *valp;
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
valp = entryinfo.lookup(&id);
if (valp == 0) {
// missed tracing issue or filtered
return 0;
}
// calculate delta
u64 ts = bpf_ktime_get_ns();
u64 delta_us = (ts - valp->ts) / 1000;
entryinfo.delete(&id);
if (FILTER_US)
return 0;
// populate output struct
u32 size = PT_REGS_RC(ctx);
struct data_t data = {.type = type, .size = size, .delta_us = delta_us,
.pid = pid};
data.ts_us = ts / 1000;
data.offset = valp->offset;
bpf_get_current_comm(&data.task, sizeof(data.task));
// workaround (rewriter should handle file to d_name in one step):
struct dentry *de = NULL;
struct qstr qs = {};
de = valp->fp->f_path.dentry;
qs = de->d_name;
if (qs.len == 0)
return 0;
bpf_probe_read(&data.file, sizeof(data.file), (void *)qs.name);
// output
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
int trace_read_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_READ);
}
int trace_write_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_WRITE);
}
int trace_open_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_OPEN);
}
int trace_fsync_return(struct pt_regs *ctx)
{
return trace_return(ctx, TRACE_FSYNC);
}
"""
# code replacements
# Resolve the kernel address of ext4_file_operations so the BPF read probe
# can filter generic reads down to ext4 files.
ops = ''
with open(kallsyms) as syms:
    for line in syms:
        # /proc/kallsyms format: "<address> <symtype> <name>[\t[module]]"
        (addr, symtype, name) = line.rstrip().split(" ", 2)
        name = name.split("\t")[0]
        if name == "ext4_file_operations":
            ops = "0x" + addr
            break
if ops == '':
    print("ERROR: no ext4_file_operations in /proc/kallsyms. Exiting.")
    print("HINT: the kernel should be built with CONFIG_KALLSYMS_ALL.")
    exit()
bpf_text = bpf_text.replace('EXT4_FILE_OPERATIONS', ops)

# Patch the latency and PID filters directly into the C source.
us_filter = '0' if min_ms == 0 else 'delta_us <= %s' % str(min_ms * 1000)
bpf_text = bpf_text.replace('FILTER_US', us_filter)
pid_filter = 'pid != %s' % pid if args.pid else '0'
bpf_text = bpf_text.replace('FILTER_PID', pid_filter)

if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()
# process event
def print_event(cpu, data, size):
    """Decode one perf event and print it in CSV or columnar form."""
    event = b["events"].event(data)
    # Map the numeric trace type to its one-letter code; anything unknown
    # falls back to 'R' (read), matching TRACE_READ == 0.
    op_type = {1: 'W', 2: 'O', 3: 'S'}.get(event.type, 'R')
    task = event.task.decode('utf-8', 'replace')
    fname = event.file.decode('utf-8', 'replace')
    if (csv):
        print("%d,%s,%d,%s,%d,%d,%d,%s" % (
            event.ts_us, task, event.pid,
            op_type, event.size, event.offset, event.delta_us, fname))
        return
    print("%-8s %-14.14s %-6s %1s %-7s %-8d %7.2f %s" % (strftime("%H:%M:%S"),
        task, event.pid, op_type, event.size,
        event.offset / 1024, float(event.delta_us) / 1000, fname))
# initialize BPF
b = BPF(text=bpf_text)

# Common file functions. See earlier comment about generic_file_read_iter().
# Probe the symbol table once and reuse the answer for both the kprobe and
# the kretprobe, so the entry and return probes can never disagree (the
# original code queried get_kprobe_functions twice).
if BPF.get_kprobe_functions(b'ext4_file_read_iter'):
    read_event = "ext4_file_read_iter"
else:
    read_event = "generic_file_read_iter"
b.attach_kprobe(event=read_event, fn_name="trace_read_entry")
b.attach_kprobe(event="ext4_file_write_iter", fn_name="trace_write_entry")
b.attach_kprobe(event="ext4_file_open", fn_name="trace_open_entry")
b.attach_kprobe(event="ext4_sync_file", fn_name="trace_fsync_entry")
b.attach_kretprobe(event=read_event, fn_name="trace_read_return")
b.attach_kretprobe(event="ext4_file_write_iter", fn_name="trace_write_return")
b.attach_kretprobe(event="ext4_file_open", fn_name="trace_open_return")
b.attach_kretprobe(event="ext4_sync_file", fn_name="trace_fsync_return")

# header
if (csv):
    print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE")
else:
    if min_ms == 0:
        print("Tracing ext4 operations")
    else:
        print("Tracing ext4 operations slower than %d ms" % min_ms)
    print("%-8s %-14s %-6s %1s %-7s %-8s %7s %s" % ("TIME", "COMM", "PID", "T",
        "BYTES", "OFF_KB", "LAT(ms)", "FILENAME"))

# read events
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()
| |
#!/usr/bin/env python
"""The MySQL database methods for flow handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import threading
import time
from future.utils import iteritems
import MySQLdb
from MySQLdb.constants import ER as mysql_errors
from typing import List, Optional, Text
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.util import collection
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import random
from grr_response_server.databases import db
from grr_response_server.databases import db_utils
from grr_response_server.databases import mysql_utils
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
from grr_response_server.rdfvalues import objects as rdf_objects
class MySQLDBFlowMixin(object):
"""MySQLDB mixin for flow handling."""
@mysql_utils.WithTransaction()
def WriteMessageHandlerRequests(self, requests, cursor=None):
  """Writes a list of message handler requests to the database.

  Args:
    requests: An iterable of MessageHandlerRequest objects to persist.
    cursor: MySQL cursor, supplied by the WithTransaction decorator.
  """
  if not requests:
    # Without this guard an empty batch would produce a query ending in a
    # bare "VALUES ", which is a SQL syntax error.
    return
  query = ("INSERT IGNORE INTO message_handler_requests "
           "(handlername, request_id, request) VALUES ")
  value_templates = []
  args = []
  for r in requests:
    args.extend([r.handler_name, r.request_id, r.SerializeToBytes()])
    value_templates.append("(%s, %s, %s)")
  query += ",".join(value_templates)
  cursor.execute(query, args)
@mysql_utils.WithTransaction(readonly=True)
def ReadMessageHandlerRequests(self, cursor=None):
  """Reads all message handler requests from the database."""
  cursor.execute("SELECT UNIX_TIMESTAMP(timestamp), request,"
                 " UNIX_TIMESTAMP(leased_until), leased_by "
                 "FROM message_handler_requests "
                 "ORDER BY timestamp DESC")
  requests = []
  for timestamp, serialized, leased_until, leased_by in cursor.fetchall():
    request = rdf_objects.MessageHandlerRequest.FromSerializedBytes(serialized)
    # Lease metadata lives in separate columns, not in the serialized blob.
    request.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
    request.leased_by = leased_by
    request.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
    requests.append(request)
  return requests
@mysql_utils.WithTransaction()
def DeleteMessageHandlerRequests(self, requests, cursor=None):
  """Deletes a list of message handler requests from the database.

  Args:
    requests: An iterable of MessageHandlerRequest objects to delete.
    cursor: MySQL cursor, supplied by the WithTransaction decorator.
  """
  request_ids = set([r.request_id for r in requests])
  if not request_ids:
    # "IN ()" is a MySQL syntax error and deleting nothing is a no-op.
    return
  query = "DELETE FROM message_handler_requests WHERE request_id IN ({})"
  query = query.format(",".join(["%s"] * len(request_ids)))
  # Pass a list: the DB API expects a sequence of parameters.
  cursor.execute(query, list(request_ids))
def RegisterMessageHandler(self, handler, lease_time, limit=1000):
  """Leases a number of message handler requests up to the indicated limit."""
  # Drop any previously registered handler before installing a new one.
  self.UnregisterMessageHandler()
  if not handler:
    return
  self.handler_stop = False
  self.handler_thread = threading.Thread(
      name="message_handler",
      target=self._MessageHandlerLoop,
      args=(handler, lease_time, limit))
  self.handler_thread.daemon = True
  self.handler_thread.start()
def UnregisterMessageHandler(self, timeout=None):
  """Unregisters any registered message handler.

  Args:
    timeout: Optional number of seconds to wait for the handler thread to
      terminate.

  Raises:
    RuntimeError: The handler thread did not terminate within the timeout.
  """
  if self.handler_thread:
    self.handler_stop = True
    self.handler_thread.join(timeout)
    # Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # supported spelling on both Python 2.6+ and 3.x.
    if self.handler_thread.is_alive():
      raise RuntimeError("Message handler thread did not join in time.")
    self.handler_thread = None
# Seconds the handler loop sleeps between polls when no requests are leased.
_MESSAGE_HANDLER_POLL_TIME_SECS = 5
def _MessageHandlerLoop(self, handler, lease_time, limit):
  """Repeatedly leases handler requests and feeds them to the handler."""
  while not self.handler_stop:
    try:
      leased = self._LeaseMessageHandlerRequests(lease_time, limit)
      if not leased:
        # Nothing pending; back off before polling again.
        time.sleep(self._MESSAGE_HANDLER_POLL_TIME_SECS)
        continue
      handler(leased)
    except Exception as e:  # pylint: disable=broad-except
      logging.exception("_LeaseMessageHandlerRequests raised %s.", e)
@mysql_utils.WithTransaction()
def _LeaseMessageHandlerRequests(self, lease_time, limit, cursor=None):
  """Leases a number of message handler requests up to the indicated limit.

  Args:
    lease_time: Duration for which the leased requests stay leased.
    limit: Maximum number of requests to lease in one call.
    cursor: MySQL cursor, supplied by the WithTransaction decorator.

  Returns:
    A list of leased MessageHandlerRequest objects (possibly empty).
  """
  now = rdfvalue.RDFDatetime.Now()
  now_str = mysql_utils.RDFDatetimeToTimestamp(now)
  expiry = now + lease_time
  expiry_str = mysql_utils.RDFDatetimeToTimestamp(expiry)
  # Step 1: claim up to `limit` unleased (or lease-expired) requests by
  # stamping them with this process id and the new expiry.
  query = ("UPDATE message_handler_requests "
           "SET leased_until=FROM_UNIXTIME(%s), leased_by=%s "
           "WHERE leased_until IS NULL OR leased_until < FROM_UNIXTIME(%s) "
           "LIMIT %s")
  id_str = utils.ProcessIdString()
  args = (expiry_str, id_str, now_str, limit)
  updated = cursor.execute(query, args)
  if updated == 0:
    return []
  # Step 2: read back exactly the rows claimed above; within this
  # transaction the (leased_by, leased_until) pair identifies them.
  cursor.execute(
      "SELECT UNIX_TIMESTAMP(timestamp), request "
      "FROM message_handler_requests "
      "WHERE leased_by=%s AND leased_until=FROM_UNIXTIME(%s) LIMIT %s",
      (id_str, expiry_str, updated))
  res = []
  for timestamp, request in cursor.fetchall():
    req = rdf_objects.MessageHandlerRequest.FromSerializedBytes(request)
    # Lease metadata is not part of the serialized request, so set it from
    # the values just written.
    req.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
    req.leased_until = expiry
    req.leased_by = id_str
    res.append(req)
  return res
@mysql_utils.WithTransaction(readonly=True)
def ReadAllClientActionRequests(self, client_id, cursor=None):
  """Reads all client messages available for a given client_id."""
  cursor.execute(
      "SELECT request, UNIX_TIMESTAMP(leased_until), leased_by, "
      "leased_count "
      "FROM client_action_requests "
      "WHERE client_id = %s", [db_utils.ClientIDToInt(client_id)])
  requests = []
  for serialized, leased_until, leased_by, leased_count in cursor.fetchall():
    request = rdf_flows.ClientActionRequest.FromSerializedBytes(serialized)
    if leased_until is None:
      # Not currently leased; clear any stale lease info from the blob.
      request.leased_by = None
      request.leased_until = None
    else:
      request.leased_by = leased_by
      request.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
    request.ttl = db.Database.CLIENT_MESSAGES_TTL - leased_count
    requests.append(request)
  return sorted(requests, key=lambda req: (req.flow_id, req.request_id))
def DeleteClientActionRequests(self, requests):
  """Deletes a list of client messages from the db."""
  if not requests:
    return
  to_delete = [(r.client_id, r.flow_id, r.request_id) for r in requests]
  # Duplicate keys would make the delete count ambiguous, so reject them.
  if len(set(to_delete)) != len(to_delete):
    raise ValueError(
        "Received multiple copies of the same message to delete.")
  self._DeleteClientActionRequest(to_delete)
@mysql_utils.WithTransaction()
def LeaseClientActionRequests(self,
                              client_id,
                              lease_time=None,
                              limit=None,
                              cursor=None):
  """Leases available client messages for the client with the given id.

  Args:
    client_id: The client whose pending action requests should be leased.
    lease_time: Duration for which the leased requests stay leased.
    limit: Maximum number of requests to lease.
    cursor: MySQL cursor, supplied by the WithTransaction decorator.

  Returns:
    Leased ClientActionRequest objects sorted by (flow_id, request_id).
  """
  now = rdfvalue.RDFDatetime.Now()
  now_str = mysql_utils.RDFDatetimeToTimestamp(now)
  expiry = now + lease_time
  expiry_str = mysql_utils.RDFDatetimeToTimestamp(expiry)
  proc_id_str = utils.ProcessIdString()
  client_id_int = db_utils.ClientIDToInt(client_id)
  # Step 1: claim unleased or lease-expired requests, incrementing their
  # lease counter.
  query = ("UPDATE client_action_requests "
           "SET leased_until=FROM_UNIXTIME(%s), leased_by=%s, "
           "leased_count=leased_count+1 "
           "WHERE client_id=%s AND "
           "(leased_until IS NULL OR leased_until < FROM_UNIXTIME(%s)) "
           "LIMIT %s")
  args = [expiry_str, proc_id_str, client_id_int, now_str, limit]
  num_leased = cursor.execute(query, args)
  if num_leased == 0:
    return []
  # Step 2: read back the rows claimed above, identified by the
  # (leased_by, leased_until) stamp just written.
  query = ("SELECT request, leased_count FROM client_action_requests "
           "WHERE client_id=%s AND leased_until=FROM_UNIXTIME(%s) "
           "AND leased_by=%s")
  cursor.execute(query, [client_id_int, expiry_str, proc_id_str])
  ret = []
  expired = []
  for req, leased_count in cursor.fetchall():
    request = rdf_flows.ClientActionRequest.FromSerializedBytes(req)
    request.leased_by = proc_id_str
    request.leased_until = expiry
    request.ttl = db.Database.CLIENT_MESSAGES_TTL - leased_count
    # > comparison since this check happens after the lease.
    if leased_count > db.Database.CLIENT_MESSAGES_TTL:
      # Requests leased more often than the TTL allows are dropped.
      expired.append((request.client_id, request.flow_id, request.request_id))
    else:
      ret.append(request)
  if expired:
    self._DeleteClientActionRequest(expired, cursor=cursor)
  return sorted(ret, key=lambda req: (req.flow_id, req.request_id))
@mysql_utils.WithTransaction()
def WriteClientActionRequests(self, requests, cursor=None):
  """Writes messages that should go to the client to the db.

  Args:
    requests: An iterable of ClientActionRequest objects to write.
    cursor: MySQL cursor, supplied by the WithTransaction decorator.

  Raises:
    db.AtLeastOneUnknownRequestError: The insert failed with an integrity
      error, i.e. at least one request references an unknown
      (client, flow, request) combination.
  """
  # Existing rows are refreshed via ON DUPLICATE KEY UPDATE rather than
  # rejected.
  query = ("INSERT IGNORE INTO client_action_requests "
           "(client_id, flow_id, request_id, timestamp, request) "
           "VALUES %s ON DUPLICATE KEY UPDATE "
           "timestamp=VALUES(timestamp), request=VALUES(request)")
  # All rows of the batch share one write timestamp.
  now = mysql_utils.RDFDatetimeToTimestamp(rdfvalue.RDFDatetime.Now())
  value_templates = []
  args = []
  for r in requests:
    args.extend([
        db_utils.ClientIDToInt(r.client_id),
        db_utils.FlowIDToInt(r.flow_id), r.request_id, now,
        r.SerializeToBytes()
    ])
    value_templates.append("(%s, %s, %s, FROM_UNIXTIME(%s), %s)")
  query %= ",".join(value_templates)
  try:
    cursor.execute(query, args)
  except MySQLdb.IntegrityError as e:
    request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests]
    raise db.AtLeastOneUnknownRequestError(request_keys=request_keys, cause=e)
@mysql_utils.WithTransaction()
def WriteFlowObject(self, flow_obj, allow_update=True, cursor=None):
  """Writes a flow object to the database.

  Args:
    flow_obj: The flow object to write.
    allow_update: If True, an existing (client_id, flow_id) row is updated
      in place; if False, a duplicate raises FlowExistsError.
    cursor: MySQL cursor, supplied by the WithTransaction decorator.

  Raises:
    db.FlowExistsError: allow_update is False and the flow already exists.
    db.UnknownClientError: The insert failed with a non-duplicate integrity
      error, attributed to the client row being missing.
  """
  query = """
INSERT INTO flows (client_id, flow_id, long_flow_id, parent_flow_id,
parent_hunt_id, flow, flow_state,
next_request_to_process, pending_termination, timestamp,
network_bytes_sent, user_cpu_time_used_micros,
system_cpu_time_used_micros, num_replies_sent, last_update)
VALUES (%(client_id)s, %(flow_id)s, %(long_flow_id)s, %(parent_flow_id)s,
%(parent_hunt_id)s, %(flow)s, %(flow_state)s,
%(next_request_to_process)s, %(pending_termination)s,
FROM_UNIXTIME(%(timestamp)s),
%(network_bytes_sent)s, %(user_cpu_time_used_micros)s,
%(system_cpu_time_used_micros)s, %(num_replies_sent)s, NOW(6))"""
  if allow_update:
    query += """
ON DUPLICATE KEY UPDATE
flow=VALUES(flow),
flow_state=VALUES(flow_state),
next_request_to_process=VALUES(next_request_to_process),
last_update=VALUES(last_update)"""
  # CPU times are stored as integer microseconds.
  user_cpu_time_used_micros = db_utils.SecondsToMicros(
      flow_obj.cpu_time_used.user_cpu_time)
  system_cpu_time_used_micros = db_utils.SecondsToMicros(
      flow_obj.cpu_time_used.system_cpu_time)
  args = {
      "client_id": db_utils.ClientIDToInt(flow_obj.client_id),
      "flow_id": db_utils.FlowIDToInt(flow_obj.flow_id),
      "long_flow_id": flow_obj.long_flow_id,
      "flow": flow_obj.SerializeToBytes(),
      "flow_state": int(flow_obj.flow_state),
      "next_request_to_process": flow_obj.next_request_to_process,
      "timestamp": mysql_utils.RDFDatetimeToTimestamp(flow_obj.create_time),
      "network_bytes_sent": flow_obj.network_bytes_sent,
      "num_replies_sent": flow_obj.num_replies_sent,
      "user_cpu_time_used_micros": user_cpu_time_used_micros,
      "system_cpu_time_used_micros": system_cpu_time_used_micros,
  }
  # Optional parent links are NULL when absent.
  if flow_obj.parent_flow_id:
    args["parent_flow_id"] = db_utils.FlowIDToInt(flow_obj.parent_flow_id)
  else:
    args["parent_flow_id"] = None
  if flow_obj.parent_hunt_id:
    args["parent_hunt_id"] = db_utils.HuntIDToInt(flow_obj.parent_hunt_id)
  else:
    args["parent_hunt_id"] = None
  if flow_obj.HasField("pending_termination"):
    serialized_termination = flow_obj.pending_termination.SerializeToBytes()
    args["pending_termination"] = serialized_termination
  else:
    args["pending_termination"] = None
  try:
    cursor.execute(query, args)
  except MySQLdb.IntegrityError as e:
    # Duplicate-key means the flow already exists (only reachable when
    # allow_update is False); any other integrity error is mapped to an
    # unknown client.
    if e.args[0] == mysql_errors.DUP_ENTRY:
      raise db.FlowExistsError(flow_obj.client_id, flow_obj.flow_id)
    else:
      raise db.UnknownClientError(flow_obj.client_id, cause=e)
def _FlowObjectFromRow(self, row):
  """Generates a flow object from a database row.

  The row must contain the columns listed in FLOW_DB_FIELDS, in that exact
  order.
  """
  # Positional unpack; order matches FLOW_DB_FIELDS:
  # flow, flow_state, client_crash_info, pending_termination,
  # next_request_to_process, processing_deadline, processing_on,
  # processing_since, user cpu micros, system cpu micros,
  # network_bytes_sent, num_replies_sent, timestamp, last_update.
  flow, fs, cci, pt, nr, pd, po, ps, uct, sct, nbs, nrs, ts, lut = row
  flow_obj = rdf_flow_objects.Flow.FromSerializedBytes(flow)
  # The denormalized columns override the serialized flow, but only when
  # they carry a meaningful value.
  if fs not in [None, rdf_flow_objects.Flow.FlowState.UNSET]:
    flow_obj.flow_state = fs
  if cci is not None:
    cc_cls = rdf_client.ClientCrash
    flow_obj.client_crash_info = cc_cls.FromSerializedBytes(cci)
  if pt is not None:
    pt_cls = rdf_flow_objects.PendingFlowTermination
    flow_obj.pending_termination = pt_cls.FromSerializedBytes(pt)
  if nr:
    flow_obj.next_request_to_process = nr
  if pd is not None:
    flow_obj.processing_deadline = mysql_utils.TimestampToRDFDatetime(pd)
  if po is not None:
    flow_obj.processing_on = po
  if ps is not None:
    flow_obj.processing_since = mysql_utils.TimestampToRDFDatetime(ps)
  flow_obj.cpu_time_used.user_cpu_time = db_utils.MicrosToSeconds(uct)
  flow_obj.cpu_time_used.system_cpu_time = db_utils.MicrosToSeconds(sct)
  flow_obj.network_bytes_sent = nbs
  if nrs:
    flow_obj.num_replies_sent = nrs
  flow_obj.timestamp = mysql_utils.TimestampToRDFDatetime(ts)
  flow_obj.last_update_time = mysql_utils.TimestampToRDFDatetime(lut)
  return flow_obj
# Column list shared by all flow SELECTs. Must stay in sync with the
# positional unpacking in _FlowObjectFromRow. Note the trailing space so
# that "SELECT " + FLOW_DB_FIELDS + "FROM flows ..." concatenates into
# valid SQL.
FLOW_DB_FIELDS = ("flow, "
                  "flow_state, "
                  "client_crash_info, "
                  "pending_termination, "
                  "next_request_to_process, "
                  "UNIX_TIMESTAMP(processing_deadline), "
                  "processing_on, "
                  "UNIX_TIMESTAMP(processing_since), "
                  "user_cpu_time_used_micros, "
                  "system_cpu_time_used_micros, "
                  "network_bytes_sent, "
                  "num_replies_sent, "
                  "UNIX_TIMESTAMP(timestamp), "
                  "UNIX_TIMESTAMP(last_update) ")
@mysql_utils.WithTransaction(readonly=True)
def ReadFlowObject(self, client_id, flow_id, cursor=None):
  """Reads a flow object from the database."""
  cursor.execute(
      "SELECT " + self.FLOW_DB_FIELDS +
      "FROM flows WHERE client_id=%s AND flow_id=%s",
      [db_utils.ClientIDToInt(client_id),
       db_utils.FlowIDToInt(flow_id)])
  rows = cursor.fetchall()
  if not rows:
    raise db.UnknownFlowError(client_id, flow_id)
  # (client_id, flow_id) is unique, so there is exactly one row.
  (row,) = rows
  return self._FlowObjectFromRow(row)
@mysql_utils.WithTransaction(readonly=True)
def ReadAllFlowObjects(self,
                       client_id=None,
                       min_create_time=None,
                       max_create_time=None,
                       include_child_flows=True,
                       cursor=None):
  """Returns all flow objects matching the given optional filters."""
  where = []
  args = []
  if client_id is not None:
    where.append("client_id = %s")
    args.append(db_utils.ClientIDToInt(client_id))
  if min_create_time is not None:
    where.append("timestamp >= FROM_UNIXTIME(%s)")
    args.append(mysql_utils.RDFDatetimeToTimestamp(min_create_time))
  if max_create_time is not None:
    where.append("timestamp <= FROM_UNIXTIME(%s)")
    args.append(mysql_utils.RDFDatetimeToTimestamp(max_create_time))
  if not include_child_flows:
    # Child flows have a parent_flow_id; top-level flows do not.
    where.append("parent_flow_id IS NULL")
  query = "SELECT {} FROM flows".format(self.FLOW_DB_FIELDS)
  if where:
    query += " WHERE " + " AND ".join(where)
  cursor.execute(query, args)
  return [self._FlowObjectFromRow(row) for row in cursor.fetchall()]
@mysql_utils.WithTransaction(readonly=True)
def ReadChildFlowObjects(self, client_id, flow_id, cursor=None):
  """Reads flows that were started by a given flow from the database."""
  cursor.execute(
      "SELECT " + self.FLOW_DB_FIELDS +
      "FROM flows WHERE client_id=%s AND parent_flow_id=%s",
      [db_utils.ClientIDToInt(client_id),
       db_utils.FlowIDToInt(flow_id)])
  return [self._FlowObjectFromRow(row) for row in cursor.fetchall()]
@mysql_utils.WithTransaction()
def LeaseFlowForProcessing(self,
                           client_id,
                           flow_id,
                           processing_time,
                           cursor=None):
  """Marks a flow as being processed on this worker and returns it.

  Args:
    client_id: The client the flow belongs to.
    flow_id: The flow to lease.
    processing_time: Duration of the processing lease.
    cursor: MySQL cursor, supplied by the WithTransaction decorator.

  Returns:
    The flow object, with processing_on/since/deadline set to the new
    lease values.

  Raises:
    db.UnknownFlowError: The flow does not exist.
    ValueError: The flow is already leased and the lease has not expired.
    db.ParentHuntIsNotRunningError: The parent hunt is not in a state
      suitable for flow processing.
  """
  query = ("SELECT " + self.FLOW_DB_FIELDS +
           "FROM flows WHERE client_id=%s AND flow_id=%s")
  cursor.execute(
      query,
      [db_utils.ClientIDToInt(client_id),
       db_utils.FlowIDToInt(flow_id)])
  response = cursor.fetchall()
  if not response:
    raise db.UnknownFlowError(client_id, flow_id)
  row, = response
  rdf_flow = self._FlowObjectFromRow(row)
  now = rdfvalue.RDFDatetime.Now()
  # A lease whose deadline has passed may be taken over; only a live lease
  # blocks processing.
  if rdf_flow.processing_on and rdf_flow.processing_deadline > now:
    raise ValueError("Flow %s on client %s is already being processed." %
                     (flow_id, client_id))
  if rdf_flow.parent_hunt_id is not None:
    # Hunt-induced flows must not be processed while the hunt itself is
    # not runnable.
    query = "SELECT hunt_state FROM hunts WHERE hunt_id=%s"
    args = [db_utils.HuntIDToInt(rdf_flow.parent_hunt_id)]
    rows_found = cursor.execute(query, args)
    if rows_found == 1:
      hunt_state, = cursor.fetchone()
      if (hunt_state is not None and
          not rdf_hunt_objects.IsHuntSuitableForFlowProcessing(hunt_state)):
        raise db.ParentHuntIsNotRunningError(client_id, flow_id,
                                             rdf_flow.parent_hunt_id,
                                             hunt_state)
  update_query = ("UPDATE flows SET "
                  "processing_on=%s, "
                  "processing_since=FROM_UNIXTIME(%s), "
                  "processing_deadline=FROM_UNIXTIME(%s) "
                  "WHERE client_id=%s and flow_id=%s")
  processing_deadline = now + processing_time
  process_id_string = utils.ProcessIdString()
  args = [
      process_id_string,
      mysql_utils.RDFDatetimeToTimestamp(now),
      mysql_utils.RDFDatetimeToTimestamp(processing_deadline),
      db_utils.ClientIDToInt(client_id),
      db_utils.FlowIDToInt(flow_id)
  ]
  cursor.execute(update_query, args)
  # This needs to happen after we are sure that the write has succeeded.
  rdf_flow.processing_on = process_id_string
  rdf_flow.processing_since = now
  rdf_flow.processing_deadline = processing_deadline
  return rdf_flow
@mysql_utils.WithTransaction()
def UpdateFlow(self,
               client_id,
               flow_id,
               flow_obj=db.Database.unchanged,
               flow_state=db.Database.unchanged,
               client_crash_info=db.Database.unchanged,
               pending_termination=db.Database.unchanged,
               processing_on=db.Database.unchanged,
               processing_since=db.Database.unchanged,
               processing_deadline=db.Database.unchanged,
               cursor=None):
  """Updates flow objects in the database.

  Every optional argument defaults to the db.Database.unchanged sentinel;
  only fields that were explicitly passed are written.

  Raises:
    db.UnknownFlowError: No row matched (client_id, flow_id).
  """
  updates = []
  args = []
  if flow_obj != db.Database.unchanged:
    # Writing the whole flow also refreshes the denormalized columns kept
    # alongside the serialized blob.
    updates.append("flow=%s")
    args.append(flow_obj.SerializeToBytes())
    updates.append("flow_state=%s")
    args.append(int(flow_obj.flow_state))
    updates.append("user_cpu_time_used_micros=%s")
    args.append(
        db_utils.SecondsToMicros(flow_obj.cpu_time_used.user_cpu_time))
    updates.append("system_cpu_time_used_micros=%s")
    args.append(
        db_utils.SecondsToMicros(flow_obj.cpu_time_used.system_cpu_time))
    updates.append("network_bytes_sent=%s")
    args.append(flow_obj.network_bytes_sent)
    updates.append("num_replies_sent=%s")
    args.append(flow_obj.num_replies_sent)
  if flow_state != db.Database.unchanged:
    updates.append("flow_state=%s")
    args.append(int(flow_state))
  if client_crash_info != db.Database.unchanged:
    updates.append("client_crash_info=%s")
    args.append(client_crash_info.SerializeToBytes())
  if pending_termination != db.Database.unchanged:
    updates.append("pending_termination=%s")
    args.append(pending_termination.SerializeToBytes())
  if processing_on != db.Database.unchanged:
    updates.append("processing_on=%s")
    args.append(processing_on)
  if processing_since != db.Database.unchanged:
    updates.append("processing_since=FROM_UNIXTIME(%s)")
    args.append(mysql_utils.RDFDatetimeToTimestamp(processing_since))
  if processing_deadline != db.Database.unchanged:
    updates.append("processing_deadline=FROM_UNIXTIME(%s)")
    args.append(mysql_utils.RDFDatetimeToTimestamp(processing_deadline))
  if not updates:
    return
  # last_update is always refreshed when anything else changes.
  query = "UPDATE flows SET last_update=NOW(6), "
  query += ", ".join(updates)
  query += " WHERE client_id=%s AND flow_id=%s"
  args.append(db_utils.ClientIDToInt(client_id))
  args.append(db_utils.FlowIDToInt(flow_id))
  updated = cursor.execute(query, args)
  if updated == 0:
    raise db.UnknownFlowError(client_id, flow_id)
@mysql_utils.WithTransaction()
def UpdateFlows(self,
                client_id_flow_id_pairs,
                pending_termination=db.Database.unchanged,
                cursor=None):
  """Updates flow objects in the database.

  Args:
    client_id_flow_id_pairs: Iterable of (client_id, flow_id) tuples
      identifying the flows to update.
    pending_termination: PendingFlowTermination to set on all given flows;
      defaults to the db.Database.unchanged sentinel (no-op).
    cursor: MySQL cursor, supplied by the WithTransaction decorator.
  """
  if pending_termination == db.Database.unchanged:
    return
  pairs = list(client_id_flow_id_pairs)
  if not pairs:
    # Without this guard an empty iterable would produce a query ending in
    # a bare "WHERE ", which is a SQL syntax error.
    return
  serialized_termination = pending_termination.SerializeToBytes()
  conditions = []
  args = [serialized_termination]
  for client_id, flow_id in pairs:
    # Parenthesize each pair explicitly instead of relying on AND binding
    # tighter than OR.
    conditions.append("(client_id=%s AND flow_id=%s)")
    args.append(db_utils.ClientIDToInt(client_id))
    args.append(db_utils.FlowIDToInt(flow_id))
  query = ("UPDATE flows SET pending_termination=%s WHERE " +
           " OR ".join(conditions))
  cursor.execute(query, args)
def _WriteFlowProcessingRequests(self, requests, cursor):
  """Inserts the given flow processing requests using the given cursor."""
  templates = []
  args = []
  for req in requests:
    if req.delivery_time:
      delivery = mysql_utils.RDFDatetimeToTimestamp(req.delivery_time)
    else:
      delivery = None
    templates.append("(%s, %s, %s, FROM_UNIXTIME(%s))")
    args.extend([
        db_utils.ClientIDToInt(req.client_id),
        db_utils.FlowIDToInt(req.flow_id),
        req.SerializeToBytes(),
        delivery,
    ])
  query = ("INSERT INTO flow_processing_requests "
           "(client_id, flow_id, request, delivery_time) VALUES " +
           ", ".join(templates))
  cursor.execute(query, args)
@mysql_utils.WithTransaction()
def WriteFlowRequests(self, requests, cursor=None):
  """Writes a list of flow requests to the database.

  For every request marked needs_processing whose request_id equals the
  flow's next_request_to_process, a flow processing request is also queued
  so that a worker picks the flow up.

  Args:
    requests: An iterable of FlowRequest objects.
    cursor: MySQL cursor, supplied by the WithTransaction decorator.

  Raises:
    db.AtLeastOneUnknownFlowError: At least one request references a flow
      that does not exist.
  """
  args = []
  templates = []
  flow_keys = []
  needs_processing = {}
  for r in requests:
    if r.needs_processing:
      needs_processing.setdefault((r.client_id, r.flow_id), []).append(r)
    flow_keys.append((r.client_id, r.flow_id))
    templates.append("(%s, %s, %s, %s, %s)")
    args.extend([
        db_utils.ClientIDToInt(r.client_id),
        db_utils.FlowIDToInt(r.flow_id), r.request_id, r.needs_processing,
        r.SerializeToBytes()
    ])
  if needs_processing:
    flow_processing_requests = []
    # Look up next_request_to_process for each affected flow in one query.
    nr_conditions = []
    nr_args = []
    for client_id, flow_id in needs_processing:
      nr_conditions.append("(client_id=%s AND flow_id=%s)")
      nr_args.append(db_utils.ClientIDToInt(client_id))
      nr_args.append(db_utils.FlowIDToInt(flow_id))
    nr_query = ("SELECT client_id, flow_id, next_request_to_process "
                "FROM flows WHERE ")
    nr_query += " OR ".join(nr_conditions)
    cursor.execute(nr_query, nr_args)
    db_result = cursor.fetchall()
    for client_id_int, flow_id_int, next_request_to_process in db_result:
      client_id = db_utils.IntToClientID(client_id_int)
      flow_id = db_utils.IntToFlowID(flow_id_int)
      candidate_requests = needs_processing.get((client_id, flow_id), [])
      for r in candidate_requests:
        # Only the request the flow is actually waiting on triggers a
        # processing request.
        if next_request_to_process == r.request_id:
          flow_processing_requests.append(
              rdf_flows.FlowProcessingRequest(
                  client_id=client_id,
                  flow_id=flow_id,
                  delivery_time=r.start_time))
    if flow_processing_requests:
      self._WriteFlowProcessingRequests(flow_processing_requests, cursor)
  query = ("INSERT INTO flow_requests "
           "(client_id, flow_id, request_id, needs_processing, request) "
           "VALUES ")
  query += ", ".join(templates)
  try:
    cursor.execute(query, args)
  except MySQLdb.IntegrityError as e:
    raise db.AtLeastOneUnknownFlowError(flow_keys, cause=e)
def _WriteResponses(self, responses, cursor):
"""Builds the writes to store the given responses in the db."""
query = ("INSERT IGNORE INTO flow_responses "
"(client_id, flow_id, request_id, response_id, "
"response, status, iterator, timestamp) VALUES ")
templates = []
args = []
for r in responses:
templates.append("(%s, %s, %s, %s, %s, %s, %s, NOW(6))")
client_id_int = db_utils.ClientIDToInt(r.client_id)
flow_id_int = db_utils.FlowIDToInt(r.flow_id)
args.append(client_id_int)
args.append(flow_id_int)
args.append(r.request_id)
args.append(r.response_id)
if isinstance(r, rdf_flow_objects.FlowResponse):
args.append(r.SerializeToBytes())
args.append("")
args.append("")
elif isinstance(r, rdf_flow_objects.FlowStatus):
args.append("")
args.append(r.SerializeToBytes())
args.append("")
elif isinstance(r, rdf_flow_objects.FlowIterator):
args.append("")
args.append("")
args.append(r.SerializeToBytes())
else:
# This can't really happen due to db api type checking.
raise ValueError("Got unexpected response type: %s %s" % (type(r), r))
query += ",".join(templates)
try:
cursor.execute(query, args)
except MySQLdb.IntegrityError:
# If we have multiple responses and one of them fails to insert, we try
# them one by one so we don't lose any valid replies.
if len(responses) > 1:
for r in responses:
self._WriteResponses([r], cursor)
else:
logging.warn("Response for unknown request: %s", responses[0])
@mysql_utils.WithTransaction()
def _DeleteClientActionRequest(self, to_delete, cursor=None):
"""Builds deletes for client messages."""
query = "DELETE FROM client_action_requests WHERE "
conditions = []
args = []
for client_id, flow_id, request_id in to_delete:
conditions.append("(client_id=%s AND flow_id=%s AND request_id=%s)")
args.append(db_utils.ClientIDToInt(client_id))
args.append(db_utils.FlowIDToInt(flow_id))
args.append(request_id)
query += " OR ".join(conditions)
cursor.execute(query, args)
  @mysql_utils.WithTransaction()
  def _WriteFlowResponsesAndExpectedUpdates(self, responses, cursor=None):
    """Writes flow responses and updates the requests' expected counts.

    Args:
      responses: FlowResponse/FlowStatus/FlowIterator objects to store.
      cursor: MySQL cursor supplied by the transaction decorator.
    """
    self._WriteResponses(responses, cursor)
    query = """
      UPDATE flow_requests
      SET responses_expected=%(responses_expected)s
      WHERE
        client_id = %(client_id)s AND
        flow_id = %(flow_id)s AND
        request_id = %(request_id)s
    """
    for r in responses:
      # If the response is a FlowStatus, we have to update the FlowRequest with
      # the number of expected messages. A status carries the highest
      # response_id of its request, which equals the expected count.
      if isinstance(r, rdf_flow_objects.FlowStatus):
        args = {
            "client_id": db_utils.ClientIDToInt(r.client_id),
            "flow_id": db_utils.FlowIDToInt(r.flow_id),
            "request_id": r.request_id,
            "responses_expected": r.response_id,
        }
        cursor.execute(query, args)
def _ReadFlowResponseCounts(self, request_keys, cursor=None):
"""Reads counts of responses for the given requests."""
query = """
SELECT
flow_requests.client_id, flow_requests.flow_id,
flow_requests.request_id, COUNT(*)
FROM flow_responses, flow_requests
WHERE ({conditions}) AND
flow_requests.client_id = flow_responses.client_id AND
flow_requests.flow_id = flow_responses.flow_id AND
flow_requests.request_id = flow_responses.request_id AND
flow_requests.needs_processing = FALSE
GROUP BY
flow_requests.client_id,
flow_requests.flow_id,
flow_requests.request_id
"""
condition_template = """
(flow_requests.client_id=%s AND
flow_requests.flow_id=%s AND
flow_requests.request_id=%s)"""
conditions = [condition_template] * len(request_keys)
args = []
for client_id, flow_id, request_id in request_keys:
args.append(db_utils.ClientIDToInt(client_id))
args.append(db_utils.FlowIDToInt(flow_id))
args.append(request_id)
query = query.format(conditions=" OR ".join(conditions))
cursor.execute(query, args)
response_counts = {}
for (client_id_int, flow_id_int, request_id, count) in cursor.fetchall():
request_key = (db_utils.IntToClientID(client_id_int),
db_utils.IntToFlowID(flow_id_int), request_id)
response_counts[request_key] = count
return response_counts
def _ReadAndLockNextRequestsToProcess(self, flow_keys, cursor):
"""Reads and locks the next_request_to_process for a number of flows."""
query = """
SELECT client_id, flow_id, next_request_to_process
FROM flows
WHERE {conditions}
FOR UPDATE
"""
condition_template = "(client_id = %s AND flow_id = %s)"
conditions = [condition_template] * len(flow_keys)
query = query.format(conditions=" OR ".join(conditions))
args = []
for client_id, flow_id in flow_keys:
args.append(db_utils.ClientIDToInt(client_id))
args.append(db_utils.FlowIDToInt(flow_id))
cursor.execute(query, args)
next_requests = {}
for client_id_int, flow_id_int, next_request in cursor.fetchall():
flow_key = (db_utils.IntToClientID(client_id_int),
db_utils.IntToFlowID(flow_id_int))
next_requests[flow_key] = next_request
return next_requests
  def _ReadLockAndUpdateCompletedRequests(self, request_keys, response_counts,
                                          cursor):
    """Reads, locks, and updates completed requests.

    A request counts as complete when its stored response count equals its
    responses_expected value; such requests are locked (FOR UPDATE), read,
    and then flipped to needs_processing = TRUE in a second statement that
    reuses the same conditions and args.

    Args:
      request_keys: Iterable of (client_id, flow_id, request_id) tuples.
      response_counts: Dict mapping request keys to stored response counts,
        as returned by _ReadFlowResponseCounts.
      cursor: MySQL cursor to run the queries on.

    Returns:
      A dict mapping each completed request's key to its deserialized
      FlowRequest.
    """
    condition_template = """
      (flow_requests.client_id = %s AND
       flow_requests.flow_id = %s AND
       flow_requests.request_id = %s AND
       responses_expected = %s)"""
    args = []
    conditions = []
    completed_requests = {}
    for request_key in request_keys:
      client_id, flow_id, request_id = request_key
      # Requests without any stored responses can't be complete; skip them.
      if request_key in response_counts:
        conditions.append(condition_template)
        args.append(db_utils.ClientIDToInt(client_id))
        args.append(db_utils.FlowIDToInt(flow_id))
        args.append(request_id)
        args.append(response_counts[request_key])
    if not args:
      return completed_requests
    query = """
    SELECT client_id, flow_id, request_id, request
    FROM flow_requests
    WHERE ({conditions}) AND NOT needs_processing
    FOR UPDATE
    """
    query = query.format(conditions=" OR ".join(conditions))
    cursor.execute(query, args)
    for client_id_int, flow_id_int, request_id, request in cursor.fetchall():
      request_key = (db_utils.IntToClientID(client_id_int),
                     db_utils.IntToFlowID(flow_id_int), request_id)
      r = rdf_flow_objects.FlowRequest.FromSerializedBytes(request)
      completed_requests[request_key] = r
    # Mark the same (still locked) rows as ready for processing; conditions
    # and args are identical to the SELECT above by construction.
    query = """
    UPDATE flow_requests
    SET needs_processing = TRUE
    WHERE ({conditions}) AND NOT needs_processing
    """
    query = query.format(conditions=" OR ".join(conditions))
    cursor.execute(query, args)
    return completed_requests
@mysql_utils.WithTransaction()
def _UpdateRequestsAndScheduleFPRs(self, responses, cursor=None):
"""Updates requests and writes FlowProcessingRequests if needed."""
request_keys = set(
(r.client_id, r.flow_id, r.request_id) for r in responses)
flow_keys = set((r.client_id, r.flow_id) for r in responses)
response_counts = self._ReadFlowResponseCounts(request_keys, cursor)
next_requests = self._ReadAndLockNextRequestsToProcess(flow_keys, cursor)
completed_requests = self._ReadLockAndUpdateCompletedRequests(
request_keys, response_counts, cursor)
if not completed_requests:
return completed_requests
fprs_to_write = []
for request_key, r in iteritems(completed_requests):
client_id, flow_id, request_id = request_key
if next_requests[(client_id, flow_id)] == request_id:
fprs_to_write.append(
rdf_flows.FlowProcessingRequest(
client_id=r.client_id,
flow_id=r.flow_id,
delivery_time=r.start_time))
if fprs_to_write:
self._WriteFlowProcessingRequests(fprs_to_write, cursor)
return completed_requests
@db_utils.CallLoggedAndAccounted
def WriteFlowResponses(self, responses):
"""Writes FlowMessages and updates corresponding requests."""
if not responses:
return
for batch in collection.Batch(responses, self._WRITE_ROWS_BATCH_SIZE):
self._WriteFlowResponsesAndExpectedUpdates(batch)
completed_requests = self._UpdateRequestsAndScheduleFPRs(batch)
if completed_requests:
self._DeleteClientActionRequest(completed_requests)
@mysql_utils.WithTransaction()
def DeleteFlowRequests(self, requests, cursor=None):
"""Deletes a list of flow requests from the database."""
if not requests:
return
for batch in collection.Batch(requests, self._DELETE_ROWS_BATCH_SIZE):
# Each iteration might delete more than BATCH_SIZE flow_responses.
# This is acceptable, because batching should only prevent the statement
# size from growing too large.
conditions = []
args = []
for r in batch:
conditions.append("(client_id=%s AND flow_id=%s AND request_id=%s)")
args.append(db_utils.ClientIDToInt(r.client_id))
args.append(db_utils.FlowIDToInt(r.flow_id))
args.append(r.request_id)
req_query = "DELETE FROM flow_requests WHERE " + " OR ".join(conditions)
res_query = "DELETE FROM flow_responses WHERE " + " OR ".join(conditions)
cursor.execute(res_query, args)
cursor.execute(req_query, args)
  @mysql_utils.WithTransaction(readonly=True)
  def ReadAllFlowRequestsAndResponses(self, client_id, flow_id, cursor=None):
    """Reads all requests and responses for a given flow from the database.

    Args:
      client_id: Client of the flow.
      flow_id: Flow to read requests/responses for.
      cursor: MySQL cursor supplied by the transaction decorator.

    Returns:
      A list of (FlowRequest, {response_id: response}) tuples sorted by
      request_id.
    """
    query = ("SELECT request, needs_processing, responses_expected, "
             "UNIX_TIMESTAMP(timestamp) "
             "FROM flow_requests WHERE client_id=%s AND flow_id=%s")
    args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]
    cursor.execute(query, args)
    requests = []
    for req, needs_processing, resp_expected, ts in cursor.fetchall():
      request = rdf_flow_objects.FlowRequest.FromSerializedBytes(req)
      # The DB columns are authoritative for these fields, not the
      # serialized blob.
      request.needs_processing = needs_processing
      request.nr_responses_expected = resp_expected
      request.timestamp = mysql_utils.TimestampToRDFDatetime(ts)
      requests.append(request)
    query = ("SELECT response, status, iterator, UNIX_TIMESTAMP(timestamp) "
             "FROM flow_responses WHERE client_id=%s AND flow_id=%s")
    cursor.execute(query, args)
    responses = {}
    for res, status, iterator, ts in cursor.fetchall():
      # Exactly one of response/status/iterator is non-empty per row; it
      # determines the deserialized type.
      if status:
        response = rdf_flow_objects.FlowStatus.FromSerializedBytes(status)
      elif iterator:
        response = rdf_flow_objects.FlowIterator.FromSerializedBytes(iterator)
      else:
        response = rdf_flow_objects.FlowResponse.FromSerializedBytes(res)
      response.timestamp = mysql_utils.TimestampToRDFDatetime(ts)
      responses.setdefault(response.request_id,
                           {})[response.response_id] = response
    ret = []
    for req in sorted(requests, key=lambda r: r.request_id):
      ret.append((req, responses.get(req.request_id, {})))
    return ret
@mysql_utils.WithTransaction()
def DeleteAllFlowRequestsAndResponses(self, client_id, flow_id, cursor=None):
"""Deletes all requests and responses for a given flow from the database."""
args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]
res_query = "DELETE FROM flow_responses WHERE client_id=%s AND flow_id=%s"
cursor.execute(res_query, args)
req_query = "DELETE FROM flow_requests WHERE client_id=%s AND flow_id=%s"
cursor.execute(req_query, args)
  @mysql_utils.WithTransaction(readonly=True)
  def ReadFlowRequestsReadyForProcessing(self,
                                         client_id,
                                         flow_id,
                                         next_needed_request,
                                         cursor=None):
    """Reads all requests for a flow that can be processed by the worker.

    Args:
      client_id: Client of the flow.
      flow_id: Flow whose requests are read.
      next_needed_request: The flow's next request id; only the contiguous
        run of ready requests starting here is returned.
      cursor: MySQL cursor supplied by the transaction decorator.

    Returns:
      A dict mapping request_id to (FlowRequest, [responses sorted by
      response_id]).
    """
    query = ("SELECT request, needs_processing, responses_expected, "
             "UNIX_TIMESTAMP(timestamp) "
             "FROM flow_requests "
             "WHERE client_id=%s AND flow_id=%s")
    args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]
    cursor.execute(query, args)
    requests = {}
    for req, needs_processing, responses_expected, ts in cursor.fetchall():
      # Only requests already marked ready can be handed to the worker.
      if not needs_processing:
        continue
      request = rdf_flow_objects.FlowRequest.FromSerializedBytes(req)
      request.needs_processing = needs_processing
      request.nr_responses_expected = responses_expected
      request.timestamp = mysql_utils.TimestampToRDFDatetime(ts)
      requests[request.request_id] = request
    query = ("SELECT response, status, iterator, UNIX_TIMESTAMP(timestamp) "
             "FROM flow_responses "
             "WHERE client_id=%s AND flow_id=%s")
    cursor.execute(query, args)
    responses = {}
    for res, status, iterator, ts in cursor.fetchall():
      # Exactly one of response/status/iterator is non-empty per row.
      if status:
        response = rdf_flow_objects.FlowStatus.FromSerializedBytes(status)
      elif iterator:
        response = rdf_flow_objects.FlowIterator.FromSerializedBytes(iterator)
      else:
        response = rdf_flow_objects.FlowResponse.FromSerializedBytes(res)
      response.timestamp = mysql_utils.TimestampToRDFDatetime(ts)
      responses.setdefault(response.request_id, []).append(response)
    res = {}
    # Walk the contiguous run of ready requests; stop at the first gap so
    # the worker processes requests strictly in order.
    while next_needed_request in requests:
      req = requests[next_needed_request]
      sorted_responses = sorted(
          responses.get(next_needed_request, []), key=lambda r: r.response_id)
      res[req.request_id] = (req, sorted_responses)
      next_needed_request += 1
    return res
  @mysql_utils.WithTransaction()
  def ReleaseProcessedFlow(self, flow_obj, cursor=None):
    """Releases a flow that the worker was processing to the database.

    The UPDATE joins against the flow's next request: if that request is
    already marked needs_processing, the release is refused (0 rows
    updated) so the caller knows more work arrived while it was
    processing.

    Args:
      flow_obj: The processed flow object to write back.
      cursor: MySQL cursor supplied by the transaction decorator.

    Returns:
      True if the flow was released (exactly one row updated), False if
      the flow needs further processing.
    """
    update_query = """
    UPDATE flows
    LEFT OUTER JOIN (
      SELECT client_id, flow_id, needs_processing
      FROM flow_requests
      WHERE
        client_id = %(client_id)s AND
        flow_id = %(flow_id)s AND
        request_id = %(next_request_to_process)s AND
        needs_processing
    ) AS needs_processing
    ON
      flows.client_id = needs_processing.client_id AND
      flows.flow_id = needs_processing.flow_id
    SET
      flows.flow = %(flow)s,
      flows.processing_on = NULL,
      flows.processing_since = NULL,
      flows.processing_deadline = NULL,
      flows.next_request_to_process = %(next_request_to_process)s,
      flows.flow_state = %(flow_state)s,
      flows.user_cpu_time_used_micros = %(user_cpu_time_used_micros)s,
      flows.system_cpu_time_used_micros = %(system_cpu_time_used_micros)s,
      flows.network_bytes_sent = %(network_bytes_sent)s,
      flows.num_replies_sent = %(num_replies_sent)s,
      flows.last_update = NOW(6)
    WHERE
      flows.client_id = %(client_id)s AND
      flows.flow_id = %(flow_id)s AND (
        needs_processing.needs_processing = FALSE OR
        needs_processing.needs_processing IS NULL)
    """
    # The serialized blob must not retain processing metadata; clear it on
    # a copy so the caller's object is untouched.
    clone = flow_obj.Copy()
    clone.processing_on = None
    clone.processing_since = None
    clone.processing_deadline = None
    args = {
        "client_id":
            db_utils.ClientIDToInt(flow_obj.client_id),
        "flow":
            clone.SerializeToBytes(),
        "flow_id":
            db_utils.FlowIDToInt(flow_obj.flow_id),
        "flow_state":
            int(clone.flow_state),
        "network_bytes_sent":
            flow_obj.network_bytes_sent,
        "next_request_to_process":
            flow_obj.next_request_to_process,
        "num_replies_sent":
            flow_obj.num_replies_sent,
        "system_cpu_time_used_micros":
            db_utils.SecondsToMicros(flow_obj.cpu_time_used.system_cpu_time),
        "user_cpu_time_used_micros":
            db_utils.SecondsToMicros(flow_obj.cpu_time_used.user_cpu_time),
    }
    rows_updated = cursor.execute(update_query, args)
    return rows_updated == 1
  @mysql_utils.WithTransaction()
  def WriteFlowProcessingRequests(self, requests, cursor=None):
    """Writes a list of flow processing requests to the database.

    Thin transactional wrapper around _WriteFlowProcessingRequests, which
    is also reused from inside other transactions.

    Args:
      requests: FlowProcessingRequest objects to write.
      cursor: MySQL cursor supplied by the transaction decorator.
    """
    self._WriteFlowProcessingRequests(requests, cursor)
@mysql_utils.WithTransaction(readonly=True)
def ReadFlowProcessingRequests(self, cursor=None):
"""Reads all flow processing requests from the database."""
query = ("SELECT request, UNIX_TIMESTAMP(timestamp) "
"FROM flow_processing_requests")
cursor.execute(query)
res = []
for serialized_request, ts in cursor.fetchall():
req = rdf_flows.FlowProcessingRequest.FromSerializedBytes(
serialized_request)
req.timestamp = mysql_utils.TimestampToRDFDatetime(ts)
res.append(req)
return res
@mysql_utils.WithTransaction()
def AckFlowProcessingRequests(self, requests, cursor=None):
"""Deletes a list of flow processing requests from the database."""
if not requests:
return
query = "DELETE FROM flow_processing_requests WHERE "
conditions = []
args = []
for r in requests:
conditions.append(
"(client_id=%s AND flow_id=%s AND timestamp=FROM_UNIXTIME(%s))")
args.append(db_utils.ClientIDToInt(r.client_id))
args.append(db_utils.FlowIDToInt(r.flow_id))
args.append(mysql_utils.RDFDatetimeToTimestamp(r.timestamp))
query += " OR ".join(conditions)
cursor.execute(query, args)
@mysql_utils.WithTransaction()
def DeleteAllFlowProcessingRequests(self, cursor=None):
"""Deletes all flow processing requests from the database."""
query = "DELETE FROM flow_processing_requests WHERE true"
cursor.execute(query)
  @mysql_utils.WithTransaction()
  def _LeaseFlowProcessingReqests(self, cursor=None):
    """Leases a number of flow processing requests.

    NOTE: the name misspells "Requests"; it is kept as-is because callers
    (e.g. _FlowProcessingRequestHandlerLoop) reference it by this name.

    Deliverable, unleased (or lease-expired) requests are claimed by a
    single UPDATE, then read back by matching the unique (leased_by,
    leased_until) pair just written.

    Args:
      cursor: MySQL cursor supplied by the transaction decorator.

    Returns:
      A list of leased FlowProcessingRequest objects (at most 50).
    """
    now = rdfvalue.RDFDatetime.Now()
    expiry = now + rdfvalue.Duration.From(10, rdfvalue.MINUTES)
    query = """
      UPDATE flow_processing_requests
      SET leased_until=FROM_UNIXTIME(%(expiry)s), leased_by=%(id)s
      WHERE
       (delivery_time IS NULL OR
        delivery_time <= NOW(6)) AND
       (leased_until IS NULL OR
        leased_until < NOW(6))
      LIMIT %(limit)s
    """
    # Appending a random id here to make the key we lease by unique in cases
    # where we run multiple leasing attempts with the same timestamp - which can
    # happen on Windows where timestamp resolution is lower.
    id_str = "%s:%d" % (utils.ProcessIdString(), random.UInt16())
    expiry_str = mysql_utils.RDFDatetimeToTimestamp(expiry)
    args = {
        "expiry": expiry_str,
        "id": id_str,
        "limit": 50,
    }
    updated = cursor.execute(query, args)
    if updated == 0:
      return []
    # Read back exactly the rows claimed above, identified by the
    # (leased_by, leased_until) pair.
    query = """
      SELECT UNIX_TIMESTAMP(timestamp), request
      FROM flow_processing_requests
      FORCE INDEX (flow_processing_requests_by_lease)
      WHERE leased_by=%(id)s AND leased_until=FROM_UNIXTIME(%(expiry)s)
      LIMIT %(updated)s
    """
    args = {
        "expiry": expiry_str,
        "id": id_str,
        "updated": updated,
    }
    cursor.execute(query, args)
    res = []
    for timestamp, request in cursor.fetchall():
      req = rdf_flows.FlowProcessingRequest.FromSerializedBytes(request)
      req.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
      req.leased_until = expiry
      req.leased_by = id_str
      res.append(req)
    return res
  # Seconds to sleep between polls when no processing requests are leased.
  _FLOW_REQUEST_POLL_TIME_SECS = 3
  def _FlowProcessingRequestHandlerLoop(self, handler):
    """The main loop for the flow processing request queue.

    Polls for leased flow processing requests and dispatches each to
    `handler` on the handler thread pool; exits when the stop flag is set
    or an unexpected exception occurs.

    Args:
      handler: Callable invoked with each leased FlowProcessingRequest.
    """
    while not self.flow_processing_request_handler_stop:
      try:
        msgs = self._LeaseFlowProcessingReqests()
        if msgs:
          for m in msgs:
            self.flow_processing_request_handler_pool.AddTask(
                target=handler, args=(m,))
        else:
          # Nothing to do; back off before polling again.
          time.sleep(self._FLOW_REQUEST_POLL_TIME_SECS)
      except Exception as e:  # pylint: disable=broad-except
        # Any unexpected error terminates the loop (and thus the thread)
        # rather than spinning on a persistent failure.
        logging.exception("_FlowProcessingRequestHandlerLoop raised %s.", e)
        break
def RegisterFlowProcessingHandler(self, handler):
"""Registers a handler to receive flow processing messages."""
self.UnregisterFlowProcessingHandler()
if handler:
self.flow_processing_request_handler_stop = False
self.flow_processing_request_handler_thread = threading.Thread(
name="flow_processing_request_handler",
target=self._FlowProcessingRequestHandlerLoop,
args=(handler,))
self.flow_processing_request_handler_thread.daemon = True
self.flow_processing_request_handler_thread.start()
def UnregisterFlowProcessingHandler(self, timeout=None):
"""Unregisters any registered flow processing handler."""
if self.flow_processing_request_handler_thread:
self.flow_processing_request_handler_stop = True
self.flow_processing_request_handler_thread.join(timeout)
if self.flow_processing_request_handler_thread.isAlive():
raise RuntimeError("Flow processing handler did not join in time.")
self.flow_processing_request_handler_thread = None
@mysql_utils.WithTransaction()
def WriteFlowResults(self, results, cursor=None):
"""Writes flow results for a given flow."""
query = ("INSERT INTO flow_results "
"(client_id, flow_id, hunt_id, timestamp, payload, type, tag) "
"VALUES ")
templates = []
args = []
for r in results:
templates.append("(%s, %s, %s, FROM_UNIXTIME(%s), %s, %s, %s)")
args.append(db_utils.ClientIDToInt(r.client_id))
args.append(db_utils.FlowIDToInt(r.flow_id))
if r.hunt_id:
args.append(db_utils.HuntIDToInt(r.hunt_id))
else:
args.append(0)
args.append(
mysql_utils.RDFDatetimeToTimestamp(rdfvalue.RDFDatetime.Now()))
args.append(r.payload.SerializeToBytes())
args.append(compatibility.GetName(r.payload.__class__))
args.append(r.tag)
query += ",".join(templates)
try:
cursor.execute(query, args)
except MySQLdb.IntegrityError as e:
raise db.AtLeastOneUnknownFlowError(
[(r.client_id, r.flow_id) for r in results], cause=e)
  @mysql_utils.WithTransaction(readonly=True)
  def ReadFlowResults(self,
                      client_id,
                      flow_id,
                      offset,
                      count,
                      with_tag=None,
                      with_type=None,
                      with_substring=None,
                      cursor=None):
    """Reads flow results of a given flow using given query options.

    Args:
      client_id: Client of the flow.
      flow_id: Flow whose results are read.
      offset: Number of results to skip.
      count: Maximum number of results to return.
      with_tag: If set, only results with this exact tag are returned.
      with_type: If set, only results of this payload type are returned.
      with_substring: If set, only results whose serialized payload
        contains this substring are returned.
      cursor: MySQL cursor supplied by the transaction decorator.

    Returns:
      A list of FlowResult objects ordered by timestamp.
    """
    query = ("SELECT payload, type, UNIX_TIMESTAMP(timestamp), tag "
             "FROM flow_results "
             "FORCE INDEX (flow_results_by_client_id_flow_id_timestamp) "
             "WHERE client_id = %s AND flow_id = %s ")
    args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]
    if with_tag is not None:
      query += "AND tag = %s "
      args.append(with_tag)
    if with_type is not None:
      query += "AND type = %s "
      args.append(with_type)
    if with_substring is not None:
      # LIKE match on the raw serialized bytes of the payload.
      query += "AND payload LIKE %s "
      args.append("%{}%".format(with_substring))
    query += "ORDER BY timestamp ASC LIMIT %s OFFSET %s"
    args.append(count)
    args.append(offset)
    cursor.execute(query, args)
    ret = []
    for serialized_payload, payload_type, ts, tag in cursor.fetchall():
      # Unknown payload types (e.g. from a newer server version) are kept
      # as opaque values rather than dropped.
      if payload_type in rdfvalue.RDFValue.classes:
        payload = rdfvalue.RDFValue.classes[payload_type].FromSerializedBytes(
            serialized_payload)
      else:
        payload = rdf_objects.SerializedValueOfUnrecognizedType(
            type_name=payload_type, value=serialized_payload)
      timestamp = mysql_utils.TimestampToRDFDatetime(ts)
      result = rdf_flow_objects.FlowResult(payload=payload, timestamp=timestamp)
      if tag:
        result.tag = tag
      ret.append(result)
    return ret
@mysql_utils.WithTransaction(readonly=True)
def CountFlowResults(self,
client_id,
flow_id,
with_tag=None,
with_type=None,
cursor=None):
"""Counts flow results of a given flow using given query options."""
query = ("SELECT COUNT(*) "
"FROM flow_results "
"FORCE INDEX (flow_results_by_client_id_flow_id_timestamp) "
"WHERE client_id = %s AND flow_id = %s ")
args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]
if with_tag is not None:
query += "AND tag = %s "
args.append(with_tag)
if with_type is not None:
query += "AND type = %s "
args.append(with_type)
cursor.execute(query, args)
return cursor.fetchone()[0]
@mysql_utils.WithTransaction(readonly=True)
def CountFlowResultsByType(self, client_id, flow_id, cursor=None):
"""Returns counts of flow results grouped by result type."""
query = ("SELECT type, COUNT(*) FROM flow_results "
"FORCE INDEX (flow_results_by_client_id_flow_id_timestamp) "
"WHERE client_id = %s AND flow_id = %s "
"GROUP BY type")
args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]
cursor.execute(query, args)
return dict(cursor.fetchall())
@mysql_utils.WithTransaction()
def WriteFlowLogEntries(self, entries, cursor=None):
"""Writes flow log entries for a given flow."""
query = ("INSERT INTO flow_log_entries "
"(client_id, flow_id, hunt_id, message) "
"VALUES ")
templates = []
args = []
for entry in entries:
templates.append("(%s, %s, %s, %s)")
args.append(db_utils.ClientIDToInt(entry.client_id))
args.append(db_utils.FlowIDToInt(entry.flow_id))
if entry.hunt_id:
args.append(db_utils.HuntIDToInt(entry.hunt_id))
else:
args.append(0)
args.append(entry.message)
query += ",".join(templates)
try:
cursor.execute(query, args)
except MySQLdb.IntegrityError as e:
raise db.AtLeastOneUnknownFlowError(
[(entry.client_id, entry.flow_id) for entry in entries], cause=e)
@mysql_utils.WithTransaction(readonly=True)
def ReadFlowLogEntries(self,
client_id,
flow_id,
offset,
count,
with_substring=None,
cursor=None):
"""Reads flow log entries of a given flow using given query options."""
query = ("SELECT message, UNIX_TIMESTAMP(timestamp) "
"FROM flow_log_entries "
"FORCE INDEX (flow_log_entries_by_flow) "
"WHERE client_id = %s AND flow_id = %s ")
args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]
if with_substring is not None:
query += "AND message LIKE %s "
args.append("%{}%".format(with_substring))
query += "ORDER BY log_id ASC LIMIT %s OFFSET %s"
args.append(count)
args.append(offset)
cursor.execute(query, args)
ret = []
for message, timestamp in cursor.fetchall():
ret.append(
rdf_flow_objects.FlowLogEntry(
message=message,
timestamp=mysql_utils.TimestampToRDFDatetime(timestamp)))
return ret
@mysql_utils.WithTransaction(readonly=True)
def CountFlowLogEntries(self, client_id, flow_id, cursor=None):
"""Returns number of flow log entries of a given flow."""
query = ("SELECT COUNT(*) "
"FROM flow_log_entries "
"FORCE INDEX (flow_log_entries_by_flow) "
"WHERE client_id = %s AND flow_id = %s ")
args = [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]
cursor.execute(query, args)
return cursor.fetchone()[0]
@mysql_utils.WithTransaction()
def WriteFlowOutputPluginLogEntries(self, entries, cursor=None):
"""Writes flow output plugin log entries for a given flow."""
query = ("INSERT INTO flow_output_plugin_log_entries "
"(client_id, flow_id, hunt_id, output_plugin_id, "
"log_entry_type, message) "
"VALUES ")
templates = []
args = []
for entry in entries:
templates.append("(%s, %s, %s, %s, %s, %s)")
args.append(db_utils.ClientIDToInt(entry.client_id))
args.append(db_utils.FlowIDToInt(entry.flow_id))
if entry.hunt_id:
args.append(db_utils.HuntIDToInt(entry.hunt_id))
else:
args.append(0)
args.append(db_utils.OutputPluginIDToInt(entry.output_plugin_id))
args.append(int(entry.log_entry_type))
args.append(entry.message)
query += ",".join(templates)
try:
cursor.execute(query, args)
except MySQLdb.IntegrityError as e:
raise db.AtLeastOneUnknownFlowError(
[(entry.client_id, entry.flow_id) for entry in entries], cause=e)
  @mysql_utils.WithTransaction(readonly=True)
  def ReadFlowOutputPluginLogEntries(self,
                                     client_id,
                                     flow_id,
                                     output_plugin_id,
                                     offset,
                                     count,
                                     with_type=None,
                                     cursor=None):
    """Reads flow output plugin log entries.

    Args:
      client_id: Client of the flow.
      flow_id: Flow whose plugin log entries are read.
      output_plugin_id: Output plugin to read entries for.
      offset: Number of entries to skip.
      count: Maximum number of entries to return.
      with_type: If set, only entries of this log_entry_type are returned.
      cursor: MySQL cursor supplied by the transaction decorator.

    Returns:
      A list of FlowOutputPluginLogEntry objects in insertion order.
    """
    query = ("SELECT log_entry_type, message, UNIX_TIMESTAMP(timestamp) "
             "FROM flow_output_plugin_log_entries "
             "FORCE INDEX (flow_output_plugin_log_entries_by_flow) "
             "WHERE client_id = %s AND flow_id = %s AND output_plugin_id = %s ")
    args = [
        db_utils.ClientIDToInt(client_id),
        db_utils.FlowIDToInt(flow_id),
        # The column stores the integer form of the plugin id.
        db_utils.OutputPluginIDToInt(output_plugin_id)
    ]
    if with_type is not None:
      query += "AND log_entry_type = %s "
      args.append(int(with_type))
    query += "ORDER BY log_id ASC LIMIT %s OFFSET %s"
    args.append(count)
    args.append(offset)
    cursor.execute(query, args)
    ret = []
    for log_entry_type, message, timestamp in cursor.fetchall():
      ret.append(
          rdf_flow_objects.FlowOutputPluginLogEntry(
              client_id=client_id,
              flow_id=flow_id,
              output_plugin_id=output_plugin_id,
              log_entry_type=log_entry_type,
              message=message,
              timestamp=mysql_utils.TimestampToRDFDatetime(timestamp)))
    return ret
@mysql_utils.WithTransaction(readonly=True)
def CountFlowOutputPluginLogEntries(self,
client_id,
flow_id,
output_plugin_id,
with_type=None,
cursor=None):
"""Returns number of flow output plugin log entries of a given flow."""
query = ("SELECT COUNT(*) "
"FROM flow_output_plugin_log_entries "
"FORCE INDEX (flow_output_plugin_log_entries_by_flow) "
"WHERE client_id = %s AND flow_id = %s AND output_plugin_id = %s ")
args = [
db_utils.ClientIDToInt(client_id),
db_utils.FlowIDToInt(flow_id), output_plugin_id
]
if with_type is not None:
query += "AND log_entry_type = %s"
args.append(int(with_type))
cursor.execute(query, args)
return cursor.fetchone()[0]
| |
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import densenet as dn
# used for logging to TensorBoard
from tensorboard_logger import configure, log_value
# Command-line interface for the DenseNet CIFAR-10 training script.
parser = argparse.ArgumentParser(description='PyTorch DenseNet Training')
parser.add_argument('--epochs', default=300, type=int,
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int,
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
                    help='mini-batch size (default: 64)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
                    help='print frequency (default: 10)')
# DenseNet architecture knobs (see densenet.DenseNet3).
parser.add_argument('--layers', default=100, type=int,
                    help='total number of layers (default: 100)')
parser.add_argument('--growth', default=12, type=int,
                    help='number of new channels per layer (default: 12)')
parser.add_argument('--droprate', default=0, type=float,
                    help='dropout probability (default: 0.0)')
parser.add_argument('--no-augment', dest='augment', action='store_false',
                    help='whether to use standard augmentation (default: True)')
parser.add_argument('--reduce', default=0.5, type=float,
                    help='compression rate in transition stage (default: 0.5)')
parser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false',
                    help='To not use bottleneck block')
parser.add_argument('--resume', default='', type=str,
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--name', default='DenseNet_BC_100_12', type=str,
                    help='name of experiment')
parser.add_argument('--tensorboard',
                    help='Log progress to TensorBoard', action='store_true')
# store_false flags above need explicit True defaults.
parser.set_defaults(bottleneck=True)
parser.set_defaults(augment=True)
# Best top-1 validation accuracy seen so far; updated in main().
best_prec1 = 0
def main():
    """Parse arguments, build data/model/optimizer, and run the training loop."""
    global args, best_prec1
    args = parser.parse_args()
    if args.tensorboard: configure("runs/%s"%(args.name))
    # Data loading code
    # Per-channel CIFAR-10 mean/std, given in 0-255 scale and divided to [0, 1].
    normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x/255.0 for x in [63.0, 62.1, 66.7]])
    if args.augment:
        # Standard CIFAR augmentation: random crop with padding + horizontal flip.
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
            ])
    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            normalize,
            ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        normalize
        ])
    kwargs = {'num_workers': 1, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('../data', train=True, download=True,
                         transform=transform_train),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    # NOTE(review): shuffle=True on the validation loader is unusual but
    # harmless for accuracy computation.
    val_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('../data', train=False, transform=transform_test),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    # create model
    model = dn.DenseNet3(args.layers, 10, args.growth, reduction=args.reduce,
                         bottleneck=args.bottleneck, dropRate=args.droprate)
    # get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    # model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                nesterov=True,
                                weight_decay=args.weight_decay)
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best)
    print('Best accuracy: ', best_prec1)
def train(train_loader, model, criterion, optimizer, epoch):
    """Train the model for one epoch on the training set.

    Args:
        train_loader: DataLoader yielding (input, target) batches.
        model: Network to optimize (already moved to GPU).
        criterion: Loss function.
        optimizer: Optimizer for the model's parameters.
        epoch: Current epoch index (used for logging only).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # `async` became a reserved keyword in Python 3.7, making the old
        # target.cuda(async=True) a SyntaxError; non_blocking=True is the
        # equivalent overlapped host-to-device copy.
        target = target.cuda(non_blocking=True)
        input = input.cuda()
        # compute output (Variable wrappers are no-ops since PyTorch 0.4;
        # tensors participate in autograd directly)
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        # loss.data[0] fails on 0-dim tensors in PyTorch >= 0.5; .item()
        # extracts the Python number on all modern versions.
        losses.update(loss.item(), input.size(0))
        # float() works whether accuracy() returns a 0-dim or 1-element tensor.
        top1.update(float(prec1), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      loss=losses, top1=top1))
    # log to TensorBoard
    if args.tensorboard:
        log_value('train_loss', losses.avg, epoch)
        log_value('train_acc', top1.avg, epoch)
def validate(val_loader, model, criterion, epoch):
    """Evaluate `model` on the validation set.

    Args:
        val_loader: iterable of (input, target) mini-batches.
        model: network to evaluate (assumed already on the GPU).
        criterion: loss function used for reporting only.
        epoch: current epoch index (used only for logging).

    Returns:
        Average top-1 precision over the validation set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode (freezes dropout / batch-norm statistics)
    model.eval()

    end = time.time()
    # torch.no_grad() replaces the removed `volatile=True` Variable flag:
    # no autograd graph is built, saving memory during inference.
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            # `async=True` is a SyntaxError on Python >= 3.7; use non_blocking.
            target = target.cuda(non_blocking=True)
            input = input.cuda()

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss (`.item()` replaces `.data[0]`)
            prec1 = accuracy(output.data, target, topk=(1,))[0]
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1))
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    # log to TensorBoard
    if args.tensorboard:
        log_value('val_loss', losses.avg, epoch)
        log_value('val_acc', top1.avg, epoch)
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Save training state to ``runs/<args.name>/<filename>``.

    When `is_best` is true, also copy it to ``model_best.pth.tar`` in the
    same directory.
    """
    directory = os.path.join("runs", args.name)
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs
    os.makedirs(directory, exist_ok=True)
    filepath = os.path.join(directory, filename)
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(directory, 'model_best.pth.tar'))
class AverageMeter(object):
    """Track the most recent value and a running average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every accumulated statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in `val`, weighted as `n` observations, and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 after 150 and 225 epochs"""
    # each floor-division term contributes one extra factor of 0.1 once the
    # corresponding epoch threshold is crossed
    decay_factor = (0.1 ** (epoch // 150)) * (0.1 ** (epoch // 225))
    lr = args.lr * decay_factor
    # log to TensorBoard
    if args.tensorboard:
        log_value('learning_rate', lr, epoch)
    for group in optimizer.param_groups:
        group['lr'] = lr
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of true class indices.
        topk: tuple of k values to report.

    Returns:
        List of tensors, one per k, each holding top-k accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # indices of the maxk highest scores per row; transpose to (maxk, batch)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # `correct[:k]` can be non-contiguous, so `.view(-1)` raises on
        # modern PyTorch; `.reshape(-1)` copies only when needed.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
# Entry-point guard: run training only when executed as a script.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# version 1.02
import re
import os
import sys
import time
import lxml
import pickle
import requests
import urlparse
from bs4 import BeautifulSoup
from misc import *
def main():
    """Demo entry point: log in and pretty-print one user's profile.

    The empty strings are placeholders — fill in real credentials / user id.
    """
    from pprint import pprint
    robot = WeiboRobot('', '')  # username & password
    info = robot.get_user_info('')  # user id
    pprint(info)
class WeiboRobot(requests.Session):
    """Scraping session for the weibo.cn mobile site.

    Logs in with username/password (caching the cookie jar on disk), then
    fetches user profiles and keyword search results by parsing the simple
    WAP HTML pages.

    NOTE(review): the r_* regexes used below (r_rank, r_sex, r_name, r_area,
    r_birth, r_brief, r_loginday, r_weibocount, r_fans, r_follow, r_numid,
    r_userid) are presumably provided by ``from misc import *`` at the top
    of the file — confirm.
    """
    index_url = "http://weibo.cn"
    login_url = "http://login.weibo.cn/login/"
    # userinfo_urls; %s/%d slots take user id, page number and the st token
    username_url = "http://weibo.cn/%s?st=%s"
    follow_url = "http://weibo.cn/%s/follow?page=%d&st=%s"
    info_url = "http://weibo.cn/%s/info?st=%s"
    rank_url = "http://weibo.cn/%s/urank"
    at_url = "http://weibo.cn/at/weibo?uid=%s"
    search_url = "http://weibo.cn/search/mblog"
    search_params = {'hideSearchFrame': None,
                     'keyword': None, 'page': None, 'st': None}
    login_post = {'remember': 'on'}
    # operation
    attention_url = "http://weibo.cn/attention/add?uid=%s&rl=0&st=%s"
    # ??? (purpose unclear — looks like the "special follow" endpoint; verify)
    special_attention_url = "http://weibo.cn/attgroup/special?fuid=%s&st=%s"
    # cookie store
    cookie_dir = os.path.join(os.path.dirname(__file__), 'cookie/')
    cookie_ext = '.cookie'
    # sina ?st value (anti-CSRF token scraped from the site)
    st_value = ""
    # feature-phone User-Agent so the site serves the simple WAP markup
    headers_ua = {
        'User-Agent': 'LG-GC900/V10a Obigo/WAP2.0 Profile/MIDP-2.1 Configuration/CLDC-1.1'}
    # cache: screen name -> numeric id (class-level, shared by instances)
    visited_user = {}

    def __init__(self, username, password):
        """Create the session, log in immediately and fetch the st token."""
        requests.Session.__init__(self)
        self.username = username
        self.password = password
        self.headers = self.headers_ua
        self.login(self.username, self.password)
        self.init_st()  # init st token

    def init_st(self):
        """Scrape the `st` token from the form action on the index page."""
        req = self.get(self.index_url)
        soup = BeautifulSoup(req.text)
        parsed = self.parse_href(soup.form['action'])
        self.st_value = parsed.get('st')[0]

    def search_keyword(self, keywords, page):
        """Search tweets for `keywords` on the given result `page`.

        Returns a list of dicts with keys: screenid, id, tweetid, content.
        Exits the process when the search rate limit is hit.
        """
        params = self.search_params
        params['keyword'] = keywords
        params['st'] = self.st_value
        params['page'] = page
        req = self.get(self.search_url, params=self.search_params)
        # a redirect in the history means the anti-scraping limit was reached
        if len(req.history) != 0:
            print 'sina weibo search limit reached'
            exit()
        soup = BeautifulSoup(req.text, 'lxml')
        result_list = []
        for tweet in soup.find_all('div', class_='c'):
            # div class=c
            if tweet.get('id'):
                # span.cmt presumably marks retweets/comments — skipped; verify
                if tweet.div.find('span', class_='cmt'):
                    pass
                else:
                    tweet_id = tweet.get('id')[2:]  # e.g. 'M_BcB181ToF'
                    tweet_screenid = tweet.div.a.string
                    tweet_userid = self.extract_userid(tweet.div.a['href'])
                    tweet_content = tweet.div.find(
                        'span', class_='ctt').__str__().lstrip(':')
                    result_list.append(
                        {'screenid': tweet_screenid, 'id': tweet_userid, 'tweetid': tweet_id, 'content': tweet_content})
        return result_list

    def login(self, username, password):
        """Log in, reusing a pickled cookie jar from disk when available.

        On a fresh login the mobile login form is scraped for its hidden
        inputs, posted back, and the resulting cookies are pickled to disk.
        """
        cookie_file = self.cookie_dir + self.username + self.cookie_ext
        if os.path.isfile(cookie_file):
            with open(cookie_file) as f:
                cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
                self.cookies = cookies
            # todo
            # import md5 m=md5.new('username')
            # m.hexdigest()
        else:
            req = self.get(self.login_url)
            soup = BeautifulSoup(req.text, 'lxml')
            action = self.login_url + soup.form['action']
            # generate post data from the form's input fields
            for tag in soup.form.find_all('input'):
                if tag['name'] == 'mobile':
                    self.login_post[tag['name']] = self.username
                elif 'password' in tag['name']:
                    self.login_post[tag['name']] = self.password
                elif tag['name'] != 'remember':
                    self.login_post[tag['name']] = tag['value']
            self.post(action, data=self.login_post)
            with open(cookie_file, 'w') as f:
                pickle.dump(
                    requests.utils.dict_from_cookiejar(self.cookies), f)

    def check_login(self):
        """Return True when the index page title contains u'我的首页' ("my homepage")."""
        soup = BeautifulSoup(self.get(self.index_url).text)
        if u'\u6211\u7684\u9996\u9875' in soup.title:
            return True
        else:
            return False

    def get_user_info(self, numid):  # input numid!!
        """Scrape profile, rank and counter fields for numeric user id `numid`.

        Returns a dict with keys screenid/area/sex/birth/rank/brief/loginday/
        weibocount/fans/follow. Fields whose regex does not match keep their
        defaults (None or 0).
        """
        sex = None
        name = None
        area = None
        birth = None
        brief = None
        rank = 0
        loginday = 0
        weibocount = 0
        fans = 0
        follow = 0
        info_url = self.info_url % (str(numid), self.st_value)
        urank_url = self.rank_url % str(numid)
        user_index_url = self.username_url % (str(numid), self.st_value)
        rank_page = self.get(urank_url).text
        index_page = self.get(user_index_url).text
        info_page = self.get(info_url).text
        # print info_text
        info_dict = {}
        # each regex may miss (field absent from the page); the resulting
        # AttributeError on None.group(...) is swallowed, keeping the default
        try:
            rank = r_rank.search(info_page).group(1)
        except AttributeError:
            pass
        try:
            sex = r_sex.search(info_page).group(1)
        except AttributeError:
            pass
        try:
            name = r_name.search(info_page).group(1)
        except AttributeError:
            pass
        try:
            area = r_area.search(info_page).group(1)
        except AttributeError:
            pass
        try:
            birth = r_birth.search(info_page).group(1)
        except AttributeError:
            pass
        try:
            brief = r_brief.search(info_page).group(1)
        except AttributeError:
            pass
        try:
            loginday = r_loginday.search(rank_page).group(1)
        except AttributeError:
            pass
        try:
            weibocount = r_weibocount.search(index_page).group(1)
        except AttributeError:
            pass
        try:
            fans = r_fans.search(index_page).group(1)
        except AttributeError:
            pass
        try:
            follow = r_follow.search(index_page).group(1)
        except AttributeError:
            pass
        info_dict = {'screenid': name, 'area':
                     area, 'sex': sex, 'birth': birth, 'rank': rank, 'brief': brief, 'loginday': loginday, 'weibocount': weibocount, 'fans': fans, 'follow': follow}
        return info_dict

    def id_to_numid(self, username):  # name modified
        """Resolve a screen name to its numeric id (consults visited_user cache).

        Returns None when the id cannot be extracted from the profile page.
        """
        try:
            return self.visited_user[username]
        except KeyError:
            req = self.get(self.username_url % (username, self.st_value))
            try:
                numid = r_numid.search(req.text).group(1)
            except AttributeError:
                numid = None
            return numid

    @staticmethod
    def extract_userid(url):
        """Pull the user id out of a profile href; None (and a message) when no match."""
        match = r_userid.search(url)
        try:
            return match.group(1) or match.group(2)
        except AttributeError, e:
            print 'url error'
            return None

    @staticmethod
    def parse_href(query_str):
        """Parse a URL's query string into a dict of value lists."""
        query = urlparse.urlparse(query_str).query
        return urlparse.parse_qs(query)  # {'st':['aaaa']}
# Entry-point guard: run the demo only when executed as a script.
if __name__ == '__main__':
    main()
| |
# Copyright 2007-2016, Sjoerd de Vries
import sys, re, os
from collections import namedtuple, OrderedDict
from .macros import get_macros
from .typedef import typedef_parse
from ..exceptions import SilkSyntaxError
from lxml.builder import E
# from ..validate import is_valid_silktype
# Regular expressions
# quotes ( "...", '...' )
# triple quotes ( """...""", '''...''' )
# curly braces ( {...} )
# Single-quoted string ("..." or '...'); the backreference \2 forces the
# closing quote to match the opening one. Non-greedy, so it stops at the
# first closing quote.
single_quote_match = re.compile(r'(([\"\']).*?\2)')
# Triple-double-quoted string; [\w\W] matches any character including
# newlines (the trailing 'n' in the class appears redundant but is harmless).
triple_quote_match = re.compile(r'(\"\"\"[\w\Wn]*?\"\"\")')
# Innermost curly-brace group (no nested braces inside); applied iteratively
# to peel off nesting from the inside out.
curly_brace_match = re.compile(r'{[^{}]*?}')
"""
Mask signs to mark out masked-out regions
These mask signs are assumed to be not present in the text
TODO: replace them with rarely-used ASCII codes
(check that this works with Py3 also)
"""
mask_sign_triple_quote = "&"
mask_sign_single_quote = "*"
mask_sign_curly = "!"
# Result record returned by parse_block (see parse_block's docstring).
BlockParseResult = namedtuple("BlockParseResult", "block_type block_head block block_docstring")
def parse(silktext):
    """Converts silktext to a dictionary with key (the name of the silk type) -> value (the lxml tree)

    Only top-level ``Type Name[: Base1, Base2] { ... }`` blocks are accepted;
    anything else raises SilkSyntaxError. Returns an lxml ``silkspace``
    element with one child per parsed type.
    """
    # NOTE(review): `macros` is never used below — get_macros() may be called
    # for its side effects; confirm before removing.
    macros = get_macros()
    result = E.silkspace()
    blocks = divide_blocks(silktext)
    for block in blocks:
        block_type, block_head, bblock, block_docstring_dummy = parse_block(block)
        # comment-only / empty blocks have no type: skip them
        if block_type is None:
            continue
        if bblock is None:
            raise SilkSyntaxError("Non-comment text outside Type definitions is not understood: '%s'" % block)
        if block_type != "Type":
            raise SilkSyntaxError("Top-level {}-blocks other than Type are not understood: '%s'" % block_type)
        # header is "Name" or "Name: Base1, Base2"
        block_head_words = block_head.split(":")
        if len(block_head_words) > 2:
            raise SilkSyntaxError("Type header '%s' can contain only one ':'" % block_head)
        typename = block_head_words[0]
        bases = []
        if len(block_head_words) == 2:
            bases = [b.strip() for b in block_head_words[1].split(",")]
        res = typedef_parse(typename, bases, bblock)
        result.append(res)
    return result
def mask_characters(expression, search_text, target_text, mask_char):
    """Mask characters found by a regular expression with mask character. Mask characters will equal length of masked
    string.
    A different target text to search text may be used, but the developer must ensure that they are compatible.
    This feature may be used to handle combinations of N mask_char(s) differently
    :param expression: regex expression
    :param search_text: text to search for matches
    :param target_text: text to apply mask to
    :param mask_char: character to replace masked characters
    """
    matches = list(expression.finditer(search_text))
    parts = []
    cursor = 0
    for m in matches:
        # copy everything up to the match, then a same-length run of masks
        parts.append(target_text[cursor:m.start()])
        parts.append(mask_char * (m.end() - m.start()))
        cursor = m.end()
    parts.append(target_text[cursor:])
    return "".join(parts), matches
def divide_blocks(silktext):
    """Divides silktext into blocks.

    A block is either a curly-brace block structure preceded by a block type
    and a block head, or it is a single line of text that is outside such a
    block structure.
    Triple-quoted strings outside blocks are automatically removed.
    Divide_blocks should be invoked
    - first on the entire text,
    - then on the contents of a block (Type blocks)
    - then on the contents of a block-inside-a-block (form blocks, validate blocks, ...)
    """
    # First, take silktext and mask out all triple quote text
    masked_triple_quote, _ = mask_characters(triple_quote_match, silktext, silktext, mask_sign_triple_quote)
    # Then, take silktext and mask out all quoted text into masked_single_quote.
    # To prevent that we also mask out triple quotes, look for quotes only in masked_triple_quote
    masked_single_quote, _ = mask_characters(single_quote_match, masked_triple_quote, silktext, mask_sign_single_quote)
    # Now, look for curly braces in masked_single_quote, and mask them out
    # iteratively (innermost first, so nesting is peeled one level per pass)
    while True:
        masked_curly_brace, matches = mask_characters(curly_brace_match, masked_single_quote, masked_single_quote,
                                                      mask_sign_curly)
        if not len(matches):
            break
        masked_single_quote = masked_curly_brace
        # Todo is this correct? - in original, mask00 (masked_curly_brace) is modified as `mask += mask00[pos:]`, not mask0
    # Finally, look for triple quote regions in masked_single_quote, and mask them out
    mask, _ = mask_characters(triple_quote_match, masked_single_quote, masked_single_quote, mask_sign_triple_quote)
    # Now split the mask into newlines. Newlines inside curly blocks will have been masked out,
    # so each mask line corresponds to one whole block in the original text.
    lines = []
    pos = 0
    for line in mask.split("\n"):
        end_pos = pos + len(line)
        # slice the ORIGINAL text so that masked characters are restored
        block = silktext[pos:end_pos]
        if block:
            lines.append(block)
        pos = end_pos + len("\n")
    return lines
def parse_block(blocktext):
    """
    Parses the content of a block into four parts
    - Block type: the first word
    - Block head: the first line after the block type, before the curly braces
    - Block: content between curly braces (None if no curly braces)
    - Block docstring: commented content right after the start of the block

    Returns a BlockParseResult namedtuple; raises SilkSyntaxError when the
    text contains more than one top-level {} block or trailing text after it.
    """
    masked_triple_quote, _ = mask_characters(triple_quote_match, blocktext, blocktext, mask_sign_triple_quote)
    masked_single_quote, _ = mask_characters(single_quote_match, masked_triple_quote, blocktext, mask_sign_single_quote)
    pre_block = blocktext
    post_block = ""
    blocks = []
    # Iteratively mask innermost {} groups; after the last pass, pre_block is
    # the text before the outermost block and blocks holds its raw contents.
    while True:
        pos = 0
        masked_curly_braces = ""
        block_contents = []
        for match in curly_brace_match.finditer(masked_single_quote):
            block_contents.append(blocktext[match.start():match.end()])
            pre_block = blocktext[pos:match.start()]
            masked_curly_braces += pre_block + mask_sign_curly * (match.end() - match.start())
            pos = match.end()
        masked_curly_braces += masked_single_quote[pos:]
        if pos == 0:
            # no (further) braces found: done
            break
        else:
            post_block = blocktext[pos:]
            blocks = block_contents
        masked_single_quote = masked_curly_braces
    if len(blocks) > 1:
        raise SilkSyntaxError("compile error: invalid statement\n%s\nStatement must contain a single {} block" % blocktext)
    if post_block.strip():
        raise SilkSyntaxError("compile error: invalid statement\n%s\nStatement must be empty after {} block" % blocktext)
    elif blocks:
        # strip the surrounding braces themselves
        block = blocks[0][1:-1]
    else:
        block = None
    pre_block = pre_block.strip()
    pre_block_masked_single_quote, _ = mask_characters(single_quote_match, pre_block, pre_block, mask_sign_single_quote)
    # Find docstring: a '#' comment in the header (searched in the masked
    # header so '#' inside quotes is ignored)
    comment_start = pre_block_masked_single_quote.find("#")
    if comment_start > -1:
        block_docstring = pre_block[comment_start+1:].strip('\n') + '\n'
        pre_block = pre_block[:comment_start]
    else:
        block_docstring = ""
    if block is not None:
        # a leading triple-quoted string inside the block is appended to the
        # docstring; first strip leading whitespace/newlines
        current_block = block
        while True:
            original_length = len(current_block)
            current_block = current_block.lstrip().lstrip("\n")
            if len(current_block) == original_length:
                break
        match = triple_quote_match.search(current_block)
        if match is not None and match.start() == 0:
            block_docstring += current_block[match.start()+len('"""'):match.end()-len('"""')]
    else:
        # no {} block: a leading triple-quoted string IS the docstring
        match = triple_quote_match.search(blocktext)
        if match is not None and match.start() == 0:
            match0, match1 = match.start() + len('"""'), match.end()-len('"""')
            block_docstring = blocktext[match0:match1]
            pre_block = blocktext[:match.start()]
    block_type = None
    block_head = None
    if pre_block:
        # first word is the block type, the remainder is the head
        block_type = pre_block.split()[0]
        block_head = pre_block[len(block_type):].strip()
    return BlockParseResult(block_type, block_head, block, block_docstring)
| |
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@ecdsa.org
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import threading
from . import util
from . import bitcoin
from .bitcoin import *
# Maximum (easiest) proof-of-work target allowed by Bitcoin consensus,
# corresponding to compact bits 0x1d00ffff.
MAX_TARGET = 0x00000000FFFF0000000000000000000000000000000000000000000000000000
def serialize_header(res):
    """Serialize a header dict to its 80-byte hex string representation."""
    # field order and widths follow the Bitcoin block-header wire format
    fields = [
        int_to_hex(res.get('version'), 4),
        rev_hex(res.get('prev_block_hash')),
        rev_hex(res.get('merkle_root')),
        int_to_hex(int(res.get('timestamp')), 4),
        int_to_hex(int(res.get('bits')), 4),
        int_to_hex(int(res.get('nonce')), 4),
    ]
    return ''.join(fields)
def deserialize_header(s, height):
    """Parse 80 raw header bytes into a dict, tagging it with `height`."""
    # little-endian byte slices -> int
    hex_to_int = lambda b: int('0x' + bh2u(b[::-1]), 16)
    return {
        'version': hex_to_int(s[0:4]),
        'prev_block_hash': hash_encode(s[4:36]),
        'merkle_root': hash_encode(s[36:68]),
        'timestamp': hex_to_int(s[68:72]),
        'bits': hex_to_int(s[72:76]),
        'nonce': hex_to_int(s[76:80]),
        'block_height': height,
    }
def hash_header(header):
    """Return the hex block hash of `header` (all-zeros string for None)."""
    if header is None:
        return '0' * 64
    # a missing parent hash is treated as the null hash
    if header.get('prev_block_hash') is None:
        header['prev_block_hash'] = '00' * 32
    serialized = serialize_header(header)
    return hash_encode(Hash(bfh(serialized)))
# Registry of known chains, keyed by fork checkpoint (0 = main chain).
blockchains = {}

def read_blockchains(config):
    """Load the main chain plus every saved fork file into `blockchains`.

    Fork files are named ``fork_<parent_id>_<checkpoint>`` under the headers
    directory; forks whose first header no longer connects to their parent
    are skipped with a log message. Returns the populated registry.
    """
    blockchains[0] = Blockchain(config, 0, None)
    fdir = os.path.join(util.get_headers_dir(config), 'forks')
    if not os.path.exists(fdir):
        os.mkdir(fdir)
    l = filter(lambda x: x.startswith('fork_'), os.listdir(fdir))
    # sort by parent id so parents are registered before their children
    l = sorted(l, key = lambda x: int(x.split('_')[1]))
    for filename in l:
        checkpoint = int(filename.split('_')[2])
        parent_id = int(filename.split('_')[1])
        b = Blockchain(config, checkpoint, parent_id)
        h = b.read_header(b.checkpoint)
        # only register forks whose first header still connects to the parent
        if b.parent().can_connect(h, check_height=False):
            blockchains[b.checkpoint] = b
        else:
            util.print_error("cannot connect", filename)
    return blockchains
def check_header(header):
    """Return the registered blockchain containing `header`, else False."""
    # exact dict check kept (not isinstance) to preserve original behavior
    if type(header) is not dict:
        return False
    matching = (chain for chain in blockchains.values() if chain.check_header(header))
    return next(matching, False)
def can_connect(header):
    """Return the registered blockchain that `header` can extend, else False."""
    candidates = (chain for chain in blockchains.values() if chain.can_connect(header))
    return next(candidates, False)
class Blockchain(util.PrintError):
    """
    Manages blockchain headers and their verification

    Each instance owns one on-disk file of fixed-width (80-byte) serialized
    headers: either the main chain file or a fork file starting at
    `checkpoint`. Heights below `checkpoint` are delegated to the parent.
    """
    def __init__(self, config, checkpoint, parent_id):
        # checkpoint: height of this branch's first header.
        # parent_id: the parent branch's checkpoint (None for the main chain).
        self.config = config
        self.catch_up = None # interface catching up
        self.checkpoint = checkpoint
        self.checkpoints = bitcoin.NetworkConstants.CHECKPOINTS
        self.parent_id = parent_id
        self.lock = threading.Lock()
        with self.lock:
            self.update_size()

    def parent(self):
        """Return the parent Blockchain from the global registry."""
        return blockchains[self.parent_id]

    def get_max_child(self):
        """Checkpoint of the highest direct child fork, or None."""
        children = list(filter(lambda y: y.parent_id==self.checkpoint, blockchains.values()))
        return max([x.checkpoint for x in children]) if children else None

    def get_checkpoint(self):
        """Height at which this chain stops being shared with a child fork."""
        mc = self.get_max_child()
        return mc if mc is not None else self.checkpoint

    def get_branch_size(self):
        """Number of headers owned exclusively by this branch."""
        return self.height() - self.get_checkpoint() + 1

    def get_name(self):
        """Short display name derived from the checkpoint hash.

        NOTE(review): lstrip('00') strips ALL leading '0' characters, not
        just '00' pairs — confirm this is the intended truncation.
        """
        return self.get_hash(self.get_checkpoint()).lstrip('00')[0:10]

    def check_header(self, header):
        """True when `header` matches this chain's stored hash at its height."""
        header_hash = hash_header(header)
        height = header.get('block_height')
        return header_hash == self.get_hash(height)

    def fork(parent, header):
        # Deliberately written without `self`: the receiver binds to `parent`.
        # Creates (and returns) a new child branch whose first header is
        # `header`, backed by a freshly-created fork file.
        checkpoint = header.get('block_height')
        self = Blockchain(parent.config, checkpoint, parent.checkpoint)
        open(self.path(), 'w+').close()
        self.save_header(header)
        return self

    def height(self):
        """Height of this branch's best (last stored) header."""
        return self.checkpoint + self.size() - 1

    def size(self):
        """Number of headers stored in this branch's file (thread-safe)."""
        with self.lock:
            return self._size

    def update_size(self):
        # 80 bytes per serialized header; 0 when the file does not exist yet
        p = self.path()
        self._size = os.path.getsize(p)//80 if os.path.exists(p) else 0

    def verify_header(self, header, prev_hash, target):
        """Check linkage, compact bits and proof-of-work; raise on failure."""
        _hash = hash_header(header)
        if prev_hash != header.get('prev_block_hash'):
            raise BaseException("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash')))
        # testnet headers are not checked against a target
        if bitcoin.NetworkConstants.TESTNET:
            return
        bits = self.target_to_bits(target)
        if bits != header.get('bits'):
            raise BaseException("bits mismatch: %s vs %s" % (bits, header.get('bits')))
        if int('0x' + _hash, 16) > target:
            raise BaseException("insufficient proof of work: %s vs target %s" % (int('0x' + _hash, 16), target))

    def verify_chunk(self, index, data):
        """Verify a raw chunk of up to 2016 headers against a single target."""
        num = len(data) // 80
        prev_hash = self.get_hash(index * 2016 - 1)
        # all headers in one retarget period share the same target
        target = self.get_target(index-1)
        for i in range(num):
            raw_header = data[i*80:(i+1) * 80]
            header = deserialize_header(raw_header, index*2016 + i)
            self.verify_header(header, prev_hash, target)
            prev_hash = hash_header(header)

    def path(self):
        """Filesystem location of this branch's header file."""
        d = util.get_headers_dir(self.config)
        filename = 'blockchain_headers' if self.parent_id is None else os.path.join('forks', 'fork_%d_%d'%(self.parent_id, self.checkpoint))
        return os.path.join(d, filename)

    def save_chunk(self, index, chunk):
        """Write a verified chunk at its byte offset, then maybe swap roles."""
        filename = self.path()
        d = (index * 2016 - self.checkpoint) * 80
        if d < 0:
            # chunk starts before our checkpoint: keep only the tail we own
            chunk = chunk[-d:]
            d = 0
        self.write(chunk, d)
        self.swap_with_parent()

    def swap_with_parent(self):
        """If this branch became longer than its parent, exchange roles.

        Swaps the file contents, checkpoints, sizes and ids so the longest
        chain always lives in the parent's (possibly main-chain) file, then
        renames other branches' files whose paths changed as a consequence.
        """
        if self.parent_id is None:
            return
        parent_branch_size = self.parent().height() - self.checkpoint + 1
        if parent_branch_size >= self.size():
            return
        self.print_error("swap", self.checkpoint, self.parent_id)
        parent_id = self.parent_id
        checkpoint = self.checkpoint
        parent = self.parent()
        with open(self.path(), 'rb') as f:
            my_data = f.read()
        with open(parent.path(), 'rb') as f:
            f.seek((checkpoint - parent.checkpoint)*80)
            parent_data = f.read(parent_branch_size*80)
        self.write(parent_data, 0)
        parent.write(my_data, (checkpoint - parent.checkpoint)*80)
        # store file path (before ids change, so renames can be detected below)
        for b in blockchains.values():
            b.old_path = b.path()
        # swap parameters
        self.parent_id = parent.parent_id; parent.parent_id = parent_id
        self.checkpoint = parent.checkpoint; parent.checkpoint = checkpoint
        self._size = parent._size; parent._size = parent_branch_size
        # move files
        for b in blockchains.values():
            if b in [self, parent]: continue
            if b.old_path != b.path():
                self.print_error("renaming", b.old_path, b.path())
                os.rename(b.old_path, b.path())
        # update pointers
        blockchains[self.checkpoint] = self
        blockchains[parent.checkpoint] = parent

    def write(self, data, offset):
        """Write `data` at byte `offset`, truncating first on a rewind."""
        filename = self.path()
        with self.lock:
            with open(filename, 'rb+') as f:
                if offset != self._size*80:
                    # not a plain append: discard everything past the offset
                    f.seek(offset)
                    f.truncate()
                f.seek(offset)
                f.write(data)
                f.flush()
                os.fsync(f.fileno())
            self.update_size()

    def save_header(self, header):
        """Append a single header; it must extend the current tip exactly."""
        delta = header.get('block_height') - self.checkpoint
        data = bfh(serialize_header(header))
        assert delta == self.size()
        assert len(data) == 80
        self.write(data, delta*80)
        self.swap_with_parent()

    def read_header(self, height):
        """Return the stored header dict at `height`.

        Heights below the checkpoint are delegated to the parent; out-of-range
        heights and zeroed-out slots yield None.
        """
        assert self.parent_id != self.checkpoint
        if height < 0:
            return
        if height < self.checkpoint:
            return self.parent().read_header(height)
        if height > self.height():
            return
        delta = height - self.checkpoint
        name = self.path()
        if os.path.exists(name):
            with open(name, 'rb') as f:
                f.seek(delta * 80)
                h = f.read(80)
            if h == bytes([0])*80:
                return None
            return deserialize_header(h, height)

    def get_hash(self, height):
        """Hex block hash at `height`: special cases, checkpoint table, or
        the hash of the stored header."""
        if height == -1:
            return '0000000000000000000000000000000000000000000000000000000000000000'
        elif height == 0:
            return bitcoin.NetworkConstants.GENESIS
        elif height < len(self.checkpoints) * 2016:
            # inside the checkpointed range only chunk-final hashes are known
            assert (height+1) % 2016 == 0
            index = height // 2016
            h, t = self.checkpoints[index]
            return h
        else:
            return hash_header(self.read_header(height))

    def get_target(self, index):
        # compute target from chunk x, used in chunk x+1
        if bitcoin.NetworkConstants.TESTNET:
            # NOTE(review): returns a 2-tuple here (and for index == -1 below)
            # but a plain int elsewhere; verify_header skips target checks on
            # testnet so the tuple is apparently never consumed — confirm.
            return 0, 0
        if index == -1:
            return 0x1d00ffff, MAX_TARGET
        if index < len(self.checkpoints):
            h, t = self.checkpoints[index]
            return t
        # new target: standard Bitcoin retarget from the first/last headers
        # of chunk `index`
        first = self.read_header(index * 2016)
        last = self.read_header(index * 2016 + 2015)
        bits = last.get('bits')
        target = self.bits_to_target(bits)
        nActualTimespan = last.get('timestamp') - first.get('timestamp')
        nTargetTimespan = 14 * 24 * 60 * 60
        # clamp the adjustment to the consensus factor-of-4 bounds
        nActualTimespan = max(nActualTimespan, nTargetTimespan // 4)
        nActualTimespan = min(nActualTimespan, nTargetTimespan * 4)
        new_target = min(MAX_TARGET, (target * nActualTimespan) // nTargetTimespan)
        return new_target

    def bits_to_target(self, bits):
        """Expand a compact-bits representation into the full integer target."""
        bitsN = (bits >> 24) & 0xff
        if not (bitsN >= 0x03 and bitsN <= 0x1d):
            raise BaseException("First part of bits should be in [0x03, 0x1d]")
        bitsBase = bits & 0xffffff
        if not (bitsBase >= 0x8000 and bitsBase <= 0x7fffff):
            raise BaseException("Second part of bits should be in [0x8000, 0x7fffff]")
        return bitsBase << (8 * (bitsN-3))

    def target_to_bits(self, target):
        """Compress a full integer target into its compact-bits form."""
        c = ("%064x" % target)[2:]
        while c[:2] == '00' and len(c) > 6:
            c = c[2:]
        bitsN, bitsBase = len(c) // 2, int('0x' + c[:6], 16)
        if bitsBase >= 0x800000:
            # keep the mantissa below the sign bit by shifting into the exponent
            bitsN += 1
            bitsBase >>= 8
        return bitsN << 24 | bitsBase

    def can_connect(self, header, check_height=True):
        """True when `header` validly extends this chain's tip."""
        height = header['block_height']
        if check_height and self.height() != height - 1:
            #self.print_error("cannot connect at height", height)
            return False
        if height == 0:
            return hash_header(header) == bitcoin.NetworkConstants.GENESIS
        try:
            prev_hash = self.get_hash(height - 1)
        except:
            return False
        if prev_hash != header.get('prev_block_hash'):
            return False
        target = self.get_target(height // 2016 - 1)
        try:
            self.verify_header(header, prev_hash, target)
        except BaseException as e:
            return False
        return True

    def connect_chunk(self, idx, hexdata):
        """Verify and persist a hex-encoded header chunk; False on failure."""
        try:
            data = bfh(hexdata)
            self.verify_chunk(idx, data)
            #self.print_error("validated chunk %d" % idx)
            self.save_chunk(idx, data)
            return True
        except BaseException as e:
            self.print_error('verify_chunk failed', str(e))
            return False

    def get_checkpoints(self):
        # for each chunk, store the hash of the last block and the target after the chunk
        cp = []
        n = self.height() // 2016
        for index in range(n):
            h = self.get_hash((index+1) * 2016 -1)
            target = self.get_target(index)
            cp.append((h, target))
        return cp
| |
'''
update:
2014/09/03:
softmax in the last layer
customize:
only classify 1
'''
import theano
import theano.tensor as T
import gzip
import cPickle
import numpy
import time
class HiddenLayer(object):
    """Fully-connected layer: output = activation(input . W + b).

    :param rng: numpy RandomState used for weight initialization
    :param input: symbolic (batch, n_in) input matrix
    :param n_in: number of input units
    :param n_out: number of output units
    :param W, b: optional pre-built shared parameters; created when None
    :param activation: elementwise nonlinearity (None = linear output)
    """
    def __init__(self, rng, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh):
        self.input = input
        if W is None:
            # Glorot-style uniform init in +/- sqrt(6/(n_in+n_out));
            # scaled by 4 for sigmoid as recommended by Glorot & Bengio
            W_values = numpy.asarray(rng.uniform(
                low=-numpy.sqrt(6. / (n_in + n_out)),
                high=numpy.sqrt(6. / (n_in + n_out)),
                size=(n_in, n_out)), dtype=theano.config.floatX)
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4
            W = theano.shared(value=W_values, name='W', borrow=True)
        if b is None:
            # biases start at zero
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)
        self.W = W
        self.b = b
        lin_output = T.dot(input, self.W) + self.b
        self.output = (lin_output if activation is None
                       else activation(lin_output))
        # parameters of the model
        self.params = [self.W, self.b]
class ANN(object):
    """Multi-layer perceptron: sigmoid hidden layers, softmax output,
    trained by plain SGD on negative log-likelihood plus L2 penalty.

    Python 2 / Theano code.
    """
    def __init__(self, n_in, n_out, lmbd = 0.01, hiddens = [10]):
        # NOTE(review): mutable default `hiddens=[10]` is shared between
        # calls; it is only read here, but confirm nothing mutates it.
        x = T.matrix('x')
        y = T.ivector('y')
        lr = T.scalar('lr')
        rng = numpy.random.RandomState(numpy.random.randint(2 ** 30))
        params = []
        hid_layers = []
        L2 = .0
        # layer widths: hidden sizes followed by the output size
        n_hid = hiddens + [n_out]
        for ind, ele in enumerate(n_hid):
            if ind == 0:
                input = x
                n_in = n_in
            else:
                input = hid_layers[-1].output
                n_in = n_hid[ind-1]
            # softmax on the last layer, sigmoid everywhere else
            if ind == len(n_hid) - 1:
                activation = T.nnet.softmax
            else:
                activation = T.nnet.sigmoid
            layer = HiddenLayer(rng, input = input, n_in = n_in, n_out = ele, activation = activation)
            hid_layers.append( layer)
            L2 += T.sum(layer.W ** 2)
            params.extend([layer.W, layer.b])
        # mean negative log-likelihood of the true classes
        nl = -T.mean(T.log(hid_layers[-1].output)[T.arange(y.shape[0]), y])
        cost = nl + L2 * lmbd
        grads = T.grad(cost, params)
        updates = []
        for param_i, grad_i in zip(params, grads):
            updates.append((param_i, param_i - lr * grad_i))
        y_pred = T.argmax(hid_layers[-1].output, 1)
        errors = T.mean(T.neq(y_pred, y))
        self.n_in = n_in
        self.n_out = n_out
        self.hiddens = hiddens
        self.x = x
        self.y = y
        self.lr = lr
        self.cost = cost
        self.errors = errors
        self.updates = updates
        # NOTE(review): this attribute shadows the `pred` method defined
        # below on every instance — confirm which one callers rely on.
        self.pred = y_pred
        self.time = []
        self.hid_layers = hid_layers

    def fit(self, datasets, batch_size = 500, n_epochs = 200, lr = 0.01):
        ''' without validation'''
        index = T.lscalar()
        train_set_x, train_set_y = datasets[0]
        test_set_x, test_set_y = datasets[1]
        n_train_batches = train_set_x.get_value(borrow=True).shape[0]
        n_test_batches = test_set_x.get_value(borrow=True).shape[0]
        # Python 2 integer division: partial trailing batches are dropped
        n_train_batches /= batch_size
        n_test_batches /= batch_size
        # compiled update step for one mini-batch
        train_model = theano.function([index], self.cost,
                updates = self.updates,
                givens = {
                    self.x: train_set_x[index * batch_size: (index + 1) * batch_size],
                    self.y: train_set_y[index * batch_size: (index + 1) * batch_size],
                    self.lr: lr})
        # error rate over the whole test set
        test_model = theano.function([], self.errors,
                givens = {
                    self.x: test_set_x,
                    self.y: test_set_y})
        debug_f = theano.function([index], self.errors,
                givens = {
                    self.x: test_set_x[index * batch_size : (index+1) * batch_size],
                    self.y: test_set_y[index * batch_size : (index+1) * batch_size]})
        # print numpy.mean([debug_f(i) for i in xrange(n_test_batches)])
        print(test_model())
        print '...training'
        maxiter = n_epochs
        iteration = 0
        while iteration < maxiter:
            start_time = time.time()
            iteration += 1
            print 'iteration %d' % iteration
            for minibatch_index in xrange(n_train_batches):
                # trailing comma: overwrite the same console line (py2 print)
                print '\tL of (%03d/%03d) = %f\r' % (minibatch_index, n_train_batches, train_model(minibatch_index)),
            print ''
            print 'error = %f' % test_model()
            self.time.append(time.time()-start_time)

    def __repr__(self):
        # NOTE(review): dead code — shadowed by the later __repr__ below and
        # references self.nkerns / self.nhiddens which are never assigned.
        return '<CNN: %r; HID: %r>' % (self.nkerns, self.nhiddens)

    def pred(self, x):
        """Return predicted class indices for input matrix `x`.

        NOTE(review): shadowed on instances by the `self.pred` tensor set in
        __init__ — confirm reachable.
        """
        return theano.function([], T.argmax(self.hid_layers[-1].output, 1),
                givens = {self.x: x})()

    def prob(self, x):
        """Return the softmax class probabilities for input matrix `x`."""
        return theano.function([], self.hid_layers[-1].output,
                givens = {self.x: x})()

    def __repr__(self):
        return '<ANN:%r-%r-%r>' % (self.n_in, self.hiddens, self.n_out)

    def test_model(self, test_set):
        """Return the error rate on `(test_set_x, test_set_y)`."""
        test_set_x, test_set_y = test_set
        test_model = theano.function([], self.errors,
                givens = {
                    self.x: test_set_x,
                    self.y: test_set_y})
        return test_model()

    def train_x(self):
        """Gradient-ascent visualization: optimize an input image to maximize
        the probability of class 1, displaying it with matplotlib."""
        import matplotlib.pyplot as plt
        x = theano.shared( numpy.random.uniform(0, 1, size=(1, 28*28)) )
        lr = 0.1
        real_loss = self.hid_layers[-1].output[0, 1]
        loss = -T.log(self.hid_layers[-1].output[0, 1])
        grad_x = T.grad(loss, self.x)
        updated_x = x - lr * grad_x
        # renormalize each step so pixel values stay in [0, 1]
        train = theano.function([], [loss, real_loss], updates={x: updated_x/updated_x.max()}, givens={self.x: x})
        plt.ion()
        plt.imshow(x.get_value().reshape(28, 28), cmap='gray')
        plt.draw()
        maxiter = 1000
        iteration = 0
        while iteration < maxiter:
            iteration += 1
            print train()
            if iteration % 50 == 0:
                plt.imshow(x.get_value().reshape(28, 28), cmap='gray', interpolation='None')
                plt.draw()
        f = theano.function([], loss, givens={self.x:x})
        print f()
def load_data(dataset, num = None):
    """Load an MNIST-style gzipped pickle and return theano shared datasets.

    The validation split is folded into the training split, and labels are
    binarized: digit 1 -> 1, everything else -> 0 (matching the "only
    classify 1" customization noted at the top of the file).

    :param dataset: path to mnist.pkl.gz
    :param num: optional cap on the number of examples per split
    :returns: [(train_x, train_y), (test_x, test_y)] as shared variables
    """
    print '... loading data'
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    # merge validation examples into the training set
    train_set = (numpy.concatenate([train_set[0], valid_set[0]], 0), numpy.concatenate([train_set[1], valid_set[1]], 0))
    f.close()
    def shared_dataset(data_xy, borrow=True, num = None):
        """Wrap (x, y) numpy arrays in theano shared variables, optionally
        truncated to the first `num` examples."""
        data_x, data_y = data_xy
        if num:
            data_x = data_x[:num]
            data_y = data_y[:num]
        # data_y = boarden(10, data_y)
        # binary task: label 1 stays 1, every other digit becomes 0
        data_y[data_y!=1] = 0
        # print data_y.mean()
        size = int(data_x.shape[1]**.5)
        # data_x = data_x.reshape(data_x.shape[0], -1)
        print data_x.shape, data_y.shape
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # stored as floatX for GPU compatibility, then cast to int labels
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        return shared_x, T.cast(shared_y, 'int32')
    test_set_x, test_set_y = shared_dataset(test_set, num = num)
    # valid_set_x, valid_set_y = shared_dataset(valid_set, num = num)
    train_set_x, train_set_y = shared_dataset(train_set, num = num)
    rval = [(train_set_x, train_set_y), #(valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
# Script entry point: either train a fresh model and pickle it, or load the
# pickled model and run the input-visualization routine (training = 0).
if __name__ == '__main__':
    theano.config.exception_verbosity='high'
    theano.config.on_unused_input='ignore'
    import cPickle
    # flip to 1 to retrain instead of loading 'ann.dat'
    training = 0
    datasets = load_data('../../Data/mnist/mnist.pkl.gz')
    if training:
        cl = ANN(28 * 28, 2, lmbd=0.001, hiddens = [20, 20, 20])
        cl.fit(datasets, lr = 0.01)
        cPickle.dump(cl, open('ann.dat', 'wb'))
    else:
        cl = cPickle.load(open('ann.dat', 'rb'))
        # print cl.test_model(datasets[1])
        cl.train_x()
| |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from datetime import datetime
import time
from uuid import uuid1, uuid4
import uuid
from cassandra.cluster import Session
from cassandra import InvalidRequest
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine.connection import NOT_SET
import mock
from cassandra.cqlengine import functions
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
from cassandra.cqlengine import query
from cassandra.cqlengine.query import QueryException, BatchQuery
from datetime import timedelta
from datetime import tzinfo
from cassandra.cqlengine import statements
from cassandra.cqlengine import operators
from cassandra.util import uuid_from_time
from cassandra.cqlengine.connection import get_session
from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21
from tests.integration.cqlengine import execute_count
class TzOffset(tzinfo):
    """Minimal implementation of a fixed timezone offset to help testing
    with timezone aware datetimes.
    """

    def __init__(self, offset):
        # `offset` is the offset from UTC in hours (may be negative).
        self._offset = timedelta(hours=offset)

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        # Bug fix: timedelta has no `.hours` attribute, so the original
        # `self._offset.hours` raised AttributeError whenever tzname()
        # was called. Derive the hour count from total_seconds() instead.
        return 'TzOffset: {}'.format(self._offset.total_seconds() / 3600)

    def dst(self, dt):
        # Fixed offset: no daylight-saving adjustment.
        return timedelta(0)
class TestModel(Model):
    """Basic fixture model: composite primary key, no secondary indexes."""
    # Partition key.
    test_id = columns.Integer(primary_key=True)
    # Clustering key.
    attempt_id = columns.Integer(primary_key=True)
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer()
class IndexedTestModel(Model):
    """Fixture model with secondary indexes instead of clustering keys."""
    # Single-column partition key; the remaining query columns are indexed.
    test_id = columns.Integer(primary_key=True)
    attempt_id = columns.Integer(index=True)
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer(index=True)
class IndexedCollectionsTestModel(Model):
    """Fixture model exercising indexed and non-indexed collection columns
    (used by the CONTAINS-operator tests; requires C* >= 2.1).
    """
    test_id = columns.Integer(primary_key=True)
    attempt_id = columns.Integer(index=True)
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer(index=True)
    # Indexed collections: CONTAINS queries against these should succeed.
    test_list = columns.List(columns.Integer, index=True)
    test_set = columns.Set(columns.Integer, index=True)
    test_map = columns.Map(columns.Text, columns.Integer, index=True)
    # Unindexed collections: CONTAINS queries against these should fail.
    test_list_no_index = columns.List(columns.Integer, index=False)
    test_set_no_index = columns.Set(columns.Integer, index=False)
    test_map_no_index = columns.Map(columns.Text, columns.Integer, index=False)
class TestMultiClusteringModel(Model):
    """Fixture model with one partition key and two clustering keys."""
    one = columns.Integer(primary_key=True)
    two = columns.Integer(primary_key=True)
    three = columns.Integer(primary_key=True)
class TestQuerySetOperation(BaseCassEngTestCase):
    """Pure queryset-construction tests: no rows are read or written, only
    the internal state (_where, _limit, _distinct_fields, selected fields)
    of queryset objects is inspected.
    """

    def test_query_filter_parsing(self):
        """
        Tests the queryset filter method parses it's kwargs properly
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1

        op = query1._where[0]

        assert isinstance(op, statements.WhereClause)
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5

        # `__gte` suffix should map to a >= where clause.
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2

        op = query2._where[1]
        self.assertIsInstance(op, statements.WhereClause)
        self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1

    def test_query_expression_parsing(self):
        """ Tests that query experessions are evaluated properly """
        query1 = TestModel.filter(TestModel.test_id == 5)
        assert len(query1._where) == 1

        op = query1._where[0]
        assert isinstance(op, statements.WhereClause)
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5

        query2 = query1.filter(TestModel.expected_result >= 1)
        assert len(query2._where) == 2

        op = query2._where[1]
        self.assertIsInstance(op, statements.WhereClause)
        self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1

    def test_using_invalid_column_names_in_filter_kwargs_raises_error(self):
        """
        Tests that using invalid or nonexistant column names for filter args raises an error
        """
        with self.assertRaises(query.QueryException):
            TestModel.objects(nonsense=5)

    def test_using_nonexistant_column_names_in_query_args_raises_error(self):
        """
        Tests that using invalid or nonexistant columns for query args raises an error
        """
        with self.assertRaises(AttributeError):
            TestModel.objects(TestModel.nonsense == 5)

    def test_using_non_query_operators_in_query_args_raises_error(self):
        """
        Tests that providing query args that are not query operator instances raises an error
        """
        with self.assertRaises(query.QueryException):
            TestModel.objects(5)

    def test_queryset_is_immutable(self):
        """
        Tests that calling a queryset function that changes it's state returns a new queryset
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1

        # filter() must not mutate query1's where clauses.
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2
        assert len(query1._where) == 1

    def test_queryset_limit_immutability(self):
        """
        Tests that calling a queryset function that changes it's state returns a new queryset with same limit
        """
        query1 = TestModel.objects(test_id=5).limit(1)
        assert query1._limit == 1

        # The limit is inherited by derived querysets...
        query2 = query1.filter(expected_result__gte=1)
        assert query2._limit == 1

        # ...and overriding it on a derivative leaves the original alone.
        query3 = query1.filter(expected_result__gte=1).limit(2)
        assert query1._limit == 1
        assert query3._limit == 2

    def test_the_all_method_duplicates_queryset(self):
        """
        Tests that calling all on a queryset with previously defined filters duplicates queryset
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1

        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2

        query3 = query2.all()

        assert query3 == query2

    def test_queryset_with_distinct(self):
        """
        Tests that calling distinct on a queryset w/without parameter are evaluated properly.
        """
        # With no argument, distinct defaults to the partition key(s).
        query1 = TestModel.objects.distinct()
        self.assertEqual(len(query1._distinct_fields), 1)

        query2 = TestModel.objects.distinct(['test_id'])
        self.assertEqual(len(query2._distinct_fields), 1)

        query3 = TestModel.objects.distinct(['test_id', 'attempt_id'])
        self.assertEqual(len(query3._distinct_fields), 2)

    def test_defining_only_fields(self):
        """
        Tests defining only fields

        @since 3.5
        @jira_ticket PYTHON-560
        @expected_result deferred fields should not be returned

        @test_category object_mapper
        """
        # simple only definition
        q = TestModel.objects.only(['attempt_id', 'description'])
        self.assertEqual(q._select_fields(), ['attempt_id', 'description'])

        with self.assertRaises(query.QueryException):
            TestModel.objects.only(['nonexistent_field'])

        # Cannot define more than once only fields
        with self.assertRaises(query.QueryException):
            TestModel.objects.only(['description']).only(['attempt_id'])

        # only with defer fields
        q = TestModel.objects.only(['attempt_id', 'description'])
        q = q.defer(['description'])
        self.assertEqual(q._select_fields(), ['attempt_id'])

        # Eliminate all results confirm exception is thrown
        q = TestModel.objects.only(['description'])
        q = q.defer(['description'])
        with self.assertRaises(query.QueryException):
            q._select_fields()

        # Columns already constrained by an equality filter are dropped
        # from the select list.
        q = TestModel.objects.filter(test_id=0).only(['test_id', 'attempt_id', 'description'])
        self.assertEqual(q._select_fields(), ['attempt_id', 'description'])

        # no fields to select
        with self.assertRaises(query.QueryException):
            q = TestModel.objects.only(['test_id']).defer(['test_id'])
            q._select_fields()

        with self.assertRaises(query.QueryException):
            q = TestModel.objects.filter(test_id=0).only(['test_id'])
            q._select_fields()

    def test_defining_defer_fields(self):
        """
        Tests defining defer fields

        @since 3.5
        @jira_ticket PYTHON-560
        @jira_ticket PYTHON-599
        @expected_result deferred fields should not be returned

        @test_category object_mapper
        """
        # simple defer definition
        q = TestModel.objects.defer(['attempt_id', 'description'])
        self.assertEqual(q._select_fields(), ['test_id', 'expected_result', 'test_result'])

        with self.assertRaises(query.QueryException):
            TestModel.objects.defer(['nonexistent_field'])

        # defer more than one
        q = TestModel.objects.defer(['attempt_id', 'description'])
        q = q.defer(['expected_result'])
        self.assertEqual(q._select_fields(), ['test_id', 'test_result'])

        # defer with only
        q = TestModel.objects.defer(['description', 'attempt_id'])
        q = q.only(['description', 'test_id'])
        self.assertEqual(q._select_fields(), ['test_id'])

        # Eliminate all results confirm exception is thrown
        q = TestModel.objects.defer(['description', 'attempt_id'])
        q = q.only(['description'])
        with self.assertRaises(query.QueryException):
            q._select_fields()

        # implicit defer
        q = TestModel.objects.filter(test_id=0)
        self.assertEqual(q._select_fields(), ['attempt_id', 'description', 'expected_result', 'test_result'])

        # when all fields are defered, it fallbacks select the partition keys
        q = TestModel.objects.defer(['test_id', 'attempt_id', 'description', 'expected_result', 'test_result'])
        self.assertEqual(q._select_fields(), ['test_id'])
class BaseQuerySetUsage(BaseCassEngTestCase):
    """Shared fixture base: (re)creates the test tables once per test class
    and seeds them with the deterministic rows the queryset tests rely on.
    """

    @classmethod
    def setUpClass(cls):
        super(BaseQuerySetUsage, cls).setUpClass()
        drop_table(TestModel)
        drop_table(IndexedTestModel)
        sync_table(TestModel)
        sync_table(IndexedTestModel)
        sync_table(TestMultiClusteringModel)

        # 12 rows across 3 partitions (test_id 0-2), 4 attempts each.
        TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=2, description='try3', expected_result=15, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=3, description='try4', expected_result=20, test_result=25)

        TestModel.objects.create(test_id=1, attempt_id=0, description='try5', expected_result=5, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=1, description='try6', expected_result=10, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=2, description='try7', expected_result=15, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=3, description='try8', expected_result=20, test_result=20)

        TestModel.objects.create(test_id=2, attempt_id=0, description='try9', expected_result=50, test_result=40)
        TestModel.objects.create(test_id=2, attempt_id=1, description='try10', expected_result=60, test_result=40)
        TestModel.objects.create(test_id=2, attempt_id=2, description='try11', expected_result=70, test_result=45)
        TestModel.objects.create(test_id=2, attempt_id=3, description='try12', expected_result=75, test_result=45)

        # 12 rows with unique partition keys (test_id 0-11) for the
        # secondary-index tests.
        IndexedTestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
        IndexedTestModel.objects.create(test_id=1, attempt_id=1, description='try2', expected_result=10, test_result=30)
        IndexedTestModel.objects.create(test_id=2, attempt_id=2, description='try3', expected_result=15, test_result=30)
        IndexedTestModel.objects.create(test_id=3, attempt_id=3, description='try4', expected_result=20, test_result=25)

        IndexedTestModel.objects.create(test_id=4, attempt_id=0, description='try5', expected_result=5, test_result=25)
        IndexedTestModel.objects.create(test_id=5, attempt_id=1, description='try6', expected_result=10, test_result=25)
        IndexedTestModel.objects.create(test_id=6, attempt_id=2, description='try7', expected_result=15, test_result=25)
        IndexedTestModel.objects.create(test_id=7, attempt_id=3, description='try8', expected_result=20, test_result=20)

        IndexedTestModel.objects.create(test_id=8, attempt_id=0, description='try9', expected_result=50, test_result=40)
        IndexedTestModel.objects.create(test_id=9, attempt_id=1, description='try10', expected_result=60,
                                        test_result=40)
        IndexedTestModel.objects.create(test_id=10, attempt_id=2, description='try11', expected_result=70,
                                        test_result=45)
        IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75,
                                        test_result=45)

        # Indexed collections require Cassandra 2.1+.
        if(CASSANDRA_VERSION >= '2.1'):
            drop_table(IndexedCollectionsTestModel)
            sync_table(IndexedCollectionsTestModel)
            IndexedCollectionsTestModel.objects.create(test_id=12, attempt_id=3, description='list12', expected_result=75,
                                                       test_result=45, test_list=[1, 2, 42], test_set=set([1, 2, 3]),
                                                       test_map={'1': 1, '2': 2, '3': 3})
            IndexedCollectionsTestModel.objects.create(test_id=13, attempt_id=3, description='list13', expected_result=75,
                                                       test_result=45, test_list=[3, 4, 5], test_set=set([4, 5, 42]),
                                                       test_map={'1': 5, '2': 6, '3': 7})
            IndexedCollectionsTestModel.objects.create(test_id=14, attempt_id=3, description='list14', expected_result=75,
                                                       test_result=45, test_list=[1, 2, 3], test_set=set([1, 2, 3]),
                                                       test_map={'1': 1, '2': 2, '3': 42})
            IndexedCollectionsTestModel.objects.create(test_id=15, attempt_id=4, description='list14', expected_result=75,
                                                       test_result=45, test_list_no_index=[1, 2, 3], test_set_no_index=set([1, 2, 3]),
                                                       test_map_no_index={'1': 1, '2': 2, '3': 42})

    @classmethod
    def tearDownClass(cls):
        super(BaseQuerySetUsage, cls).tearDownClass()
        drop_table(TestModel)
        drop_table(IndexedTestModel)
        drop_table(TestMultiClusteringModel)
class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage):
    """Counting, iteration and get() behaviour against the seeded fixture
    data (12 TestModel rows in 3 partitions of 4 rows each).
    """

    @execute_count(2)
    def test_count(self):
        """ Tests that adding filtering statements affects the count query as expected """
        assert TestModel.objects.count() == 12

        q = TestModel.objects(test_id=0)
        assert q.count() == 4

    @execute_count(2)
    def test_query_expression_count(self):
        """ Tests that adding query statements affects the count query as expected """
        assert TestModel.objects.count() == 12

        q = TestModel.objects(TestModel.test_id == 0)
        assert q.count() == 4

    @execute_count(3)
    def test_iteration(self):
        """ Tests that iterating over a query set pulls back all of the expected results """
        q = TestModel.objects(test_id=0)
        # tuple of expected attempt_id, expected_result values
        compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
        for t in q:
            val = t.attempt_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0

        # test with regular filtering
        q = TestModel.objects(attempt_id=3).allow_filtering()
        assert len(q) == 3
        # tuple of expected test_id, expected_result values
        compare_set = set([(0, 20), (1, 20), (2, 75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0

        # test with query method
        q = TestModel.objects(TestModel.attempt_id == 3).allow_filtering()
        assert len(q) == 3
        # tuple of expected test_id, expected_result values
        compare_set = set([(0, 20), (1, 20), (2, 75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0

    @execute_count(2)
    def test_multiple_iterations_work_properly(self):
        """ Tests that iterating over a query set more than once works """
        # test with both the filtering method and the query method
        for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
            # tuple of expected attempt_id, expected_result values
            compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0

            # try it again
            compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0

    @execute_count(2)
    def test_multiple_iterators_are_isolated(self):
        """
        tests that the use of one iterator does not affect the behavior of another
        """
        for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
            q = q.order_by('attempt_id')
            expected_order = [0, 1, 2, 3]
            iter1 = iter(q)
            iter2 = iter(q)
            # Advancing the two iterators in lockstep must yield the same
            # ordered rows from each, independently.
            for attempt_id in expected_order:
                assert next(iter1).attempt_id == attempt_id
                assert next(iter2).attempt_id == attempt_id

    @execute_count(3)
    def test_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = TestModel.objects.get(test_id=0, attempt_id=0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = TestModel.objects(test_id=0, attempt_id=0)
        m = q.get()
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = TestModel.objects(test_id=0)
        m = q.get(attempt_id=0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

    @execute_count(3)
    def test_query_expression_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = TestModel.get(TestModel.test_id == 0, TestModel.attempt_id == 0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = TestModel.objects(TestModel.test_id == 0, TestModel.attempt_id == 0)
        m = q.get()
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

        q = TestModel.objects(TestModel.test_id == 0)
        m = q.get(TestModel.attempt_id == 0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0

    @execute_count(1)
    def test_get_doesnotexist_exception(self):
        """
        Tests that get calls that don't return a result raises a DoesNotExist error
        """
        with self.assertRaises(TestModel.DoesNotExist):
            TestModel.objects.get(test_id=100)

    @execute_count(1)
    def test_get_multipleobjects_exception(self):
        """
        Tests that get calls that return multiple results raise a MultipleObjectsReturned error
        """
        with self.assertRaises(TestModel.MultipleObjectsReturned):
            TestModel.objects.get(test_id=1)

    def test_allow_filtering_flag(self):
        """
        Placeholder — NOTE(review): this test body is empty; either implement
        an allow_filtering assertion here or remove the stub.
        """
@execute_count(4)
def test_non_quality_filtering():
    """Non-equality (range) filtering on a clustering key with
    ALLOW FILTERING returns only the matching rows.

    NOTE(review): this appears to be a module-level function (no class,
    no ``self``) — confirm the test runner collects it as intended.
    """
    class NonEqualityFilteringModel(Model):
        # In-test model: UUID partition key plus an integer clustering key.
        example_id = columns.UUID(primary_key=True, default=uuid.uuid4)
        sequence_id = columns.Integer(primary_key=True)  # sequence_id is a clustering key
        example_type = columns.Integer(index=True)
        created_at = columns.DateTime()

    drop_table(NonEqualityFilteringModel)
    sync_table(NonEqualityFilteringModel)

    # setup table, etc.
    NonEqualityFilteringModel.create(sequence_id=1, example_type=0, created_at=datetime.now())
    NonEqualityFilteringModel.create(sequence_id=3, example_type=0, created_at=datetime.now())
    NonEqualityFilteringModel.create(sequence_id=5, example_type=1, created_at=datetime.now())

    # Only sequence_id=5 satisfies "> 3".
    qa = NonEqualityFilteringModel.objects(NonEqualityFilteringModel.sequence_id > 3).allow_filtering()
    num = qa.count()
    assert num == 1, num
class TestQuerySetDistinct(BaseQuerySetUsage):
    """DISTINCT queries against the seeded fixture (3 distinct test_id
    partition keys: 0, 1, 2).
    """

    @execute_count(1)
    def test_distinct_without_parameter(self):
        # Defaults to DISTINCT on the partition key.
        q = TestModel.objects.distinct()
        self.assertEqual(len(q), 3)

    @execute_count(1)
    def test_distinct_with_parameter(self):
        q = TestModel.objects.distinct(['test_id'])
        self.assertEqual(len(q), 3)

    @execute_count(1)
    def test_distinct_with_filter(self):
        q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1, 2])
        self.assertEqual(len(q), 2)

    @execute_count(1)
    def test_distinct_with_non_partition(self):
        # DISTINCT on a non-partition column is rejected server-side.
        with self.assertRaises(InvalidRequest):
            q = TestModel.objects.distinct(['description']).filter(test_id__in=[1, 2])
            len(q)

    @execute_count(1)
    def test_zero_result(self):
        q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[52])
        self.assertEqual(len(q), 0)

    @greaterthancass21
    @execute_count(2)
    def test_distinct_with_explicit_count(self):
        q = TestModel.objects.distinct(['test_id'])
        self.assertEqual(q.count(), 3)

        q = TestModel.objects.distinct(['test_id']).filter(test_id__in=[1, 2])
        self.assertEqual(q.count(), 2)
class TestQuerySetOrdering(BaseQuerySetUsage):
    """order_by() behaviour: ordering is only valid on clustering keys."""

    @execute_count(2)
    def test_order_by_success_case(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q, expected_order):
            assert model.attempt_id == expect

        # '-' prefix reverses the ordering.
        q = q.order_by('-attempt_id')
        expected_order.reverse()
        for model, expect in zip(q, expected_order):
            assert model.attempt_id == expect

    def test_ordering_by_non_second_primary_keys_fail(self):
        # Ordering on the partition key itself is invalid.
        # kwarg filtering
        with self.assertRaises(query.QueryException):
            TestModel.objects(test_id=0).order_by('test_id')

        # kwarg filtering
        with self.assertRaises(query.QueryException):
            TestModel.objects(TestModel.test_id == 0).order_by('test_id')

    def test_ordering_by_non_primary_keys_fails(self):
        with self.assertRaises(query.QueryException):
            TestModel.objects(test_id=0).order_by('description')

    def test_ordering_on_indexed_columns_fails(self):
        # attempt_id is indexed (not clustering) on IndexedTestModel.
        with self.assertRaises(query.QueryException):
            IndexedTestModel.objects(test_id=0).order_by('attempt_id')

    @execute_count(8)
    def test_ordering_on_multiple_clustering_columns(self):
        TestMultiClusteringModel.create(one=1, two=1, three=4)
        TestMultiClusteringModel.create(one=1, two=1, three=2)
        TestMultiClusteringModel.create(one=1, two=1, three=5)
        TestMultiClusteringModel.create(one=1, two=1, three=1)
        TestMultiClusteringModel.create(one=1, two=1, three=3)

        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('-two', '-three')
        assert [r.three for r in results] == [5, 4, 3, 2, 1]

        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two', 'three')
        assert [r.three for r in results] == [1, 2, 3, 4, 5]

        # Chained order_by calls accumulate.
        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two').order_by('three')
        assert [r.three for r in results] == [1, 2, 3, 4, 5]
class TestQuerySetSlicing(BaseQuerySetUsage):
    """Indexing and slicing of evaluated querysets (list-like semantics,
    including negative indices and slices).
    """

    @execute_count(1)
    def test_out_of_range_index_raises_error(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        with self.assertRaises(IndexError):
            q[10]

    @execute_count(1)
    def test_array_indexing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for i in range(len(q)):
            assert q[i].attempt_id == expected_order[i]

    @execute_count(1)
    def test_negative_indexing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        assert q[-1].attempt_id == expected_order[-1]
        assert q[-2].attempt_id == expected_order[-2]

    @execute_count(1)
    def test_slicing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]

        for model, expect in zip(q[1:3], expected_order[1:3]):
            self.assertEqual(model.attempt_id, expect)

        # Extended slices (with a step) are supported too.
        for model, expect in zip(q[0:3:2], expected_order[0:3:2]):
            self.assertEqual(model.attempt_id, expect)

    @execute_count(1)
    def test_negative_slicing(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]

        for model, expect in zip(q[-3:], expected_order[-3:]):
            self.assertEqual(model.attempt_id, expect)

        for model, expect in zip(q[:-1], expected_order[:-1]):
            self.assertEqual(model.attempt_id, expect)

        for model, expect in zip(q[1:-1], expected_order[1:-1]):
            self.assertEqual(model.attempt_id, expect)

        for model, expect in zip(q[-3:-1], expected_order[-3:-1]):
            self.assertEqual(model.attempt_id, expect)

        for model, expect in zip(q[-3:-1:2], expected_order[-3:-1:2]):
            self.assertEqual(model.attempt_id, expect)
class TestQuerySetValidation(BaseQuerySetUsage):
    """Client-side validation of querysets: queries must be anchored on a
    primary key or a secondary index before they may execute.
    """

    def test_primary_key_or_index_must_be_specified(self):
        """
        Tests that queries that don't have an equals relation to a primary key or indexed field fail
        """
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_result=25)
            list([i for i in q])

    def test_primary_key_or_index_must_have_equal_relation_filter(self):
        """
        Tests that queries that don't have non equal (>,<, etc) relation to a primary key or indexed field fail
        """
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_id__gt=0)
            list([i for i in q])

    @greaterthancass20
    @execute_count(7)
    def test_indexed_field_can_be_queried(self):
        """
        Tests that queries on an indexed field will work without any primary key relations specified
        """
        q = IndexedTestModel.objects(test_result=25)
        self.assertEqual(q.count(), 4)

        # CONTAINS lookups on indexed collections; counts follow the
        # fixture data seeded in BaseQuerySetUsage.
        q = IndexedCollectionsTestModel.objects.filter(test_list__contains=42)
        self.assertEqual(q.count(), 1)

        q = IndexedCollectionsTestModel.objects.filter(test_list__contains=13)
        self.assertEqual(q.count(), 0)

        q = IndexedCollectionsTestModel.objects.filter(test_set__contains=42)
        self.assertEqual(q.count(), 1)

        q = IndexedCollectionsTestModel.objects.filter(test_set__contains=13)
        self.assertEqual(q.count(), 0)

        q = IndexedCollectionsTestModel.objects.filter(test_map__contains=42)
        self.assertEqual(q.count(), 1)

        q = IndexedCollectionsTestModel.objects.filter(test_map__contains=13)
        self.assertEqual(q.count(), 0)
class TestQuerySetDelete(BaseQuerySetUsage):
    """Row deletion through the queryset interface, including the cases
    that must be rejected client-side.
    """

    @execute_count(9)
    def test_delete(self):
        """Rows in one partition can be deleted by partition key without
        touching the other partitions."""
        TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40)
        TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40)
        TestModel.objects.create(test_id=3, attempt_id=2, description='try11', expected_result=70, test_result=45)
        TestModel.objects.create(test_id=3, attempt_id=3, description='try12', expected_result=75, test_result=45)

        assert TestModel.objects.count() == 16
        assert TestModel.objects(test_id=3).count() == 4

        TestModel.objects(test_id=3).delete()

        assert TestModel.objects.count() == 12
        assert TestModel.objects(test_id=3).count() == 0

    def test_delete_without_partition_key(self):
        """ Tests that attempting to delete a model without defining a partition key fails """
        with self.assertRaises(query.QueryException):
            TestModel.objects(attempt_id=0).delete()

    def test_delete_without_any_where_args(self):
        """ Tests that attempting to delete a whole table without any arguments will fail """
        # Bug fix: the original body filtered on attempt_id=0, which made
        # this an exact duplicate of test_delete_without_partition_key
        # instead of exercising the documented no-arguments case.
        with self.assertRaises(query.QueryException):
            TestModel.objects.delete()

    @unittest.skipIf(CASSANDRA_VERSION < '3.0', "range deletion was introduce in C* 3.0, currently running {0}".format(CASSANDRA_VERSION))
    @execute_count(18)
    def test_range_deletion(self):
        """
        Tests that range deletion work as expected
        """
        # Seed ten rows in one partition, then delete them in range chunks.
        for i in range(10):
            TestMultiClusteringModel.objects().create(one=1, two=i, three=i)

        TestMultiClusteringModel.objects(one=1, two__gte=0, two__lte=3).delete()
        self.assertEqual(6, len(TestMultiClusteringModel.objects.all()))

        TestMultiClusteringModel.objects(one=1, two__gt=3, two__lt=5).delete()
        self.assertEqual(5, len(TestMultiClusteringModel.objects.all()))

        TestMultiClusteringModel.objects(one=1, two__in=[8, 9]).delete()
        self.assertEqual(3, len(TestMultiClusteringModel.objects.all()))

        TestMultiClusteringModel.objects(one__in=[1], two__gte=0).delete()
        self.assertEqual(0, len(TestMultiClusteringModel.objects.all()))
class TimeUUIDQueryModel(Model):
    """Fixture model for the MinTimeUUID/MaxTimeUUID function tests."""
    partition = columns.UUID(primary_key=True)
    # TimeUUID clustering key: rows order by their embedded timestamp.
    time = columns.TimeUUID(primary_key=True)
    data = columns.Text(required=False)
class TestMinMaxTimeUUIDFunctions(BaseCassEngTestCase):
    """Tests for the MinTimeUUID/MaxTimeUUID CQL functions over a TimeUUID
    clustering column, with both naive and timezone-aware datetimes.
    """

    @classmethod
    def setUpClass(cls):
        super(TestMinMaxTimeUUIDFunctions, cls).setUpClass()
        sync_table(TimeUUIDQueryModel)

    @classmethod
    def tearDownClass(cls):
        super(TestMinMaxTimeUUIDFunctions, cls).tearDownClass()
        drop_table(TimeUUIDQueryModel)

    @execute_count(7)
    def test_tzaware_datetime_support(self):
        """Test that using timezone aware datetime instances works with the
        MinTimeUUID/MaxTimeUUID functions.
        """
        pk = uuid4()
        # The same instant expressed in two different timezones.
        midpoint_utc = datetime.utcnow().replace(tzinfo=TzOffset(0))
        midpoint_helsinki = midpoint_utc.astimezone(TzOffset(3))

        # Assert pre-condition that we have the same logical point in time
        assert midpoint_utc.utctimetuple() == midpoint_helsinki.utctimetuple()
        assert midpoint_utc.timetuple() != midpoint_helsinki.timetuple()

        # Three rows: one minute before, at, and after the midpoint.
        TimeUUIDQueryModel.create(
            partition=pk,
            time=uuid_from_time(midpoint_utc - timedelta(minutes=1)),
            data='1')

        TimeUUIDQueryModel.create(
            partition=pk,
            time=uuid_from_time(midpoint_utc),
            data='2')

        TimeUUIDQueryModel.create(
            partition=pk,
            time=uuid_from_time(midpoint_utc + timedelta(minutes=1)),
            data='3')

        # Both timezone representations must select the same rows.
        assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_utc))]

        assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_helsinki))]

        assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_utc))]

        assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_helsinki))]

    @execute_count(8)
    def test_success_case(self):
        """ Test that the min and max time uuid functions work as expected """
        pk = uuid4()
        startpoint = datetime.utcnow()
        # Two rows before the midpoint, two after.
        TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=1)), data='1')
        TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=2)), data='2')
        midpoint = startpoint + timedelta(seconds=3)
        TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=4)), data='3')
        TimeUUIDQueryModel.create(partition=pk, time=uuid_from_time(startpoint + timedelta(seconds=5)), data='4')

        # test kwarg filtering
        q = TimeUUIDQueryModel.filter(partition=pk, time__lte=functions.MaxTimeUUID(midpoint))
        q = [d for d in q]
        self.assertEqual(len(q), 2, msg="Got: %s" % q)
        datas = [d.data for d in q]
        assert '1' in datas
        assert '2' in datas

        q = TimeUUIDQueryModel.filter(partition=pk, time__gte=functions.MinTimeUUID(midpoint))
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '3' in datas
        assert '4' in datas

        # test query expression filtering
        q = TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint)
        )
        q = [d for d in q]
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '1' in datas
        assert '2' in datas

        q = TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint)
        )
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '3' in datas
        assert '4' in datas
class TestInOperator(BaseQuerySetUsage):
    """IN-operator queries, plus boolean-column coverage added under
    PYTHON-596.
    """

    @execute_count(1)
    def test_kwarg_success_case(self):
        """ Tests the in operator works with the kwarg query method """
        # Partitions 0 and 1 hold 4 rows each in the fixture data.
        q = TestModel.filter(test_id__in=[0, 1])
        assert q.count() == 8

    @execute_count(1)
    def test_query_expression_success_case(self):
        """ Tests the in operator works with the query expression query method """
        q = TestModel.filter(TestModel.test_id.in_([0, 1]))
        assert q.count() == 8

    @execute_count(5)
    def test_bool(self):
        """
        Adding coverage to cqlengine for bool types.

        @since 3.6
        @jira_ticket PYTHON-596
        @expected_result bool results should be filtered appropriately

        @test_category object_mapper
        """
        class bool_model(Model):
            k = columns.Integer(primary_key=True)
            b = columns.Boolean(primary_key=True)
            v = columns.Integer(default=3)
        sync_table(bool_model)

        bool_model.create(k=0, b=True)
        bool_model.create(k=0, b=False)
        self.assertEqual(len(bool_model.objects.all()), 2)
        self.assertEqual(len(bool_model.objects.filter(k=0, b=True)), 1)
        self.assertEqual(len(bool_model.objects.filter(k=0, b=False)), 1)

    @execute_count(3)
    def test_bool_filter(self):
        """
        Test to ensure that we don't translate boolean objects to String unnecessarily in filter clauses

        @since 3.6
        @jira_ticket PYTHON-596
        @expected_result We should not receive a server error

        @test_category object_mapper
        """
        class bool_model2(Model):
            # Boolean partition key exercises the IN clause with bools.
            k = columns.Boolean(primary_key=True)
            b = columns.Integer(primary_key=True)
            v = columns.Text()
        drop_table(bool_model2)
        sync_table(bool_model2)

        bool_model2.create(k=True, b=1, v='a')
        bool_model2.create(k=False, b=1, v='b')
        self.assertEqual(len(list(bool_model2.objects(k__in=(True, False)))), 2)
@greaterthancass20
class TestContainsOperator(BaseQuerySetUsage):
    """Exercises the CONTAINS operator against indexed and non-indexed
    collection columns (requires Cassandra 2.1+)."""

    @execute_count(6)
    def test_kwarg_success_case(self):
        """ Tests the CONTAINS operator works with the kwarg query method """
        q = IndexedCollectionsTestModel.filter(test_list__contains=1)
        self.assertEqual(q.count(), 2)
        q = IndexedCollectionsTestModel.filter(test_list__contains=13)
        self.assertEqual(q.count(), 0)
        q = IndexedCollectionsTestModel.filter(test_set__contains=3)
        self.assertEqual(q.count(), 2)
        q = IndexedCollectionsTestModel.filter(test_set__contains=13)
        self.assertEqual(q.count(), 0)
        q = IndexedCollectionsTestModel.filter(test_map__contains=42)
        self.assertEqual(q.count(), 1)
        q = IndexedCollectionsTestModel.filter(test_map__contains=13)
        self.assertEqual(q.count(), 0)

        # CONTAINS on non-indexed collections must be rejected client-side.
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(test_list_no_index__contains=1)
            self.assertEqual(q.count(), 0)
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(test_set_no_index__contains=1)
            self.assertEqual(q.count(), 0)
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(test_map_no_index__contains=1)
            self.assertEqual(q.count(), 0)

    @execute_count(6)
    def test_query_expression_success_case(self):
        """ Tests the CONTAINS operator works with the query expression query method """
        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list.contains_(1))
        self.assertEqual(q.count(), 2)
        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list.contains_(13))
        self.assertEqual(q.count(), 0)
        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set.contains_(3))
        self.assertEqual(q.count(), 2)
        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set.contains_(13))
        self.assertEqual(q.count(), 0)
        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map.contains_(42))
        self.assertEqual(q.count(), 1)
        q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map.contains_(13))
        self.assertEqual(q.count(), 0)

        # BUG FIX: the original checked test_map_no_index three times
        # (copy/paste); mirror the kwarg test above and cover each
        # non-indexed collection type once.
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_list_no_index.contains_(1))
            self.assertEqual(q.count(), 0)
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_set_no_index.contains_(1))
            self.assertEqual(q.count(), 0)
        with self.assertRaises(QueryException):
            q = IndexedCollectionsTestModel.filter(IndexedCollectionsTestModel.test_map_no_index.contains_(1))
            self.assertEqual(q.count(), 0)
class TestValuesList(BaseQuerySetUsage):
    """Covers values_list() in both nested-list and flat forms."""

    @execute_count(2)
    def test_values_list(self):
        qs = TestModel.objects.filter(test_id=0, attempt_id=1)
        row = qs.values_list('test_id', 'attempt_id', 'description', 'expected_result', 'test_result').first()
        assert row == [0, 1, 'try2', 10, 30]

        # flat=True collapses single-column rows to bare values.
        flat_value = qs.values_list('expected_result', flat=True).first()
        assert flat_value == 10
class TestObjectsProperty(BaseQuerySetUsage):
    """Model.objects must hand out a fresh, unevaluated queryset each time."""

    @execute_count(1)
    def test_objects_property_returns_fresh_queryset(self):
        assert TestModel.objects._result_cache is None
        len(TestModel.objects)  # evaluates a throwaway queryset
        # A new access is again unevaluated -- no shared result cache.
        assert TestModel.objects._result_cache is None
class PageQueryTests(BaseCassEngTestCase):
    """Slicing a queryset must transparently drain paged results (#225)."""

    @execute_count(3)
    def test_paged_result_handling(self):
        if PROTOCOL_VERSION < 2:
            raise unittest.SkipTest("Paging requires native protocol 2+, currently using: {0}".format(PROTOCOL_VERSION))

        class PagingTest(Model):
            id = columns.Integer(primary_key=True)
            val = columns.Integer()
        sync_table(PagingTest)

        PagingTest.create(id=1, val=1)
        PagingTest.create(id=2, val=2)

        # Force one row per page so the slice has to walk multiple pages.
        session = get_session()
        with mock.patch.object(session, 'default_fetch_size', 1):
            fetched = PagingTest.objects()[:]
        assert len(fetched) == 2
class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage):
    """Verifies the timeout keyword forwarded to Session.execute by querysets."""

    def _executed_timeout(self, queryset):
        # Evaluate the queryset under a mocked execute and report the
        # timeout keyword that execute received.
        with mock.patch.object(Session, 'execute') as mock_execute:
            list(queryset)
        return mock_execute.call_args[-1]['timeout']

    def test_default_timeout(self):
        self.assertEqual(self._executed_timeout(TestModel.objects()), NOT_SET)

    def test_float_timeout(self):
        self.assertEqual(self._executed_timeout(TestModel.objects().timeout(0.5)), 0.5)

    def test_none_timeout(self):
        self.assertEqual(self._executed_timeout(TestModel.objects().timeout(None)), None)
class DMLQueryTimeoutTestCase(BaseQuerySetUsage):
    """Timeout handling on the DML (save) path, and its mutual exclusion
    with batch queries."""

    def setUp(self):
        self.model = TestModel(test_id=1, attempt_id=1, description='timeout test')
        super(DMLQueryTimeoutTestCase, self).setUp()

    def test_default_timeout(self):
        with mock.patch.object(Session, 'execute') as mock_execute:
            self.model.save()
        self.assertEqual(mock_execute.call_args[-1]['timeout'], NOT_SET)

    def test_float_timeout(self):
        with mock.patch.object(Session, 'execute') as mock_execute:
            self.model.timeout(0.5).save()
        self.assertEqual(mock_execute.call_args[-1]['timeout'], 0.5)

    def test_none_timeout(self):
        with mock.patch.object(Session, 'execute') as mock_execute:
            self.model.timeout(None).save()
        self.assertEqual(mock_execute.call_args[-1]['timeout'], None)

    def test_timeout_then_batch(self):
        # A model with an explicit timeout cannot join a batch ...
        batch = query.BatchQuery()
        pending = self.model.timeout(None)
        with self.assertRaises(AssertionError):
            pending.batch(batch)

    def test_batch_then_timeout(self):
        # ... and a batched model cannot be given its own timeout.
        batch = query.BatchQuery()
        pending = self.model.batch(batch)
        with self.assertRaises(AssertionError):
            pending.timeout(0.5)
class DBFieldModel(Model):
    """Model whose every attribute maps to a differently-named database
    column through ``db_field``."""
    k0 = columns.Integer(partition_key=True, db_field='a')  # partition key -> column 'a'
    k1 = columns.Integer(partition_key=True, db_field='b')  # partition key -> column 'b'
    c0 = columns.Integer(primary_key=True, db_field='c')    # clustering key -> column 'c'
    v0 = columns.Integer(db_field='d')                      # plain value -> column 'd'
    v1 = columns.Integer(db_field='e', index=True)          # secondary-indexed value -> column 'e'
class DBFieldModelMixed1(Model):
    """Variant of DBFieldModel with ``db_field`` on only some columns
    (k1 and v1 keep their attribute names as column names)."""
    k0 = columns.Integer(partition_key=True, db_field='a')
    k1 = columns.Integer(partition_key=True)
    c0 = columns.Integer(primary_key=True, db_field='c')
    v0 = columns.Integer(db_field='d')
    v1 = columns.Integer(index=True)
class DBFieldModelMixed2(Model):
    """Variant of DBFieldModel with ``db_field`` on the complementary subset
    of columns relative to DBFieldModelMixed1."""
    k0 = columns.Integer(partition_key=True)
    k1 = columns.Integer(partition_key=True, db_field='b')
    c0 = columns.Integer(primary_key=True)
    v0 = columns.Integer(db_field='d')
    v1 = columns.Integer(index=True, db_field='e')
class TestModelQueryWithDBField(BaseCassEngTestCase):
    """Query tests for models whose attributes are renamed to database
    columns via ``db_field`` (PYTHON-351 / PYTHON-530)."""

    # NOTE(review): these are *instance-level* setUp/tearDown that delegate to
    # the class-level setUpClass/tearDownClass (binding the instance as `cls`),
    # so the tables are synced and dropped around every single test. Confirm
    # this is intentional rather than a setUp/setUpClass mix-up.
    def setUp(cls):
        super(TestModelQueryWithDBField, cls).setUpClass()
        cls.model_list = [DBFieldModel, DBFieldModelMixed1, DBFieldModelMixed2]
        for model in cls.model_list:
            sync_table(model)

    def tearDown(cls):
        super(TestModelQueryWithDBField, cls).tearDownClass()
        for model in cls.model_list:
            drop_table(model)

    @execute_count(33)
    def test_basic_crud(self):
        """
        Tests creation update and delete of object model queries that are using db_field mappings.

        @since 3.1
        @jira_ticket PYTHON-351
        @expected_result results are properly retrieved without errors

        @test_category object_mapper
        """
        for model in self.model_list:
            values = {'k0': 1, 'k1': 2, 'c0': 3, 'v0': 4, 'v1': 5}
            # create
            i = model.create(**values)
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertEqual(i, model(**values))

            # update (the original comment here said "create")
            values['v0'] = 101
            i.update(v0=values['v0'])
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertEqual(i, model(**values))

            # delete via the queryset
            model.objects(k0=i.k0, k1=i.k1).delete()
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertIsNone(i)

            # delete via the instance (queryset delete afterwards is a no-op)
            i = model.create(**values)
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertEqual(i, model(**values))
            i.delete()
            model.objects(k0=i.k0, k1=i.k1).delete()
            i = model.objects(k0=i.k0, k1=i.k1).first()
            self.assertIsNone(i)

    @execute_count(21)
    def test_slice(self):
        """
        Tests slice queries for object models that are using db_field mapping

        @since 3.1
        @jira_ticket PYTHON-351
        @expected_result results are properly retrieved without errors

        @test_category object_mapper
        """
        for model in self.model_list:
            values = {'k0': 1, 'k1': 3, 'c0': 3, 'v0': 4, 'v1': 5}
            clustering_values = range(3)
            for c in clustering_values:
                values['c0'] = c
                i = model.create(**values)

            # `i` is the last-created row, so i.c0 is the max clustering value.
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values))
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0=i.c0).count(), 1)
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__lt=i.c0).count(), len(clustering_values[:-1]))
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1, c0__gt=0).count(), len(clustering_values[1:]))

    @execute_count(15)
    def test_order(self):
        """
        Tests order by queries for object models that are using db_field mapping

        @since 3.1
        @jira_ticket PYTHON-351
        @expected_result results are properly retrieved without errors

        @test_category object_mapper
        """
        for model in self.model_list:
            values = {'k0': 1, 'k1': 4, 'c0': 3, 'v0': 4, 'v1': 5}
            clustering_values = range(3)
            for c in clustering_values:
                values['c0'] = c
                i = model.create(**values)
            # Ascending and descending order over the clustering column.
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('c0').first().c0, clustering_values[0])
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1).order_by('-c0').first().c0, clustering_values[-1])

    @execute_count(15)
    def test_index(self):
        """
        Tests queries using index fields for object models using db_field mapping

        @since 3.1
        @jira_ticket PYTHON-351
        @expected_result results are properly retrieved without errors

        @test_category object_mapper
        """
        for model in self.model_list:
            values = {'k0': 1, 'k1': 5, 'c0': 3, 'v0': 4, 'v1': 5}
            clustering_values = range(3)
            for c in clustering_values:
                values['c0'] = c
                values['v1'] = c  # v1 is the secondary-indexed column
                i = model.create(**values)
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1).count(), len(clustering_values))
            self.assertEqual(model.objects(k0=i.k0, k1=i.k1, v1=0).count(), 1)

    @execute_count(1)
    def test_db_field_names_used(self):
        """
        Tests to ensure that with generated cql update statements correctly utilize the db_field values.

        @since 3.2
        @jira_ticket PYTHON-530
        @expected_result resulting cql_statements will use the db_field values

        @test_category object_mapper
        """
        # None of the *attribute* names may appear in the generated CQL;
        # only the db_field column names should.
        values = ('k0', 'k1', 'c0', 'v0', 'v1')

        # Test QuerySet Path
        b = BatchQuery()
        DBFieldModel.objects(k0=1).batch(b).update(
            v0=0,
            v1=9,
        )
        for value in values:
            self.assertTrue(value not in str(b.queries[0]))

        # Test DML path
        b2 = BatchQuery()
        dml_field_model = DBFieldModel.create(k0=1, k1=5, c0=3, v0=4, v1=5)
        dml_field_model.batch(b2).update(
            v0=0,
            v1=9,
        )
        for value in values:
            self.assertTrue(value not in str(b2.queries[0]))

    def test_db_field_value_list(self):
        # values_list must defer/select using the *db_field* column names
        # ('a', 'b', 'c', 'd'), not the model attribute names.
        DBFieldModel.create(k0=0, k1=0, c0=0, v0=4, v1=5)

        self.assertEqual(DBFieldModel.objects.filter(c0=0, k0=0, k1=0).values_list('c0', 'v0')._defer_fields,
                         {'a', 'c', 'b'})
        self.assertEqual(DBFieldModel.objects.filter(c0=0, k0=0, k1=0).values_list('c0', 'v0')._only_fields,
                         ['c', 'd'])

        list(DBFieldModel.objects.filter(c0=0, k0=0, k1=0).values_list('c0', 'v0'))
class TestModelSmall(Model):
    """Minimal single-column model used by the fetch-size paging tests."""
    test_id = columns.Integer(primary_key=True)
class TestModelQueryWithFetchSize(BaseCassEngTestCase):
    """
    Test FetchSize, and ensure that results are returned correctly
    regardless of the paging size

    @since 3.1
    @jira_ticket PYTHON-324
    @expected_result results are properly retrieved and the correct size

    @test_category object_mapper
    """

    @classmethod
    def setUpClass(cls):
        super(TestModelQueryWithFetchSize, cls).setUpClass()
        sync_table(TestModelSmall)

    @classmethod
    def tearDownClass(cls):
        super(TestModelQueryWithFetchSize, cls).tearDownClass()
        drop_table(TestModelSmall)

    @execute_count(9)
    def test_defaultFetchSize(self):
        # Insert 5100 rows in one batch, then check that every fetch size --
        # below, at, and above the row count -- still yields all rows.
        with BatchQuery() as b:
            for i in range(5100):
                TestModelSmall.batch(b).create(test_id=i)
        for page_size in (1, 500, 4999, 5000, 5001, 5100, 5101, 1):
            self.assertEqual(len(TestModelSmall.objects.fetch_size(page_size)), 5100)

        # Non-positive fetch sizes are rejected outright.
        with self.assertRaises(QueryException):
            TestModelSmall.objects.fetch_size(0)
        with self.assertRaises(QueryException):
            TestModelSmall.objects.fetch_size(-1)
class People(Model):
    """Initial schema for the deferred-field tests."""
    # Explicit table name shared with People2 so syncing the wider model
    # alters this same table in place.
    __table_name__ = "people"
    last_name = columns.Text(primary_key=True, partition_key=True)
    first_name = columns.Text(primary_key=True)
    birthday = columns.DateTime()
class People2(Model):
    """Widened schema: People plus a middle_name column, mapped onto the
    same underlying table as People."""
    __table_name__ = "people"
    last_name = columns.Text(primary_key=True, partition_key=True)
    first_name = columns.Text(primary_key=True)
    middle_name = columns.Text()
    birthday = columns.DateTime()
class TestModelQueryWithDifferedFeld(BaseCassEngTestCase):
    """
    Tests that filtered selects defer population of the filtered columns
    until after the results are returned. Instead of generating
    SELECT * FROM People WHERE last_name="Smith" the mapper generates
    SELECT first_name, birthday FROM People WHERE last_name="Smith",
    and last_name is populated on the instances after the query.

    @since 3.2
    @jira_ticket PYTHON-520
    @expected_result only needed fields are included in the query

    @test_category object_mapper
    """

    @classmethod
    def setUpClass(cls):
        super(TestModelQueryWithDifferedFeld, cls).setUpClass()
        sync_table(People)

    @classmethod
    def tearDownClass(cls):
        super(TestModelQueryWithDifferedFeld, cls).tearDownClass()
        drop_table(People)

    @execute_count(8)
    def test_defaultFetchSize(self):
        # Seed the original schema.
        for first in ("John", "Greg", "Adam"):
            People.objects.create(last_name="Smith", first_name=first, birthday=datetime.now())
        People.objects.create(last_name="Bestwater", first_name="Alan", birthday=datetime.now())

        # The filtered column is omitted from the generated SELECT ...
        self.assertEqual(People.filter(last_name="Smith")._select_fields(),
                         ['first_name', 'birthday'])

        # ... yet is populated on the returned instances.
        smiths = list(People.filter(last_name="Smith"))
        self.assertEqual(len(smiths), 3)
        self.assertTrue(smiths[0].last_name is not None)

        # Alter the table in place by syncing the wider model.
        sync_table(People2)
        People2.objects.create(last_name="Smith", first_name="Chris", middle_name="Raymond", birthday=datetime.now())
        People2.objects.create(last_name="Smith", first_name="Andrew", middle_name="Micheal", birthday=datetime.now())

        # New column shows up in the generated SELECT, and all rows return.
        self.assertEqual(People2.filter(last_name="Smith")._select_fields(),
                         ['first_name', 'middle_name', 'birthday'])
        smiths = list(People2.filter(last_name="Smith"))
        self.assertEqual(len(smiths), 5)
        self.assertTrue(smiths[0].last_name is not None)
| |
"""Models mixins for Social Auth"""
import re
import time
import base64
import uuid
from datetime import datetime, timedelta
import six
from openid.association import Association as OpenIdAssociation
from social.backends.utils import get_backend
from social.strategies.utils import get_current_strategy
CLEAN_USERNAME_REGEX = re.compile(r'[^\w.@+-_]+', re.UNICODE)
class UserMixin(object):
    """Storage behavior for a (user, provider, uid) social-auth association.

    Implements provider-token bookkeeping; persistence and the
    NotImplementedError classmethods are supplied by concrete backends.
    """
    user = ''
    provider = ''
    uid = None
    extra_data = None

    def get_backend(self, strategy=None):
        # Resolve this entry's backend class from the strategy's registry.
        strategy = strategy or get_current_strategy()
        if strategy:
            return get_backend(strategy.backends, self.provider)

    def get_backend_instance(self, strategy=None):
        # Like get_backend(), but returns an instantiated backend
        # (or None when the backend cannot be resolved).
        strategy = strategy or get_current_strategy()
        Backend = self.get_backend(strategy)
        if Backend:
            return Backend(strategy=strategy)

    @property
    def tokens(self):
        """Return access_token stored in extra_data or None"""
        # NOTE(review): raises AttributeError when extra_data is still None;
        # callers appear to guarantee it is a dict -- confirm.
        return self.extra_data.get('access_token')

    def refresh_token(self, strategy, *args, **kwargs):
        """Refresh the stored OAuth token through the provider backend and
        persist any new access/refresh token it returns."""
        # Fall back to the access token when no refresh token was stored.
        token = self.extra_data.get('refresh_token') or \
                self.extra_data.get('access_token')
        backend = self.get_backend(strategy)
        if token and backend and hasattr(backend, 'refresh_token'):
            backend = backend(strategy=strategy)
            response = backend.refresh_token(token, *args, **kwargs)
            access_token = response.get('access_token')
            refresh_token = response.get('refresh_token')

            # Only persist when the provider actually returned new tokens.
            if access_token or refresh_token:
                if access_token:
                    self.extra_data['access_token'] = access_token
                if refresh_token:
                    self.extra_data['refresh_token'] = refresh_token
                self.save()

    def expiration_datetime(self):
        """Return provider session live seconds. Returns a timedelta ready to
        use with session.set_expiry().

        If provider returns a timestamp instead of session seconds to live, the
        timedelta is inferred from current time (using UTC timezone). None is
        returned if there's no value stored or it's invalid.
        """
        if self.extra_data and 'expires' in self.extra_data:
            try:
                expires = int(self.extra_data.get('expires'))
            except (ValueError, TypeError):
                return None

            now = datetime.utcnow()

            # Detect if expires is a timestamp: values larger than "now"
            # are absolute expiry times, smaller ones are seconds-to-live.
            if expires > time.mktime(now.timetuple()):
                # expires is an absolute timestamp; return remaining time
                return datetime.fromtimestamp(expires) - now
            else:
                # expires is a number of seconds (time to live)
                return timedelta(seconds=expires)
        # Implicitly returns None when no 'expires' value is stored.

    def set_extra_data(self, extra_data=None):
        # Merge (or set) extra_data; returns True only when something changed.
        if extra_data and self.extra_data != extra_data:
            if self.extra_data:
                self.extra_data.update(extra_data)
            else:
                self.extra_data = extra_data
            return True

    @classmethod
    def changed(cls, user):
        """The given user instance is ready to be saved"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def get_username(cls, user):
        """Return the username for given user"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def user_model(cls):
        """Return the user model"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def username_max_length(cls):
        """Return the max length for username"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def clean_username(cls, value):
        # Strip characters not allowed in usernames (see CLEAN_USERNAME_REGEX).
        return CLEAN_USERNAME_REGEX.sub('', value)

    @classmethod
    def allowed_to_disconnect(cls, user, backend_name, association_id=None):
        """Return if it's safe to disconnect the social account for the
        given user"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def disconnect(cls, entry):
        """Disconnect the social account for the given user"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def user_exists(cls, *args, **kwargs):
        """
        Return True/False if a User instance exists with the given arguments.
        Arguments are directly passed to filter() manager method.
        """
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def create_user(cls, *args, **kwargs):
        """Create a user instance"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def get_user(cls, pk):
        """Return user instance for given id"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def get_users_by_email(cls, email):
        """Return users instances for given email address"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def get_social_auth(cls, provider, uid):
        """Return UserSocialAuth for given provider and uid"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def get_social_auth_for_user(cls, user, provider=None, id=None):
        """Return all the UserSocialAuth instances for given user"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def create_social_auth(cls, user, uid, provider):
        """Create a UserSocialAuth instance for given user"""
        raise NotImplementedError('Implement in subclass')
class NonceMixin(object):
    """One use numbers"""
    server_url = ''  # OpenId endpoint the nonce was issued for
    timestamp = 0    # issue time, used for replay detection
    salt = ''        # random salt making the nonce unique

    @classmethod
    def use(cls, server_url, timestamp, salt):
        """Create a Nonce instance"""
        raise NotImplementedError('Implement in subclass')
class AssociationMixin(object):
    """OpenId account association"""
    server_url = ''
    handle = ''
    secret = ''  # base64-encoded shared secret
    issued = 0
    lifetime = 0
    assoc_type = ''

    @classmethod
    def oids(cls, server_url, handle=None):
        """Return stored associations for server_url (optionally narrowed to
        one handle) as (id, OpenIdAssociation) pairs, newest first."""
        kwargs = {'server_url': server_url}
        if handle is not None:
            kwargs['handle'] = handle
        return sorted([
            (assoc.id, cls.openid_association(assoc))
            for assoc in cls.get(**kwargs)
        ], key=lambda x: x[1].issued, reverse=True)

    @classmethod
    def openid_association(cls, assoc):
        """Build an OpenIdAssociation from a stored association row."""
        secret = assoc.secret
        if not isinstance(secret, six.binary_type):
            secret = secret.encode()
        # BUG FIX: base64.decodestring() has been deprecated since Python 3.1
        # and was removed in Python 3.9. Prefer decodebytes() (same behavior)
        # and keep the old name only as a Python 2 fallback.
        decode = getattr(base64, 'decodebytes', None)
        if decode is None:  # Python 2
            decode = base64.decodestring
        return OpenIdAssociation(assoc.handle, decode(secret),
                                 assoc.issued, assoc.lifetime,
                                 assoc.assoc_type)

    @classmethod
    def store(cls, server_url, association):
        """Create an Association instance"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def get(cls, *args, **kwargs):
        """Get an Association instance"""
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def remove(cls, ids_to_delete):
        """Remove an Association instance"""
        raise NotImplementedError('Implement in subclass')
class CodeMixin(object):
    """Storage for one-shot email verification codes."""
    email = ''
    code = ''
    verified = False

    def verify(self):
        """Mark this code as used and persist the change."""
        self.verified = True
        self.save()

    @classmethod
    def generate_code(cls):
        """Return a fresh random 32-character hex code."""
        return uuid.uuid4().hex

    @classmethod
    def make_code(cls, email):
        """Build, persist and return a new unverified code for *email*."""
        instance = cls()
        instance.email = email
        instance.code = cls.generate_code()
        instance.verified = False
        instance.save()
        return instance

    @classmethod
    def get_code(cls, code):
        raise NotImplementedError('Implement in subclass')
class BaseStorage(object):
    """Aggregates the storage mixins; concrete backends replace these class
    attributes with their real model implementations."""
    user = UserMixin
    nonce = NonceMixin
    association = AssociationMixin
    code = CodeMixin

    @classmethod
    def is_integrity_error(cls, exception):
        """Check if given exception flags an integrity error in the DB"""
        raise NotImplementedError('Implement in subclass')
| |
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
from math import log
import numpy as np
from scipy.linalg import pinvh
import pytest
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_less
from sklearn.utils import check_random_state
from sklearn.linear_model import BayesianRidge, ARDRegression
from sklearn.linear_model import Ridge
from sklearn import datasets
from sklearn.utils.extmath import fast_logdet
diabetes = datasets.load_diabetes()
def test_n_iter():
    """n_iter < 1 must be rejected with an explicit ValueError."""
    X = np.array([[1], [2], [6], [8], [10]])
    y = np.array([1, 2, 6, 8, 10])
    model = BayesianRidge(n_iter=0)
    expected = "n_iter should be greater than or equal to 1."
    with pytest.raises(ValueError, match=expected):
        model.fit(X, y)
def test_bayesian_ridge_scores():
    """scores_ holds one entry per iteration plus the final score."""
    X, y = diabetes.data, diabetes.target
    reg = BayesianRidge(compute_score=True)
    reg.fit(X, y)
    assert reg.scores_.shape == (reg.n_iter_ + 1,)
def test_bayesian_ridge_score_values():
    """Check value of score on toy example.

    Compute log marginal likelihood with equation (36) in Sparse Bayesian
    Learning and the Relevance Vector Machine (Tipping, 2001):

    - 0.5 * (log |Id/alpha + X.X^T/lambda| +
             y^T.(Id/alpha + X.X^T/lambda).y + n * log(2 * pi))
    + lambda_1 * log(lambda) - lambda_2 * lambda
    + alpha_1 * log(alpha) - alpha_2 * alpha

    and check equality with the score computed during training.
    """
    X, y = diabetes.data, diabetes.target
    n_samples = X.shape[0]

    # Initial precision values, matching what BayesianRidge.fit starts from.
    eps = np.finfo(np.float64).eps
    alpha_ = 1. / (np.var(y) + eps)
    lambda_ = 1.

    # One shared value for all four Gamma hyperprior parameters.
    alpha_1 = alpha_2 = lambda_1 = lambda_2 = 0.1

    # Score from the closed-form expression in the docstring.
    expected_score = lambda_1 * log(lambda_) - lambda_2 * lambda_
    expected_score += alpha_1 * log(alpha_) - alpha_2 * alpha_
    M = 1. / alpha_ * np.eye(n_samples) + 1. / lambda_ * np.dot(X, X.T)
    M_inv = pinvh(M)
    expected_score += - 0.5 * (fast_logdet(M) + np.dot(y.T, np.dot(M_inv, y)) +
                               n_samples * log(2 * np.pi))

    # Score computed by BayesianRidge after a single iteration.
    reg = BayesianRidge(alpha_1=alpha_1, alpha_2=alpha_2,
                        lambda_1=lambda_1, lambda_2=lambda_2,
                        n_iter=1, fit_intercept=False, compute_score=True)
    reg.fit(X, y)
    assert_almost_equal(reg.scores_[0], expected_score, decimal=9)
def test_bayesian_ridge_parameter():
    # Correctness of lambda_ and alpha_ (GitHub issue #8224): a plain Ridge
    # with alpha = lambda_/alpha_ must reproduce the Bayesian Ridge fit.
    X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
    y = np.array([1, 2, 3, 2, 0, 4, 5]).T

    bayesian = BayesianRidge(compute_score=True).fit(X, y)
    ridge = Ridge(alpha=bayesian.lambda_ / bayesian.alpha_).fit(X, y)
    assert_array_almost_equal(ridge.coef_, bayesian.coef_)
    assert_almost_equal(ridge.intercept_, bayesian.intercept_)
def test_bayesian_sample_weights():
    # Sample-weight correctness: a weighted Ridge fit with
    # alpha = lambda_/alpha_ must match the weighted Bayesian Ridge fit.
    X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
    y = np.array([1, 2, 3, 2, 0, 4, 5]).T
    w = np.array([4, 3, 3, 1, 1, 2, 3]).T

    bayesian = BayesianRidge(compute_score=True).fit(X, y, sample_weight=w)
    ridge = Ridge(alpha=bayesian.lambda_ / bayesian.alpha_).fit(
        X, y, sample_weight=w)
    assert_array_almost_equal(ridge.coef_, bayesian.coef_)
    assert_almost_equal(ridge.intercept_, bayesian.intercept_)
def test_toy_bayesian_ridge_object():
    # BayesianRidge should approximately recover the identity on a toy set.
    X = np.array([[1], [2], [6], [8], [10]])
    y = np.array([1, 2, 6, 8, 10])
    reg = BayesianRidge(compute_score=True)
    reg.fit(X, y)
    assert_array_almost_equal(reg.predict([[1], [3], [4]]), [1, 3, 4], 2)
def test_bayesian_initial_params():
    # BayesianRidge with user-supplied initial values (alpha_init, lambda_init).
    X = np.vander(np.linspace(0, 4, 5), 4)
    y = np.array([0., 1., 0., -1., 0.])  # y = (x^3 - 6x^2 + 8x) / 3

    # The default initial values bias the fitted curve on this data, so a
    # small lambda_init is needed to reach an R2 score of ~1.
    reg = BayesianRidge(alpha_init=1., lambda_init=1e-3)
    assert_almost_equal(reg.fit(X, y).score(X, y), 1.)
def test_prediction_bayesian_ridge_ard_with_constant_input():
    # Edge case: a constant target vector must be predicted (almost) exactly
    # by both Bayesian regressors.
    n_samples, n_features = 4, 5
    rng = check_random_state(42)
    constant_value = rng.rand()
    X = rng.random_sample((n_samples, n_features))
    y = np.full(n_samples, constant_value,
                dtype=np.array(constant_value).dtype)
    expected = np.full(n_samples, constant_value,
                       dtype=np.array(constant_value).dtype)

    for reg in [BayesianRidge(), ARDRegression()]:
        assert_array_almost_equal(reg.fit(X, y).predict(X), expected)
def test_std_bayesian_ridge_ard_with_constant_input():
    # For a constant target vector the predictive standard deviation must
    # stay relatively small (< 0.01 is asserted here).
    n_samples, n_features = 10, 5
    rng = check_random_state(42)
    constant_value = rng.rand()
    X = rng.random_sample((n_samples, n_features))
    y = np.full(n_samples, constant_value,
                dtype=np.array(constant_value).dtype)
    expected_upper_boundary = 0.01

    for reg in [BayesianRidge(), ARDRegression()]:
        _, y_std = reg.fit(X, y).predict(X, return_std=True)
        assert_array_less(y_std, expected_upper_boundary)
def test_update_of_sigma_in_ard():
    # `sigma_` must reflect the *last* iteration of ARDRegression (issue
    # #10128).
    X = np.array([[1, 0],
                  [0, 0]])
    y = np.array([0, 0])
    reg = ARDRegression(n_iter=1)
    reg.fit(X, y)
    # Both coefficients are pruned in the first iteration here, so the
    # expected shape of sigma_ is (0, 0).
    assert reg.sigma_.shape == (0, 0)
    # Prediction with return_std must still work on the fully pruned model.
    reg.predict(X, return_std=True)
def test_toy_ard_object():
    # ARDRegression should approximately recover the identity on a toy set.
    X = np.array([[1], [2], [3]])
    y = np.array([1, 2, 3])
    reg = ARDRegression(compute_score=True)
    reg.fit(X, y)
    assert_array_almost_equal(reg.predict([[1], [3], [4]]), [1, 3, 4], 2)
@pytest.mark.parametrize('seed', range(100))
@pytest.mark.parametrize('n_samples, n_features', ((10, 100), (100, 10)))
def test_ard_accuracy_on_easy_problem(seed, n_samples, n_features):
    # Check that ARD converges with reasonable accuracy on an easy problem
    # (Github issue #14055)
    # NOTE(review): `n_samples` and `n_features` are parametrized but never
    # used below -- X is hard-coded to shape (250, 3), so both cases run the
    # same data. Confirm whether the parametrization should be removed or
    # actually drive the problem size.
    X = np.random.RandomState(seed=seed).normal(size=(250, 3))
    y = X[:, 1]
    regressor = ARDRegression()
    regressor.fit(X, y)
    # The second coefficient should recover the identity mapping y = X[:, 1].
    abs_coef_error = np.abs(1 - regressor.coef_[1])
    assert abs_coef_error < 1e-10
def test_return_std():
    # Both Bayesian regressors should recover the injected noise level via
    # the return_std option of predict().
    def target(X):
        return np.dot(X, w) + b

    def noisy_target(X, noise_mult):
        return target(X) + np.random.randn(X.shape[0]) * noise_mult

    d = 5
    n_train = 50
    n_test = 10
    w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
    b = 1.0
    X = np.random.random((n_train, d))
    X_test = np.random.random((n_test, d))

    # Smaller injected noise should be recovered with more decimals.
    for decimal, noise_mult in enumerate([1, 0.1, 0.01]):
        y = noisy_target(X, noise_mult)
        for regressor_cls in (BayesianRidge, ARDRegression):
            model = regressor_cls()
            model.fit(X, y)
            _, y_std = model.predict(X_test, return_std=True)
            assert_array_almost_equal(y_std, noise_mult, decimal=decimal)
@pytest.mark.parametrize('seed', range(10))
def test_update_sigma(seed):
    # The two _update_sigma helpers must agree: the woodbury formula is used
    # when n_samples < n_features, the direct one otherwise.
    rng = np.random.RandomState(seed)

    # n_samples == n_features keeps both formulations numerically stable
    # when inverting the matrices (woodbury is unstable for
    # n_samples > n_features).
    n_samples = n_features = 10
    X = rng.randn(n_samples, n_features)
    alpha = 1
    lmbda = np.arange(1, n_features + 1)
    keep_lambda = np.array([True] * n_features)

    reg = ARDRegression()
    direct = reg._update_sigma(X, alpha, lmbda, keep_lambda)
    woodbury = reg._update_sigma_woodbury(X, alpha, lmbda, keep_lambda)
    np.testing.assert_allclose(direct, woodbury)
# FIXME: 'normalize' to be removed in 1.2 in LinearRegression
@pytest.mark.filterwarnings("ignore:'normalize' was deprecated")
def test_ard_regression_predict_normalize_true():
    """Check that we can predict with `normalize=True` and `return_std=True`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/18605
    """
    model = ARDRegression(normalize=True)
    model.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    # Must not raise.
    model.predict([[1, 1]], return_std=True)
| |
from sys import getsizeof
import operator
import numpy as np
from pandas._libs import index as libindex
from pandas.core.dtypes.common import (
is_integer,
is_scalar,
is_int64_dtype)
from pandas import compat
from pandas.compat import lrange, range
from pandas.compat.numpy import function as nv
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.indexes.base as ibase
from pandas.core.indexes.numeric import Int64Index
class RangeIndex(Int64Index):
"""
Immutable Index implementing a monotonic range. RangeIndex is a
memory-saving special case of Int64Index limited to representing
monotonic ranges.
Parameters
----------
start : int (default: 0), or other RangeIndex instance.
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
name : object, optional
Name to be stored in the index
copy : bool, default False
Unused, accepted for homogeneity with other index types.
"""
_typ = 'rangeindex'
_engine_type = libindex.Int64Engine
def __new__(cls, start=None, stop=None, step=None, name=None, dtype=None,
            fastpath=False, copy=False, **kwargs):
    # Fast path: trust the caller's (start, stop, step) without validation.
    if fastpath:
        return cls._simple_new(start, stop, step, name=name)

    cls._validate_dtype(dtype)

    # Copy-constructor: clone another RangeIndex, keeping its name unless
    # one was given explicitly.
    if isinstance(start, RangeIndex):
        if name is None:
            name = start.name
        return cls._simple_new(name=name,
                               **dict(start._get_data_as_items()))

    # validate the arguments
    def _ensure_int(value, field):
        # Coerce `value` to int, rejecting non-scalars and lossy
        # conversions (e.g. floats with a fractional part).
        msg = ("RangeIndex(...) must be called with integers,"
               " {value} was passed for {field}")
        if not is_scalar(value):
            raise TypeError(msg.format(value=type(value).__name__,
                                       field=field))
        try:
            new_value = int(value)
            assert(new_value == value)
        except (TypeError, ValueError, AssertionError):
            raise TypeError(msg.format(value=type(value).__name__,
                                       field=field))

        return new_value

    if start is None and stop is None and step is None:
        msg = "RangeIndex(...) must be called with integers"
        raise TypeError(msg)
    elif start is None:
        start = 0
    else:
        start = _ensure_int(start, 'start')

    # Like builtin range(): a single positional argument means `stop`.
    if stop is None:
        stop = start
        start = 0
    else:
        stop = _ensure_int(stop, 'stop')

    if step is None:
        step = 1
    elif step == 0:
        raise ValueError("Step must not be zero")
    else:
        step = _ensure_int(step, 'step')

    return cls._simple_new(start, stop, step, name)
@classmethod
def from_range(cls, data, name=None, dtype=None, **kwargs):
""" create RangeIndex from a range (py3), or xrange (py2) object """
if not isinstance(data, range):
raise TypeError(
'{0}(...) must be called with object coercible to a '
'range, {1} was passed'.format(cls.__name__, repr(data)))
if compat.PY3:
step = data.step
stop = data.stop
start = data.start
else:
# seems we only have indexing ops to infer
# rather than direct accessors
if len(data) > 1:
step = data[1] - data[0]
stop = data[-1] + step
start = data[0]
elif len(data):
start = data[0]
stop = data[0] + 1
step = 1
else:
start = stop = 0
step = 1
return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
@classmethod
def _simple_new(cls, start, stop=None, step=None, name=None,
dtype=None, **kwargs):
result = object.__new__(cls)
# handle passed None, non-integers
if start is None and stop is None:
# empty
start, stop, step = 0, 0, 1
if start is None or not is_integer(start):
try:
return RangeIndex(start, stop, step, name=name, **kwargs)
except TypeError:
return Index(start, stop, step, name=name, **kwargs)
result._start = start
result._stop = stop or 0
result._step = step or 1
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
@staticmethod
def _validate_dtype(dtype):
""" require dtype to be None or int64 """
if not (dtype is None or is_int64_dtype(dtype)):
raise TypeError('Invalid to pass a non-int64 dtype to RangeIndex')
@cache_readonly
def _constructor(self):
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self):
return np.arange(self._start, self._stop, self._step, dtype=np.int64)
@cache_readonly
def _int64index(self):
return Int64Index(self._data, name=self.name, fastpath=True)
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
return [('start', self._start),
('stop', self._stop),
('step', self._step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (self.__class__, d), None
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
return attrs
def _format_data(self):
# we are formatting thru the attributes
return None
@cache_readonly
def nbytes(self):
""" return the number of bytes in the underlying data """
return sum([getsizeof(getattr(self, v)) for v in
['_start', '_stop', '_step']])
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self):
return np.dtype(np.int64)
@property
def is_unique(self):
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self):
return self._step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self):
return self._step < 0 or len(self) <= 1
@property
def has_duplicates(self):
return False
def tolist(self):
return lrange(self._start, self._stop, self._step)
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
return RangeIndex(name=self.name, fastpath=True,
**dict(self._get_data_as_items()))
else:
kwargs.setdefault('name', self.name)
return self._int64index._shallow_copy(values, **kwargs)
@Appender(ibase._index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
self._validate_dtype(dtype)
if name is None:
name = self.name
return RangeIndex(name=name, fastpath=True,
**dict(self._get_data_as_items()))
def argsort(self, *args, **kwargs):
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
"""
nv.validate_argsort(args, kwargs)
if self._step > 0:
return np.arange(len(self))
else:
return np.arange(len(self) - 1, -1, -1)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
ls = len(self)
lo = len(other)
return (ls == lo == 0 or
ls == lo == 1 and
self._start == other._start or
ls == lo and
self._start == other._start and
self._step == other._step)
return super(RangeIndex, self).equals(other)
def intersection(self, other):
"""
Form the intersection of two Index objects. Sortedness of the result is
not guaranteed
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
"""
if not isinstance(other, RangeIndex):
return super(RangeIndex, self).intersection(other)
if not len(self) or not len(other):
return RangeIndex._simple_new(None)
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(min(self._start, self._stop + 1),
min(other._start, other._stop + 1))
int_high = min(max(self._stop, self._start + 1),
max(other._stop, other._start + 1))
if int_high <= int_low:
return RangeIndex._simple_new(None)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, t = self._extended_gcd(self._step, other._step)
# check whether element sets intersect
if (self._start - other._start) % gcd:
return RangeIndex._simple_new(None)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = self._start + (other._start - self._start) * \
self._step // gcd * s
new_step = self._step * other._step // gcd
new_index = RangeIndex(tmp_start, int_high, new_step, fastpath=True)
# adjust index to limiting interval
new_index._start = new_index._min_fitting_element(int_low)
return new_index
def _min_fitting_element(self, lower_limit):
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self._start) // abs(self._step))
return self._start + abs(self._step) * no_steps
def _max_fitting_element(self, upper_limit):
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self._start) // abs(self._step)
return self._start + abs(self._step) * no_steps
def _extended_gcd(self, a, b):
"""
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(x, y)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
def union(self, other):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
"""
self._assert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
if len(self) == 0:
return other
if isinstance(other, RangeIndex):
start_s, step_s = self._start, self._step
end_s = self._start + self._step * (len(self) - 1)
start_o, step_o = other._start, other._step
end_o = other._start + other._step * (len(other) - 1)
if self._step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other._step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self._start - other._start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if ((start_s - start_o) % step_s == 0 and
(start_s - end_o) <= step_s and
(start_o - end_s) <= step_s):
return RangeIndex(start_r, end_r + step_s, step_s)
if ((step_s % 2 == 0) and
(abs(start_s - start_o) <= step_s / 2) and
(abs(end_s - end_o) <= step_s / 2)):
return RangeIndex(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if ((start_o - start_s) % step_s == 0 and
(start_o + step_s >= start_s) and
(end_o - step_s <= end_s)):
return RangeIndex(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if ((start_s - start_o) % step_o == 0 and
(start_s + step_o >= start_o) and
(end_s - step_o <= end_o)):
return RangeIndex(start_r, end_r + step_o, step_o)
return self._int64index.union(other)
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
if how == 'outer' and self is not other:
# note: could return RangeIndex in more circumstances
return self._int64index.join(other, how, level, return_indexers,
sort)
return super(RangeIndex, self).join(other, how, level, return_indexers,
sort)
def __len__(self):
"""
return the length of the RangeIndex
"""
return max(0, -(-(self._stop - self._start) // self._step))
@property
def size(self):
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
super_getitem = super(RangeIndex, self).__getitem__
if is_scalar(key):
n = int(key)
if n != key:
return super_getitem(key)
if n < 0:
n = len(self) + key
if n < 0 or n > len(self) - 1:
raise IndexError("index {key} is out of bounds for axis 0 "
"with size {size}".format(key=key,
size=len(self)))
return self._start + n * self._step
if isinstance(key, slice):
# This is basically PySlice_GetIndicesEx, but delegation to our
# super routines if we don't have integers
l = len(self)
# complete missing slice information
step = 1 if key.step is None else key.step
if key.start is None:
start = l - 1 if step < 0 else 0
else:
start = key.start
if start < 0:
start += l
if start < 0:
start = -1 if step < 0 else 0
if start >= l:
start = l - 1 if step < 0 else l
if key.stop is None:
stop = -1 if step < 0 else l
else:
stop = key.stop
if stop < 0:
stop += l
if stop < 0:
stop = -1
if stop > l:
stop = l
# delegate non-integer slices
if (start != int(start) or
stop != int(stop) or
step != int(step)):
return super_getitem(key)
# convert indexes to values
start = self._start + self._step * start
stop = self._start + self._step * stop
step = self._step * step
return RangeIndex(start, stop, step, self.name, fastpath=True)
# fall back to Int64Index
return super_getitem(key)
def __floordiv__(self, other):
if is_integer(other):
if (len(self) == 0 or
self._start % other == 0 and
self._step % other == 0):
start = self._start // other
step = self._step // other
stop = start + len(self) * step
return RangeIndex(start, stop, step, name=self.name,
fastpath=True)
if len(self) == 1:
start = self._start // other
return RangeIndex(start, start + 1, 1, name=self.name,
fastpath=True)
return self._int64index // other
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods, specialized to RangeIndex """
def _make_evaluate_binop(op, opstr, reversed=False, step=False):
"""
Parameters
----------
op : callable that accepts 2 parms
perform the binary op
opstr : string
string name of ops
reversed : boolean, default False
if this is a reversed op, e.g. radd
step : callable, optional, default to False
op to apply to the step parm if not None
if False, use the existing step
"""
def _evaluate_numeric_binop(self, other):
other = self._validate_for_numeric_binop(other, op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if reversed:
self, other = other, self
try:
# alppy if we have an override
if step:
with np.errstate(all='ignore'):
rstep = step(self._step, other)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = self._step
with np.errstate(all='ignore'):
rstart = op(self._start, other)
rstop = op(self._stop, other)
result = RangeIndex(rstart,
rstop,
rstep,
**attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all([is_integer(x) for x in
[rstart, rstop, rstep]]):
result = result.astype('float64')
return result
except (ValueError, TypeError, AttributeError):
pass
# convert to Int64Index ops
if isinstance(self, RangeIndex):
self = self.values
if isinstance(other, RangeIndex):
other = other.values
with np.errstate(all='ignore'):
results = op(self, other)
return Index(results, **attrs)
return _evaluate_numeric_binop
cls.__add__ = cls.__radd__ = _make_evaluate_binop(
operator.add, '__add__')
cls.__sub__ = _make_evaluate_binop(operator.sub, '__sub__')
cls.__rsub__ = _make_evaluate_binop(
operator.sub, '__sub__', reversed=True)
cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(
operator.mul,
'__mul__',
step=operator.mul)
cls.__truediv__ = _make_evaluate_binop(
operator.truediv,
'__truediv__',
step=operator.truediv)
cls.__rtruediv__ = _make_evaluate_binop(
operator.truediv,
'__truediv__',
reversed=True,
step=operator.truediv)
if not compat.PY3:
cls.__div__ = _make_evaluate_binop(
operator.div,
'__div__',
step=operator.div)
cls.__rdiv__ = _make_evaluate_binop(
operator.div,
'__div__',
reversed=True,
step=operator.div)
# Register the arithmetic and logical methods on the class; the numeric
# ones go through the _add_numeric_methods_binary override defined above.
RangeIndex._add_numeric_methods()
RangeIndex._add_logical_methods()
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2014-12-08 22:23:10
import time
import logging
logger = logging.getLogger('bench')
from six.moves import queue as Queue
from pyspider.scheduler import ThreadBaseScheduler as Scheduler
from pyspider.fetcher.tornado_fetcher import Fetcher
from pyspider.processor import Processor
from pyspider.result import ResultWorker
from pyspider.libs.utils import md5string
def bench_test_taskdb(taskdb):
    """Benchmark a taskdb backend: time bulk insert, update and get.

    Runs each operation for 1000 then 10000 tasks, logging throughput,
    and drops the temporary '__bench_test__' project afterwards.
    """
    project_name = '__bench_test__'
    # template task; 'url'/'taskid'/'track' are rewritten per iteration
    task = {
        "fetch": {
            "fetch_type": "js",
            "headers": {
                "User-Agent": "BaiDuSpider"
            }
        },
        "process": {
            "callback": "detail_page"
        },
        "project": project_name,
        "taskid": "553300d2582154413b4982c00c34a2d5",
        "url": "http://www.sciencedirect.com/science/article/pii/S1674200109000704"
    }
    # representative fetch/process result attached on update/get rounds
    track = {
        "fetch": {
            "content": None,
            "encoding": "unicode",
            "error": None,
            "headers": {
                "last-modified": "Wed, 04 Mar 2015 09:24:33 GMT"
            },
            "ok": True,
            "redirect_url": None,
            "status_code": 200,
            "time": 5.543
        },
        "process": {
            "exception": None,
            "follows": 4,
            "logs": "",
            "ok": True,
            "result": "{'url': u'",
            "time": 0.07105398178100586
        }
    }
    def test_insert(n, start=0):
        # insert n fresh tasks with empty track, log rate
        logger.info("taskdb insert %d", n)
        start_time = time.time()
        for i in range(n):
            task['url'] = 'http://bench.pyspider.org/?l=%d' % (i + start)
            task['taskid'] = md5string(task['url'])
            task['track'] = {}
            taskdb.insert(task['project'], task['taskid'], task)
        end_time = time.time()
        cost_time = end_time - start_time
        logger.info("cost %.2fs, %.2f/s %.2fms",
                    cost_time, n * 1.0 / cost_time, cost_time / n * 1000)
    def test_update(n, start=0):
        # update the same n tasks with the full track payload, log rate
        logger.info("taskdb update %d" % n)
        start_time = time.time()
        for i in range(n):
            task['url'] = 'http://bench.pyspider.org/?l=%d' % (i + start)
            task['taskid'] = md5string(task['url'])
            task['track'] = track
            taskdb.update(task['project'], task['taskid'], task)
        end_time = time.time()
        cost_time = end_time - start_time
        logger.info("cost %.2fs, %.2f/s %.2fms",
                    cost_time, n * 1.0 / cost_time, cost_time / n * 1000)
    request_task_fields = [
        'taskid',
        'project',
        'url',
        'status',
        'fetch',
        'process',
        'track',
        'lastcrawltime'
    ]
    def test_get(n, start=0, random=True, fields=request_task_fields):
        # fetch n tasks back (shuffled order by default), log rate
        logger.info("taskdb get %d %s" % (n, "randomly" if random else ""))
        range_n = list(range(n))
        if random:
            from random import shuffle
            shuffle(range_n)
        start_time = time.time()
        for i in range_n:
            task['url'] = 'http://bench.pyspider.org/?l=%d' % (i + start)
            task['taskid'] = md5string(task['url'])
            task['track'] = track
            taskdb.get_task(task['project'], task['taskid'], fields=fields)
        end_time = time.time()
        cost_time = end_time - start_time
        logger.info("cost %.2fs, %.2f/s %.2fms",
                    cost_time, n * 1.0 / cost_time, cost_time / n * 1000)
    try:
        test_insert(1000)
        test_update(1000)
        test_get(1000)
        test_insert(10000, 1000)
        test_update(10000, 1000)
        test_get(10000, 1000)
    except Exception as e:
        logger.exception(e)
    finally:
        # always clean up the bench project, even on failure
        taskdb.drop(project_name)
def bench_test_message_queue(queue):
    """Benchmark a message queue backend: time put and get round-trips.

    Runs 1000 then 10000 put/get pairs, logging throughput, then purges
    any leftover messages from the queue.
    """
    # template task; 'url'/'taskid' are rewritten per iteration
    task = {
        "fetch": {
            "fetch_type": "js",
            "headers": {
                "User-Agent": "BaiDuSpider"
            }
        },
        "process": {
            "callback": "detail_page"
        },
        "project": "__bench_test__",
        "taskid": "553300d2582154413b4982c00c34a2d5",
        "url": "http://www.sciencedirect.com/science/article/pii/S1674200109000704"
    }
    def test_put(n):
        # enqueue n tasks, log rate
        logger.info("message queue put %d", n)
        start_time = time.time()
        for i in range(n):
            task['url'] = 'http://bench.pyspider.org/?l=%d' % i
            task['taskid'] = md5string(task['url'])
            queue.put(task, block=True, timeout=1)
        end_time = time.time()
        cost_time = end_time - start_time
        logger.info("cost %.2fs, %.2f/s %.2fms",
                    cost_time, n * 1.0 / cost_time, cost_time / n * 1000)
    def test_get(n):
        # drain n tasks; an empty queue means the puts were lost
        logger.info("message queue get %d", n)
        start_time = time.time()
        for i in range(n):
            try:
                queue.get(True, 1)
            except Queue.Empty:
                logger.error('message queue empty while get %d', i)
                raise
        end_time = time.time()
        cost_time = end_time - start_time
        logger.info("cost %.2fs, %.2f/s %.2fms",
                    cost_time, n * 1.0 / cost_time, cost_time / n * 1000)
    try:
        test_put(1000)
        test_get(1000)
        test_put(10000)
        test_get(10000)
    except Exception as e:
        logger.exception(e)
    finally:
        # purge whatever is left so later tests start from an empty queue
        if hasattr(queue, 'channel'):
            # AMQP-style queues support a native purge
            queue.channel.queue_purge(queue.name)
        # clear message queue
        try:
            while queue.get(False):
                continue
        except Queue.Empty:
            pass
class BenchMixin(object):
    """Throughput reporter mixed into the bench components.

    Call _bench_init() once, then _bench_report() per finished item;
    a rate line is logged at most once per second.
    """
    def _bench_init(self):
        """Reset all counters and timestamps."""
        self.done_cnt = 0
        self.start_time = time.time()
        self.last_cnt = 0
        self.last_report = 0
    def _bench_report(self, name, prefix=0, rjust=0):
        """Count one finished item; log the current rate once per second.

        ``prefix`` pads the line on the left, ``rjust`` right-justifies
        the message — used to visually separate the pipeline stages.
        """
        self.done_cnt += 1
        now = time.time()
        if now - self.last_report < 1:
            return
        rps = float(self.done_cnt - self.last_cnt) / (now - self.last_report)
        message = ("%s %s pages (at %d pages/min)" % (
            name, self.done_cnt, rps * 60.0)).rjust(rjust)
        logger.info(" " * prefix + message)
        self.last_cnt = self.done_cnt
        self.last_report = now
class BenchScheduler(Scheduler, BenchMixin):
    """Scheduler that reports crawl throughput via BenchMixin."""
    def __init__(self, *args, **kwargs):
        super(BenchScheduler, self).__init__(*args, **kwargs)
        self._bench_init()
        # kick off the bench project immediately
        self.trigger_on_start('__bench_test__')
    def on_task_status(self, task):
        self._bench_report('Crawled')
        return super(BenchScheduler, self).on_task_status(task)
class BenchFetcher(Fetcher, BenchMixin):
    """Fetcher that reports fetch throughput via BenchMixin."""
    def __init__(self, *args, **kwargs):
        super(BenchFetcher, self).__init__(*args, **kwargs)
        self._bench_init()
    def on_result(self, type, task, result):
        self._bench_report("Fetched", 0, 75)
        return super(BenchFetcher, self).on_result(type, task, result)
class BenchProcessor(Processor, BenchMixin):
    """Processor that reports processing throughput via BenchMixin."""
    def __init__(self, *args, **kwargs):
        super(BenchProcessor, self).__init__(*args, **kwargs)
        self._bench_init()
    def on_task(self, task, response):
        self._bench_report("Processed", 75)
        return super(BenchProcessor, self).on_task(task, response)
class BenchResultWorker(ResultWorker, BenchMixin):
    """ResultWorker that reports save throughput via BenchMixin."""
    def __init__(self, *args, **kwargs):
        super(BenchResultWorker, self).__init__(*args, **kwargs)
        self._bench_init()
    def on_result(self, task, result):
        self._bench_report("Saved", 0, 150)
        # Propagate the parent's return value, consistent with the other
        # Bench* wrappers (BenchScheduler/BenchFetcher/BenchProcessor),
        # which all return super(...)'s result.
        return super(BenchResultWorker, self).on_result(task, result)
bench_script = '''
from pyspider.libs.base_handler import *
class Handler(BaseHandler):
def on_start(self):
self.crawl('http://127.0.0.1:5000/bench',
params={'total': %(total)d, 'show': %(show)d},
callback=self.index_page)
def index_page(self, response):
for each in response.doc('a[href^="http://"]').items():
self.crawl(each.attr.href, callback=self.index_page)
return response.url
'''
| |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8
from tornado import gen
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
    """Plain-text greeting; the name is taken from the ?name= argument."""
    def get(self):
        self.set_header("Content-Type", "text/plain")
        who = self.get_argument("name", "world")
        self.finish("Hello %s!" % who)
class PostHandler(RequestHandler):
    """Echo the two POST form arguments back in a fixed template."""
    def post(self):
        first = self.get_argument("arg1")
        second = self.get_argument("arg2")
        self.finish("Post arg1: %s, arg2: %s" % (first, second))
class PutHandler(RequestHandler):
    """Echo the PUT request body, prefixed with a label."""
    def put(self):
        payload = self.request.body
        self.write("Put body: ")
        self.write(payload)
class RedirectHandler(RequestHandler):
    """Redirect to ?url= using the status code from ?status= (default 302)."""
    def prepare(self):
        target = self.get_argument("url")
        code = int(self.get_argument("status", "302"))
        self.redirect(target, status=code)
class ChunkHandler(RequestHandler):
    """Stream the response in two separate chunks: "asdf" then "qwer"."""
    @gen.coroutine
    def get(self):
        self.write("asdf")
        self.flush()
        # Wait a bit to ensure the chunks are sent and received separately.
        yield gen.sleep(0.01)
        self.write("qwer")
class AuthHandler(RequestHandler):
    """Echo back the Authorization header so tests can inspect it."""
    def get(self):
        self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
    """Redirect /countdown/N down to /countdown/0, then write "Zero"."""
    def get(self, count):
        remaining = int(count)
        if remaining <= 0:
            self.write("Zero")
        else:
            self.redirect(self.reverse_url("countdown", remaining - 1))
class EchoPostHandler(RequestHandler):
    """Echo the raw POST body unchanged."""
    def post(self):
        self.write(self.request.body)
class UserAgentHandler(RequestHandler):
    """Echo the request's User-Agent header (or a placeholder)."""
    def get(self):
        self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
    """Send a 304 that (non-standardly) keeps a Content-Length header."""
    def get(self):
        self.set_status(304)
        self.set_header('Content-Length', 42)
    def _clear_headers_for_304(self):
        # Tornado strips content-length from 304 responses, but here we
        # want to simulate servers that include the headers anyway.
        pass
class PatchHandler(RequestHandler):
    def patch(self):
        """Return the request payload - so we can check it is being kept."""
        self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
    """Respond to every HTTP method (plus custom 'OTHER') with its name."""
    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
    def method(self):
        self.write(self.request.method)
    # one shared implementation for all supported verbs
    get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
    def get_app(self):
        """Return the test Application with one route per handler above."""
        return Application([
            url("/hello", HelloWorldHandler),
            url("/post", PostHandler),
            url("/put", PutHandler),
            url("/redirect", RedirectHandler),
            url("/chunk", ChunkHandler),
            url("/auth", AuthHandler),
            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
            url("/echopost", EchoPostHandler),
            url("/user_agent", UserAgentHandler),
            url("/304_with_content_length", ContentLength304Handler),
            url("/all_methods", AllMethodsHandler),
            url('/patch', PatchHandler),
        ], gzip=True)
    def test_patch_receives_payload(self):
        """PATCH bodies must survive the round trip."""
        body = b"some patch data"
        response = self.fetch("/patch", method='PATCH', body=body)
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, body)
    @skipOnTravis
    def test_hello_world(self):
        """Basic GET: status, headers, body, query args, request_time."""
        response = self.fetch("/hello")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.headers["Content-Type"], "text/plain")
        self.assertEqual(response.body, b"Hello world!")
        self.assertEqual(int(response.request_time), 0)
        response = self.fetch("/hello?name=Ben")
        self.assertEqual(response.body, b"Hello Ben!")
    def test_streaming_callback(self):
        # streaming_callback is also tested in test_chunked
        chunks = []
        response = self.fetch("/hello",
                              streaming_callback=chunks.append)
        # with streaming_callback, data goes to the callback and not response.body
        self.assertEqual(chunks, [b"Hello world!"])
        self.assertFalse(response.body)
    def test_post(self):
        """Form-encoded POST arguments are parsed by the server."""
        response = self.fetch("/post", method="POST",
                              body="arg1=foo&arg2=bar")
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
    def test_chunked(self):
        """Chunked responses are reassembled, or streamed chunk-by-chunk."""
        response = self.fetch("/chunk")
        self.assertEqual(response.body, b"asdfqwer")
        chunks = []
        response = self.fetch("/chunk",
                              streaming_callback=chunks.append)
        self.assertEqual(chunks, [b"asdf", b"qwer"])
        self.assertFalse(response.body)
    def test_chunked_close(self):
        # test case in which chunks spread read-callback processing
        # over several ioloop iterations, but the connection is already closed.
        sock, port = bind_unused_port()
        with closing(sock):
            def write_response(stream, request_data):
                if b"HTTP/1." not in request_data:
                    self.skipTest("requires HTTP/1.x")
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
            def accept_callback(conn, address):
                # fake an HTTP server using chunked encoding where the final chunks
                # and connection close all happen at once
                stream = IOStream(conn, io_loop=self.io_loop)
                stream.read_until(b"\r\n\r\n",
                                  functools.partial(write_response, stream))
            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
            resp = self.wait()
            resp.rethrow()
            self.assertEqual(resp.body, b"12")
            self.io_loop.remove_handler(sock.fileno())
    def test_streaming_stack_context(self):
        """Exceptions in streaming_callback surface via the stack context."""
        chunks = []
        exc_info = []
        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True
        def streaming_cb(chunk):
            chunks.append(chunk)
            if chunk == b'qwer':
                1 / 0
        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', streaming_callback=streaming_cb)
        self.assertEqual(chunks, [b'asdf', b'qwer'])
        self.assertEqual(1, len(exc_info))
        self.assertIs(exc_info[0][0], ZeroDivisionError)
    def test_basic_auth(self):
        """Default auth mode sends a Basic Authorization header."""
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    def test_basic_auth_explicit_mode(self):
        """auth_mode="basic" behaves the same as the default."""
        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                    auth_password="open sesame",
                                    auth_mode="basic").body,
                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    def test_unsupported_auth_mode(self):
        # curl and simple clients handle errors a bit differently; the
        # important thing is that they don't fall back to basic auth
        # on an unknown mode.
        with ExpectLog(gen_log, "uncaught exception", required=False):
            with self.assertRaises((ValueError, HTTPError)):
                response = self.fetch("/auth", auth_username="Aladdin",
                                      auth_password="open sesame",
                                      auth_mode="asdf")
                response.rethrow()
    def test_follow_redirect(self):
        """Redirects are returned raw or followed depending on the flag."""
        response = self.fetch("/countdown/2", follow_redirects=False)
        self.assertEqual(302, response.code)
        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
        response = self.fetch("/countdown/2")
        self.assertEqual(200, response.code)
        self.assertTrue(response.effective_url.endswith("/countdown/0"))
        self.assertEqual(b"Zero", response.body)
    def test_credentials_in_url(self):
        """userinfo embedded in the URL becomes Basic auth."""
        url = self.get_url("/auth").replace("http://", "http://me:secret@")
        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
                         response.body)
    def test_body_encoding(self):
        """Unicode bodies are utf8-encoded; byte bodies pass through."""
        unicode_body = u("\xe9")
        byte_body = binascii.a2b_hex(b"e9")
        # unicode string in body gets converted to utf8
        response = self.fetch("/echopost", method="POST", body=unicode_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "2")
        self.assertEqual(response.body, utf8(unicode_body))
        # byte strings pass through directly
        response = self.fetch("/echopost", method="POST",
                              body=byte_body,
                              headers={"Content-Type": "application/blah"})
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)
        # Mixing unicode in headers and byte string bodies shouldn't
        # break anything
        response = self.fetch("/echopost", method="POST", body=byte_body,
                              headers={"Content-Type": "application/blah"},
                              user_agent=u("foo"))
        self.assertEqual(response.headers["Content-Length"], "1")
        self.assertEqual(response.body, byte_body)
    def test_types(self):
        """Response fields come back with the expected Python types."""
        response = self.fetch("/hello")
        self.assertEqual(type(response.body), bytes)
        self.assertEqual(type(response.headers["Content-Type"]), str)
        self.assertEqual(type(response.code), int)
        self.assertEqual(type(response.effective_url), str)
    def test_header_callback(self):
        """header_callback sees the status line, each header, and the blank line."""
        first_line = []
        headers = {}
        chunks = []
        def header_callback(header_line):
            if header_line.startswith('HTTP/1.1 101'):
                # Upgrading to HTTP/2
                pass
            elif header_line.startswith('HTTP/'):
                first_line.append(header_line)
            elif header_line != '\r\n':
                k, v = header_line.split(':', 1)
                headers[k.lower()] = v.strip()
        def streaming_callback(chunk):
            # All header callbacks are run before any streaming callbacks,
            # so the header data is available to process the data as it
            # comes in.
            self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
            chunks.append(chunk)
        self.fetch('/chunk', header_callback=header_callback,
                   streaming_callback=streaming_callback)
        self.assertEqual(len(first_line), 1, first_line)
        self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
        self.assertEqual(chunks, [b'asdf', b'qwer'])
    def test_header_callback_stack_context(self):
        """Exceptions in header_callback surface via the stack context."""
        exc_info = []
        def error_handler(typ, value, tb):
            exc_info.append((typ, value, tb))
            return True
        def header_callback(header_line):
            if header_line.lower().startswith('content-type:'):
                1 / 0
        with ExceptionStackContext(error_handler):
            self.fetch('/chunk', header_callback=header_callback)
        self.assertEqual(len(exc_info), 1)
        self.assertIs(exc_info[0][0], ZeroDivisionError)
    def test_configure_defaults(self):
        """Per-client default request arguments are applied to fetches."""
        defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
        # Construct a new instance of the configured client class
        client = self.http_client.__class__(self.io_loop, force_instance=True,
                                            defaults=defaults)
        try:
            client.fetch(self.get_url('/user_agent'), callback=self.stop)
            response = self.wait()
            self.assertEqual(response.body, b'TestDefaultUserAgent')
        finally:
            client.close()
    def test_header_types(self):
        # Header values may be passed as character or utf8 byte strings,
        # in a plain dictionary or an HTTPHeaders object.
        # Keys must always be the native str type.
        # All combinations should have the same results on the wire.
        for value in [u("MyUserAgent"), b"MyUserAgent"]:
            for container in [dict, HTTPHeaders]:
                headers = container()
                headers['User-Agent'] = value
                resp = self.fetch('/user_agent', headers=headers)
                self.assertEqual(
                    resp.body, b"MyUserAgent",
                    "response=%r, value=%r, container=%r" %
                    (resp.body, value, container))
    def test_304_with_content_length(self):
        # According to the spec 304 responses SHOULD NOT include
        # Content-Length or other entity headers, but some servers do it
        # anyway.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
        response = self.fetch('/304_with_content_length')
        self.assertEqual(response.code, 304)
        self.assertEqual(response.headers['Content-Length'], '42')
    def test_final_callback_stack_context(self):
        """The user's final callback must run outside the client's
        stack_context.

        The final callback should be run outside of the httpclient's
        stack_context. We want to ensure that there is not stack_context
        between the user's callback and the IOLoop, so monkey-patch
        IOLoop.handle_callback_exception and disable the test harness's
        context with a NullContext.
        Note that this does not apply to secondary callbacks (header
        and streaming_callback), as errors there must be seen as errors
        by the http client so it can clean up the connection.
        """
        exc_info = []
        def handle_callback_exception(callback):
            # Record whatever exception the IOLoop saw, then let wait() return.
            exc_info.append(sys.exc_info())
            self.stop()
        self.io_loop.handle_callback_exception = handle_callback_exception
        with NullContext():
            # The lambda raises; the error should reach the IOLoop directly.
            self.http_client.fetch(self.get_url('/hello'),
                                   lambda response: 1 / 0)
        self.wait()
        self.assertEqual(exc_info[0][0], ZeroDivisionError)
    @gen_test
    def test_future_interface(self):
        """fetch() without a callback returns a Future usable with yield."""
        response = yield self.http_client.fetch(self.get_url('/hello'))
        self.assertEqual(response.body, b'Hello world!')
    @gen_test
    def test_future_http_error(self):
        """Yielding a failed fetch raises HTTPError, which carries both the
        status code and the full response object."""
        with self.assertRaises(HTTPError) as context:
            yield self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(context.exception.code, 404)
        self.assertEqual(context.exception.response.code, 404)
    @gen_test
    def test_future_http_error_no_raise(self):
        """With raise_error=False, a non-2xx response is returned normally
        instead of being raised as HTTPError."""
        response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
        self.assertEqual(response.code, 404)
    @gen_test
    def test_reuse_request_from_response(self):
        """A response's ``request`` attribute can be passed back to fetch().

        The response.request attribute should be an HTTPRequest, not
        a _RequestProxy.
        This test uses self.http_client.fetch because self.fetch calls
        self.get_url on the input unconditionally.
        """
        url = self.get_url('/hello')
        response = yield self.http_client.fetch(url)
        self.assertEqual(response.request.url, url)
        self.assertTrue(isinstance(response.request, HTTPRequest))
        # Re-fetching with the recovered request object must still work.
        response2 = yield self.http_client.fetch(response.request)
        self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
    @gen_test
    def test_body_sanity_checks(self):
        """fetch() validates that a body is supplied exactly when the HTTP
        method requires one."""
        hello_url = self.get_url('/hello')
        with self.assertRaises(ValueError) as context:
            # GET (the default method) must not carry a body.
            yield self.http_client.fetch(hello_url, body='data')
        self.assertTrue('must be None' in str(context.exception))
        with self.assertRaises(ValueError) as context:
            # POST requires a body.
            yield self.http_client.fetch(hello_url, method='POST')
        self.assertTrue('must not be None' in str(context.exception))
    # This test causes odd failures with the combination of
    # curl_httpclient (at least with the version of libcurl available
    # on ubuntu 12.04), TwistedIOLoop, and epoll.  For POST (but not PUT),
    # curl decides the response came back too soon and closes the connection
    # to start again.  It does this *before* telling the socket callback to
    # unregister the FD.  Some IOLoop implementations have special kernel
    # integration to discover this immediately.  Tornado's IOLoops
    # ignore errors on remove_handler to accommodate this behavior, but
    # Twisted's reactor does not.  The removeReader call fails and so
    # do all future removeAll calls (which our tests do at cleanup).
    #
    # def test_post_307(self):
    #    response = self.fetch("/redirect?status=307&url=/post",
    #                          method="POST", body=b"arg1=foo&arg2=bar")
    #    self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch("/redirect?status=307&url=/put",
method="PUT", body=b"hello")
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
class RequestProxyTest(unittest.TestCase):
    """Exercises _RequestProxy's attribute resolution: values set on the
    request win over defaults, defaults fill gaps, unset attributes
    resolve to None, and unknown attributes raise AttributeError."""

    def test_request_set(self):
        # Value present on the request, no default supplied.
        proxy = _RequestProxy(
            HTTPRequest('http://example.com/', user_agent='foo'), dict())
        self.assertEqual(proxy.user_agent, 'foo')

    def test_default_set(self):
        # Value absent from the request; the defaults dict supplies it.
        proxy = _RequestProxy(
            HTTPRequest('http://example.com/'),
            dict(network_interface='foo'))
        self.assertEqual(proxy.network_interface, 'foo')

    def test_both_set(self):
        # The request's own value shadows the default.
        proxy = _RequestProxy(
            HTTPRequest('http://example.com/', proxy_host='foo'),
            dict(proxy_host='bar'))
        self.assertEqual(proxy.proxy_host, 'foo')

    def test_neither_set(self):
        # Neither source defines the attribute: it resolves to None.
        proxy = _RequestProxy(HTTPRequest('http://example.com/'), dict())
        self.assertIs(proxy.auth_username, None)

    def test_bad_attribute(self):
        # Attributes outside HTTPRequest's known set are rejected.
        proxy = _RequestProxy(HTTPRequest('http://example.com/'), dict())
        with self.assertRaises(AttributeError):
            proxy.foo

    def test_defaults_none(self):
        # A None defaults mapping behaves like an empty one.
        proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
        self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
    """Sanity check for HTTPResponse's string representation."""

    def test_str(self):
        rendered = str(HTTPResponse(HTTPRequest('http://example.com'),
                                    200, headers={}, buffer=BytesIO()))
        # The repr should identify the type and include the status code.
        self.assertTrue(rendered.startswith('HTTPResponse('))
        self.assertIn('code=200', rendered)
class SyncHTTPClientTest(unittest.TestCase):
    """Tests the blocking HTTPClient against an HTTPServer running on a
    separate IOLoop in a background thread."""
    def setUp(self):
        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                  'AsyncIOMainLoop'):
            # TwistedIOLoop only supports the global reactor, so we can't have
            # separate IOLoops for client and server threads.
            # AsyncIOMainLoop doesn't work with the default policy
            # (although it could with some tweaks to this test and a
            # policy that created loops for non-main threads).
            raise unittest.SkipTest(
                'Sync HTTPClient not compatible with TwistedIOLoop or '
                'AsyncIOMainLoop')
        # The server runs its own IOLoop in a background thread; the
        # synchronous HTTPClient below manages a private IOLoop of its own.
        self.server_ioloop = IOLoop()
        sock, self.port = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        self.server = HTTPServer(app, io_loop=self.server_ioloop)
        self.server.add_socket(sock)
        self.server_thread = threading.Thread(target=self.server_ioloop.start)
        self.server_thread.start()
        self.http_client = HTTPClient()
    def tearDown(self):
        # Ask the server thread to shut down from inside its own IOLoop,
        # then join it before closing resources from this thread.
        def stop_server():
            self.server.stop()
            self.server_ioloop.stop()
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)
    def get_url(self, path):
        # Absolute URL pointing at the background server.
        return 'http://127.0.0.1:%d%s' % (self.port, path)
    def test_sync_client(self):
        # fetch() blocks and returns the response directly.
        response = self.http_client.fetch(self.get_url('/'))
        self.assertEqual(b'Hello world!', response.body)
    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url('/notfound'))
        self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
    """Unit tests for HTTPRequest attribute handling; no network I/O."""

    def test_headers(self):
        req = HTTPRequest('http://example.com', headers={'foo': 'bar'})
        self.assertEqual(req.headers, {'foo': 'bar'})

    def test_headers_setter(self):
        req = HTTPRequest('http://example.com')
        req.headers = {'bar': 'baz'}
        self.assertEqual(req.headers, {'bar': 'baz'})

    def test_null_headers_setter(self):
        # Assigning None normalizes to an empty header collection.
        req = HTTPRequest('http://example.com')
        req.headers = None
        self.assertEqual(req.headers, {})

    def test_body(self):
        req = HTTPRequest('http://example.com', body='foo')
        self.assertEqual(req.body, utf8('foo'))

    def test_body_setter(self):
        # str bodies are utf8-encoded on assignment, same as in __init__.
        req = HTTPRequest('http://example.com')
        req.body = 'foo'
        self.assertEqual(req.body, utf8('foo'))

    def test_if_modified_since(self):
        stamp = datetime.datetime.utcnow()
        req = HTTPRequest('http://example.com', if_modified_since=stamp)
        self.assertEqual(req.headers,
                         {'If-Modified-Since': format_timestamp(stamp)})
| |
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from unittest import TestCase
import mock
from hpOneView.connection import connection
from hpOneView.resources.resource import ResourceClient
from hpOneView.resources.servers.server_profiles import ServerProfiles
TIMEOUT = -1
class ServerProfilesTest(TestCase):
    """Unit tests for the ServerProfiles resource wrapper.

    Each test patches the relevant ResourceClient method and verifies
    that ServerProfiles delegates to it with the expected arguments;
    no real HTTP requests are made.
    """

    def setUp(self):
        host = '127.0.0.1'
        http_connection = connection(host)
        self._resource = ServerProfiles(http_connection)

    @mock.patch.object(ResourceClient, 'get_all')
    def test_get_all(self, mock_get_all):
        query_filter = 'name=TestName'
        sort = 'name:ascending'
        self._resource.get_all(start=2, count=500, filter=query_filter, sort=sort)
        mock_get_all.assert_called_once_with(start=2, count=500, filter=query_filter, sort=sort)

    @mock.patch.object(ResourceClient, 'get')
    def test_get_by_id(self, mock_get):
        id = "6fee02f3-b7c7-42bd-a528-04341e16bad6"
        self._resource.get(id)
        mock_get.assert_called_once_with(id_or_uri=id)

    @mock.patch.object(ResourceClient, 'get_by')
    def test_get_by_property(self, mock_get_by):
        profile_property = "name"
        profile_name = "Server Profile Test"
        self._resource.get_by(profile_property, profile_name)
        mock_get_by.assert_called_once_with(profile_property, profile_name)

    @mock.patch.object(ResourceClient, 'get_by_name')
    def test_get_by_name(self, mock_get_by_name):
        profile_name = "Server Profile Test"
        self._resource.get_by_name(profile_name)
        mock_get_by_name.assert_called_once_with(profile_name)

    @mock.patch.object(ResourceClient, 'create')
    def test_create(self, mock_create):
        template = dict(name="Server Profile Test")
        # create() is expected to inject the resource type before delegating.
        expected_template = template.copy()
        expected_template["type"] = "ServerProfileV5"
        self._resource.create(resource=template, timeout=TIMEOUT)
        mock_create.assert_called_once_with(resource=expected_template, timeout=TIMEOUT)

    @mock.patch.object(ResourceClient, 'update')
    def test_update(self, mock_update):
        uri = "/rest/server-profiles/4ff2327f-7638-4b66-ad9d-283d4940a4ae"
        template = dict(name="Server Profile Test", macType="Virtual")
        # update() also injects the resource type before delegating.
        expected_template = template.copy()
        expected_template["type"] = "ServerProfileV5"
        self._resource.update(resource=template, id_or_uri=uri)
        mock_update.assert_called_once_with(resource=expected_template, uri=uri)

    @mock.patch.object(ResourceClient, 'delete')
    def test_delete(self, mock_delete):
        template = dict(name="Server Profile Test")
        self._resource.delete(resource=template, timeout=TIMEOUT)
        mock_delete.assert_called_once_with(resource=template, timeout=TIMEOUT)

    @mock.patch.object(ResourceClient, 'patch')
    def test_patch(self, mock_patch):  # parameter renamed: was misspelled 'mock_pacth'
        uri = "/rest/server-profiles/4ff2327f-7638-4b66-ad9d-283d4940a4ae"
        self._resource.patch(uri, "replace", "/templateCompliance", "Compliant")
        mock_patch.assert_called_once_with(uri, "replace", "/templateCompliance", "Compliant", -1)

    @mock.patch.object(ResourceClient, 'get_schema')
    def test_get_schema(self, get_schema):
        self._resource.get_schema()
        # NOTE(review): assert_called_once() only verifies on mock >= 2.0;
        # on older mock versions this attribute access is a silent no-op.
        get_schema.assert_called_once()

    @mock.patch.object(ResourceClient, 'get')
    def test_get_compliance_preview(self, mock_get):
        server_uri = "/rest/server-profiles/4ff2327f-7638-4b66-ad9d-283d4940a4ae"
        uri = "/rest/server-profiles/4ff2327f-7638-4b66-ad9d-283d4940a4ae/compliance-preview"
        self._resource.get_compliance_preview(server_uri)
        mock_get.assert_called_once_with(uri)

    @mock.patch.object(ResourceClient, 'get')
    def test_get_profile_ports(self, mock_get):
        uri = "/rest/server-profiles/profile-ports" \
              "?enclosureGroupUri=/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4" \
              "&serverHardwareTypeUri=/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        server_hardware_type_uri = "/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        enclosure_group_uri = "/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4"
        self._resource.get_profile_ports(enclosureGroupUri=enclosure_group_uri,
                                         serverHardwareTypeUri=server_hardware_type_uri)
        mock_get.assert_called_once_with(uri)

    @mock.patch.object(ResourceClient, 'get')
    def test_get_messages(self, mock_get):
        server_uri = "/rest/server-profiles/4ff2327f-7638-4b66-ad9d-283d4940a4ae"
        uri = "/rest/server-profiles/4ff2327f-7638-4b66-ad9d-283d4940a4ae/messages"
        self._resource.get_messages(server_uri)
        mock_get.assert_called_once_with(uri)

    @mock.patch.object(ResourceClient, 'get')
    def test_get_transformation(self, mock_get):
        server_id = "4ff2327f-7638-4b66-ad9d-283d4940a4ae"
        uri = "/rest/server-profiles/4ff2327f-7638-4b66-ad9d-283d4940a4ae/transformation" \
              "?enclosureGroupUri=/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4" \
              "&serverHardwareTypeUri=/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        server_hardware_type_uri = "/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        enclosure_group_uri = "/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4"
        self._resource.get_transformation(server_id, enclosureGroupUri=enclosure_group_uri,
                                          serverHardwareTypeUri=server_hardware_type_uri)
        mock_get.assert_called_once_with(uri)

    @mock.patch.object(ResourceClient, 'get')
    def test_get_available_networks(self, mock_get):
        uri = "/rest/server-profiles/available-networks" \
              "?enclosureGroupUri=/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4" \
              "&serverHardwareTypeUri=/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        server_hardware_type_uri = "/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        enclosure_group_uri = "/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4"
        self._resource.get_available_networks(enclosureGroupUri=enclosure_group_uri,
                                              serverHardwareTypeUri=server_hardware_type_uri)
        mock_get.assert_called_once_with(uri)

    @mock.patch.object(ResourceClient, 'get')
    def test_get_available_servers(self, mock_get):
        uri = "/rest/server-profiles/available-servers" \
              "?enclosureGroupUri=/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4" \
              "&serverHardwareTypeUri=/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        server_hardware_type_uri = "/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        enclosure_group_uri = "/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4"
        self._resource.get_available_servers(enclosureGroupUri=enclosure_group_uri,
                                             serverHardwareTypeUri=server_hardware_type_uri)
        mock_get.assert_called_once_with(uri)

    @mock.patch.object(ResourceClient, 'get')
    def test_get_available_storage_system(self, mock_get):
        uri = "/rest/server-profiles/available-storage-system" \
              "?enclosureGroupUri=/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4" \
              "&serverHardwareTypeUri=/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        server_hardware_type_uri = "/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        enclosure_group_uri = "/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4"
        self._resource.get_available_storage_system(enclosureGroupUri=enclosure_group_uri,
                                                    serverHardwareTypeUri=server_hardware_type_uri)
        mock_get.assert_called_once_with(uri)

    @mock.patch.object(ResourceClient, 'get_all')
    def test_get_available_storage_systems(self, mock_get):
        # Unlike the singular endpoint above, this one delegates to get_all
        # with default pagination/filter/sort arguments.
        uri = "/rest/server-profiles/available-storage-systems" \
              "?enclosureGroupUri=/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4" \
              "&serverHardwareTypeUri=/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        server_hardware_type_uri = "/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        enclosure_group_uri = "/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4"
        self._resource.get_available_storage_systems(enclosureGroupUri=enclosure_group_uri,
                                                     serverHardwareTypeUri=server_hardware_type_uri)
        mock_get.assert_called_once_with(start=0, count=-1, filter='', sort='', uri=uri)

    @mock.patch.object(ResourceClient, 'get')
    def test_get_available_targets(self, mock_get):
        uri = "/rest/server-profiles/available-targets" \
              "?enclosureGroupUri=/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4" \
              "&serverHardwareTypeUri=/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        server_hardware_type_uri = "/rest/server-hardware-types/C8DEF9A6-9586-465E-A951-3070988BC226"
        enclosure_group_uri = "/rest/enclosure-groups/a0f1c07b-f811-4c85-8e38-ac5ec34ea2f4"
        self._resource.get_available_targets(enclosureGroupUri=enclosure_group_uri,
                                             serverHardwareTypeUri=server_hardware_type_uri)
        mock_get.assert_called_once_with(uri)
| |
import unittest
import six
import json
from datetime import datetime
from mock import patch, mock_open
from wargaming import WoT, WGN, WoTB, WoWP, WoWS, WoTX, settings
from wargaming.meta import MetaAPI, BaseAPI, WGAPI
from wargaming.settings import RETRY_COUNT, HTTP_USER_AGENT_HEADER
from wargaming.exceptions import ValidationError, RequestError
# Fake API schema served to MetaAPI through a mocked open(): one sub-module
# with two schema-driven functions (func1/func2), a gap where the custom
# func3 is attached in code, and a POST-only func4 with a required clan_id.
open_schema = mock_open(read_data=json.dumps(
    {'sub_module_name': {
        'func1': {
            'allowed_http_methods': ['GET', 'POST'],
            'description': 'doc example for func1',
            'parameters': [{
                'name': 'application_id',
                'description': 'doc for application_id',
                'required': True,
                'type': 'string',
            }, {
                'name': 'language',
                'description': 'doc for language',
                'required': True,
                'type': 'string',
            }],
            # Field type declarations drive the optional response parser.
            'fields': [{
                'name': ['integer'],
                'type': 'numeric',
            }, {
                'name': ['timestamp'],
                'type': 'timestamp',
            }],
            'url': 'sub_module_name/func1',
        }, 'func2': {
            'allowed_http_methods': ['GET', 'POST'],
            'description': 'doc example for func2',
            'parameters': [{
                'name': 'application_id',
                'description': 'doc for application_id',
                'required': True,
                'type': 'string',
            }, {
                'name': 'language',
                'description': 'doc for language',
                'required': True,
                'type': 'string',
            }, {
                'name': 'date',
                'description': 'sample date field',
                'type': 'timestamp/date',
            }, {
                'name': 'fields',
                'description': 'doc for fields',
                'type': 'string',
            }],
            'url': 'sub_module_name/func2',
        },
        # }, 'func3': { is used for custom function
        'func4': {
            'allowed_http_methods': ['POST'],
            'description': 'doc example for func4',
            'parameters': [{
                'name': 'application_id',
                'description': 'doc for application_id',
                'required': True,
                'type': 'string',
            }, {
                'name': 'language',
                'description': 'doc for language',
                'required': True,
                'type': 'string',
            }, {
                'name': 'clan_id',
                'description': 'fetch info about clan',
                'required': True,
                'type': 'string',
            }],
            'url': 'sub_module_name/func4',
        }
    }}
))
@patch('wargaming.meta.ALLOWED_GAMES', ['demo'])
@patch('wargaming.meta.GAME_API_ENDPOINTS', {'demo': 'https://api.worldoftanks'})
@patch('wargaming.meta.open', open_schema)
def get_demo_class():
    """Build and return a Demo API class against the mocked schema.

    The patches must be active while the class body executes because
    MetaAPI reads the schema via open() at class-creation time.
    """
    class Demo(six.with_metaclass(MetaAPI, BaseAPI)):
        def __init__(self, *args, **kwargs):
            super(Demo, self).__init__(*args, **kwargs)
            # func3 is absent from the schema; attach it as a custom function.
            self.sub_module_name.func3 = lambda: True
    return Demo
class WargamingMetaTestCase(unittest.TestCase):
    """Exercises the MetaAPI-generated client built from the mocked schema
    (``open_schema`` above): validation, request construction, parameter
    serialization, and the WGAPI lazy-result wrapper."""
    @patch('wargaming.meta.ALLOWED_GAMES', ['demo'])
    @patch('wargaming.meta.GAME_API_ENDPOINTS', {'demo': 'https://api.worldoftanks'})
    @patch('wargaming.meta.open', open_schema)
    def setUp(self):
        # Arguments: application_id, region, language.
        self.demo = get_demo_class()('demo', 'ru', 'ru')
    def test_invalid_game(self):
        # Without the ALLOWED_GAMES patch, 'demo' is not a valid game name.
        with self.assertRaises(ValidationError):
            class Demo(six.with_metaclass(MetaAPI, BaseAPI)):
                pass
    def test_invalid_region(self):
        # 'ua' is not an allowed region for WoT.
        with self.assertRaises(ValidationError):
            WoT('demo', 'ua', 'ua')
    @patch('wargaming.meta.requests.get')
    def test_call_api(self, get):
        base_url = self.demo.base_url
        # Endpoint access is lazy; _fetch_data() triggers the HTTP request.
        self.demo.sub_module_name.func1()._fetch_data()
        get.assert_called_once_with(
            base_url + 'sub_module_name/func1/',
            params={
                'language': self.demo.language,
                'application_id': self.demo.application_id,
            },
            headers={
                'User-Agent': HTTP_USER_AGENT_HEADER,
            }
        )
        get.reset_mock()
        # List-valued parameters are serialized as comma-joined strings.
        self.demo.sub_module_name.func2(application_id=1, language=1,
                                        fields=list(range(3)))\
            ._fetch_data()
        get.assert_called_once_with(
            base_url + 'sub_module_name/func2/',
            params={
                'language': 1,
                'application_id': 1,
                'fields': '0,1,2',
            },
            headers={
                'User-Agent': HTTP_USER_AGENT_HEADER,
            }
        )
    def test_parameters_tuple_list_join(self):
        # Lists and tuples (of strings or ints) are joined with commas.
        fields = ['f1', 'f2', 'f3']
        res = self.demo.sub_module_name.func2(fields=fields)
        self.assertEqual(','.join(fields), res.params['fields'])
        fields = [1, 2, 3]
        res = self.demo.sub_module_name.func2(fields=fields)
        self.assertEqual(','.join([str(i) for i in fields]), res.params['fields'])
        fields = (1, 2, 3)
        res = self.demo.sub_module_name.func2(fields=fields)
        self.assertEqual(','.join([str(i) for i in fields]), res.params['fields'])
    def test_parameters_convert_date(self):
        # datetime parameters are serialized in ISO-8601 format.
        date = datetime(2016, 1, 1, 13, 0, 0)
        res = self.demo.sub_module_name.func2(date=date)
        self.assertEqual(date.isoformat(), res.params['date'])
    def test_parameters_schema_function_wrong_parameter(self):
        # Parameters not declared in the schema are rejected.
        with self.assertRaises(ValidationError):
            self.demo.sub_module_name.func1(wrong_parameter='123')
    def test_parameters_schema_function_missing_required_parameter(self):
        # func4 declares clan_id as required.
        with self.assertRaises(ValidationError):
            self.demo.sub_module_name.func4()
        self.demo.sub_module_name.func4(clan_id='123')
    @patch('wargaming.meta.requests.get')
    def test_schema_function(self, get):
        get.return_value.json.return_value = {'status': 'ok', 'data': [{'id': '123456'}]}
        value = self.demo.sub_module_name.func1()
        self.assertIsInstance(value, WGAPI)
        # Iterating the WGAPI wrapper triggers the fetch.
        ret_val = list(i for i in value)
        self.assertEqual(ret_val, [{'id': '123456'}])
        get.assert_called_once_with(self.demo.base_url + 'sub_module_name/func1/',
                                    headers={'User-Agent': settings.HTTP_USER_AGENT_HEADER},
                                    params={'application_id': 'demo', 'language': 'ru'})
    def test_custom_function(self):
        # func3 was attached in Demo.__init__, bypassing the schema.
        self.assertEqual(self.demo.sub_module_name.func3(), True)
    @patch('wargaming.meta.requests.get')
    def test_wgapi_list(self, get):
        data = [{'id': '123456'}]
        get.return_value.json.return_value = {'status': 'ok', 'data': data}
        res = WGAPI('http://apiurl/')
        self.assertEqual(res[0], data[0])
        self.assertEqual(list(res), data)
    @patch('wargaming.meta.requests.get')
    def test_wgapi_dict(self, get):
        data = {
            '123456': {'name': 'title'},
            123458: {'name': 'title 3'},
        }
        get.return_value.json.return_value = {'status': 'ok', 'data': data}
        res = WGAPI('http://apiurl/')
        # test conversion of numeric keys
        self.assertEqual(res[123456], data['123456'])
        self.assertEqual(res['123456'], data['123456'])
        self.assertEqual(res[123458], data[123458])
        self.assertEqual(res['123458'], data[123458])
        self.assertEqual(dict(res), data)
        self.assertTrue(all(i in res.values() for i in data.values()))
    @patch('wargaming.meta.requests.get')
    def test_wgapi_strings(self, get):
        data = [{'id': '123456'}]
        get.return_value.json.return_value = {'status': 'ok', 'data': data}
        res = WGAPI('http://apiurl/')
        self.assertEqual(str(res), str(data))
        self.assertEqual(repr(res), str(data))
        # Long payloads: repr is truncated to 200 chars plus an ellipsis.
        data = [{'id': '123456'}] * 20
        get.return_value.json.return_value = {'status': 'ok', 'data': data}
        res = WGAPI('http://apiurl/')
        self.assertEqual(repr(res), str(data)[0:200] + '...')
    @patch('wargaming.meta.requests.get')
    def test_wgapi_retry(self, get):
        # A 504 SOURCE_NOT_AVAILABLE error is retried RETRY_COUNT times
        # before finally raising RequestError.
        get.return_value.json.return_value = {'status': 'error', 'error': {
            'code': 504,
            'field': None,
            'message': u'SOURCE_NOT_AVAILABLE',
            'value': None
        }}
        res = WGAPI('http://apiurl/')
        with self.assertRaises(RequestError):
            res._fetch_data()
        self.assertEqual(get.return_value.json.call_count, RETRY_COUNT)
    @patch('wargaming.meta.requests.get')
    def test_wg_unofficial(self, get):
        # Unofficial endpoints still validate their argument types.
        get.return_value.json.return_value = [{'id': '123456'}]
        wot = WoT('demo', 'ru', 'ru')
        with self.assertRaises(ValidationError):
            wot.globalmap.wg_clan_battles('')
        res = wot.globalmap.wg_clan_battles(123)
        self.assertIsInstance(res, WGAPI)
class WargamingMetaWithParserTestCase(WargamingMetaTestCase):
    """Re-runs every WargamingMetaTestCase test with enable_parser=True,
    plus parser-specific field-conversion checks."""
    @patch('wargaming.meta.ALLOWED_GAMES', ['demo'])
    @patch('wargaming.meta.GAME_API_ENDPOINTS', {'demo': 'https://api.worldoftanks'})
    @patch('wargaming.meta.open', open_schema)
    def setUp(self):
        self.demo = get_demo_class()('demo', 'ru', 'ru', enable_parser=True)
    @patch('wargaming.meta.requests.get')
    def test_field_datetime_conversion(self, get):
        # The schema marks 'integer' as numeric and 'timestamp' as timestamp;
        # the parser must coerce the raw JSON values accordingly.
        get.return_value.json.return_value = {
            'status': 'ok', 'data': {'integer': '1', 'timestamp': 1}}
        res = self.demo.sub_module_name.func1()
        self.assertEqual(res.data, {'integer': 1, 'timestamp': datetime.fromtimestamp(1)})
class WargamingTestCase(unittest.TestCase):
    """Smoke test: every shipped game client can load its real schema."""

    @staticmethod
    def test_real_schema():
        # Instantiating each client parses its bundled schema file;
        # any schema error raises during construction.
        for game_cls in (WoT, WGN, WoTB, WoWP, WoWS, WoTX):
            game_cls('demo', 'ru', 'ru')
| |
from __future__ import division, print_function, absolute_import
import sys
import traceback
import numpy as np
# noinspection PyPackageRequirements
from PIL import Image
import vision_definitions
from naoqi import ALProxy
from mlpy.modules import Module
from mlpy.constants import micro
from .detectors import BallDetector
# NAOqi camera indices used with ALVideoDevice.subscribeCamera.
kTopCamera = 0
kBottomCamera = 1
class CameraImage(object):
    """A single frame captured from one of the robot's cameras.

    Parameters
    ----------
    img : ndarray[Image]
        The raw image data.
    width : int
        The width of the image.
    height : int
        The height of the image.
    timestamp : float
        The time the image was captured.
    camera_id : {kTopCamera, kBottomCamera}
        Whether the bottom or the top camera captured the image.

    Attributes
    ----------
    image : ndarray[Image]
        The raw image data.
    width : int
        The width of the image.
    height : int
        The height of the image.
    timestamp : float
        The time the image was captured.
    camera_id : {kTopCamera, kBottomCamera}
        Whether the bottom or the top camera captured the image.
    """

    def __init__(self, img, width, height, timestamp, camera_id):
        # Plain value object: attributes are stored as given, no copies made.
        self.camera_id = camera_id
        self.timestamp = timestamp
        self.height = height
        self.width = width
        self.image = img
class ImageProcessor(Module):
    """The image processor.

    Processes the images provided by the bottom and top cameras and
    detects relevant objects.

    Parameters
    ----------
    pip : str
        The IP of the agent for which to process the camera images.
    pport : int
        The port of the agent for which to process the camera images.

    Notes
    -----
    For object detection, the bottom camera *always* takes precedence.
    Currently the image processor only handles ball detection.
    """
    def __init__(self, pip, pport):
        super(ImageProcessor, self).__init__(self._generate_id(pip, pport))
        self._cameraProxy = None
        self._subscriber_btm_id = None
        self._subscriber_top_id = None
        self._resolution = vision_definitions.kQVGA
        self._ball_detector = BallDetector(self._resolution)
        self._initialize(pip, pport)
    def reset(self, t, **kwargs):
        """Reset the image processor.

        Ensure that the subscriptions to the cameras are valid.

        Parameters
        ----------
        t : float
            The current time (sec)
        kwargs : dict, optional
            Non-positional parameters, optional.
        """
        super(ImageProcessor, self).reset(t, **kwargs)
        # The module id encodes "<module.class>:<pip>:<pport>:<n>"
        # (see _generate_id), so fields 1 and 2 recover pip and pport.
        self._initialize(self.mid.split(':')[1], int(self.mid.split(':')[2]))
    def enter(self, t):
        """Perform preliminary setup.

        Parameters
        ----------
        t : float
            The current time (sec).
        """
        super(ImageProcessor, self).enter(t)
        self._ball_detector.start()
    def update(self, dt):
        """Update the image processor.

        Capture images from the camera and detect objects.
        The bottom camera takes precedence: the top camera image is only
        consulted when no ball was found in the bottom camera image.

        Parameters
        ----------
        dt : float
            The elapsed time (sec)
        """
        super(ImageProcessor, self).update(dt)
        image_btm = self._get_image(kBottomCamera)
        if not self._ball_detector.update(image_btm):
            image_top = self._get_image(kTopCamera)
            self._ball_detector.update(image_top)
    def exit(self):
        """Exit the image processor.

        Stop the detectors and unsubscribe from the cameras.
        """
        super(ImageProcessor, self).exit()
        self._ball_detector.stop()
        try:
            self._cameraProxy.ping()
            if self._subscriber_top_id is not None:
                self._cameraProxy.unsubscribe(self._subscriber_top_id)
            if self._subscriber_btm_id is not None:
                self._cameraProxy.unsubscribe(self._subscriber_btm_id)
        except Exception:
            # Best-effort cleanup: the proxy may already be unreachable.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
    def get_resolution(self):
        """Returns the image resolution.

        Returns
        -------
        tuple[int] or None
            The (width, height) of the image, or None when the configured
            resolution constant is not one of the recognized values.
        """
        if self._resolution == vision_definitions.kQQVGA:
            return (160, 120)
        if self._resolution == vision_definitions.kQVGA:
            return (320, 240)
        if self._resolution == vision_definitions.kVGA:
            return (640, 480)
        if self._resolution in (vision_definitions.k4VGA,
                                vision_definitions.k960p):
            return (1280, 960)
        return None
    def _initialize(self, pip, pport):
        """Initializes the cameras.

        Ensure that valid proxies to the Nao cameras exist
        and subscribe to the cameras.

        Parameters
        ----------
        pip : str
            The IP of the agent for which to process the camera images.
        pport : int
            The port of the agent for which to process the camera images.
        """
        try:
            # noinspection PyBroadException
            try:
                # Reuse the existing proxy if it still answers.
                self._cameraProxy.ping()
            except Exception:
                self._cameraProxy = ALProxy("ALVideoDevice", pip, pport)
            color_space = vision_definitions.kBGRColorSpace
            fps = 30
            self._subscriber_btm_id = self._cameraProxy.subscribeCamera("ImageProcessor", kBottomCamera,
                                                                        self._resolution, color_space, fps)
            self._subscriber_top_id = self._cameraProxy.subscribeCamera("ImageProcessor", kTopCamera,
                                                                        self._resolution, color_space, fps)
        except Exception:
            # Camera access is essential; report the failure and bail out.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
            sys.exit(1)
    def _get_image(self, camera_id):
        """Returns the image for the given camera.

        Capture the camera image from the camera with the given id.

        Parameters
        ----------
        camera_id : {kTopCamera, kBottomCamera}
            Whether the bottom or the top camera captured
            the image.

        Returns
        -------
        CameraImage
            The captured camera image.
        """
        subscriber_id = self._subscriber_btm_id if camera_id == kBottomCamera else self._subscriber_top_id
        cam_img = self._cameraProxy.getImageRemote(subscriber_id)
        try:
            # getImageRemote layout: [0]=width, [1]=height, [4]=sec,
            # [5]=usec, [6]=raw pixel data, [7]=camera id.
            img_width = cam_img[0]
            img_height = cam_img[1]
            im = Image.frombytes("RGB", (img_width, img_height), cam_img[6])
            timestamp = cam_img[4] + cam_img[5] / micro
            return CameraImage(np.array(im), img_width, img_height, timestamp, cam_img[7])
        except Exception:
            # A malformed frame is treated as fatal, matching _initialize.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
            sys.exit(1)
    def _generate_id(self, pip, pport):
        """Build the unique module id: '<module.class>:<pip>:<pport>:<n>'."""
        return "%s.%s:%s:%i:%i" % (self.__class__.__module__, self.__class__.__name__, pip, pport, next(self._ids))
| |
# $Id: frontmatter.py 8671 2021-04-07 12:09:51Z milde $
# Author: David Goodger, Ueli Schlaepfer <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Transforms related to the front matter of a document or a section
(information found before the main text):
- `DocTitle`: Used to transform a lone top level section's title to
the document title, promote a remaining lone top-level section's
title to the document subtitle, and determine the document's title
metadata (document['title']) based on the document title and/or the
"title" setting.
- `SectionSubTitle`: Used to transform a lone subsection into a
subtitle.
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
if sys.version_info >= (3, 0):
unicode = str # noqa
class TitlePromoter(Transform):
    """
    Abstract base class for DocTitle and SectionSubTitle transforms.
    """
    def promote_title(self, node):
        """
        Transform the following tree::

            <node>
                <section>
                    <title>
                ...

        into ::

            <node>
                <title>
            ...

        `node` is normally a document.

        Returns True if a promotion took place, False otherwise.
        """
        # Type check
        if not isinstance(node, nodes.Element):
            raise TypeError('node must be of Element-derived type.')
        # `node` must not have a title yet.
        assert not (len(node) and isinstance(node[0], nodes.title))
        section, index = self.candidate_index(node)
        if index is None:
            # No lone top-level section to promote.
            return False
        # Transfer the section's attributes to the node:
        # NOTE: Change `replace` to False to NOT replace attributes that
        #       already exist in node with those in section.
        # NOTE: Remove `and_source` to NOT copy the 'source'
        #       attribute from section
        node.update_all_atts_concatenating(section, replace=True, and_source=True)
        # setup_child is called automatically for all nodes.
        node[:] = (section[:1]    # section title
                   + node[:index]  # everything that was in the
                                   # node before the section
                   + section[1:])  # everything that was in the section
        assert isinstance(node[0], nodes.title)
        return True
    def promote_subtitle(self, node):
        """
        Transform the following node tree::

            <node>
                <title>
                <section>
                    <title>
                ...

        into ::

            <node>
                <title>
                <subtitle>
            ...

        Returns True if a promotion took place, False otherwise.
        """
        # Type check
        if not isinstance(node, nodes.Element):
            raise TypeError('node must be of Element-derived type.')
        subsection, index = self.candidate_index(node)
        if index is None:
            return False
        subtitle = nodes.subtitle()
        # Transfer the subsection's attributes to the new subtitle
        # NOTE: Change `replace` to False to NOT replace attributes
        #       that already exist in node with those in section.
        # NOTE: Remove `and_source` to NOT copy the 'source'
        #       attribute from section.
        subtitle.update_all_atts_concatenating(subsection, replace=True, and_source=True)
        # Transfer the contents of the subsection's title to the
        # subtitle:
        subtitle[:] = subsection[0][:]
        node[:] = (node[:1]       # title
                   + [subtitle]
                   # everything that was before the section:
                   + node[1:index]
                   # everything that was in the subsection:
                   + subsection[1:])
        return True
    def candidate_index(self, node):
        """
        Find and return the promotion candidate and its index.

        The candidate is the first child that is not PreBibliographic
        (comments, etc.), and it only qualifies when it is a section
        and is also the *last* child. Return (None, None) if no valid
        candidate was found.
        """
        index = node.first_child_not_matching_class(
            nodes.PreBibliographic)
        if (index is None or len(node) > (index + 1)
            or not isinstance(node[index], nodes.section)):
            return None, None
        else:
            return node[index], index
class DocTitle(TitlePromoter):
    """
    Promote a lone top-level section title to the document title, and,
    when that succeeds, promote a remaining lone second-level section
    title to the document subtitle.

    In reStructuredText there is no explicit document title/subtitle
    markup; instead they are supplied implicitly:

    1. If the document's first non-comment element is its only top-level
       section, that section's title becomes the document title and the
       section's contents become the document's immediate contents.
    2. If step 1 succeeded and the promoted section itself contained a
       single second-level section as its first non-comment element,
       that section's title becomes the document subtitle (keeping the
       implicit hyperlink target on the subtitle element).

    Comments that preceded the title(s) end up as the first body
    elements after them.  Finally, the document's metadata title
    (``document['title']``) is determined (see `set_metadata`).

    .. _reStructuredText: http://docutils.sf.net/rst.html
    """

    default_priority = 320

    def set_metadata(self):
        """
        Set ``document['title']`` from, in order of priority:

        * an existing ``document['title']`` attribute,
        * the "title" setting,
        * the document title node (as promoted by `promote_title`).
        """
        document = self.document
        if document.hasattr('title'):
            return  # already set; highest priority wins
        settings_title = document.settings.title
        if settings_title is not None:
            document['title'] = settings_title
        elif len(document) and isinstance(document[0], nodes.title):
            document['title'] = document[0].astext()

    def apply(self):
        document = self.document
        if document.settings.setdefault('doctitle_xform', True):
            # promote_(sub)title are inherited from TitlePromoter.
            # Only look for a subtitle once a title was promoted.
            if self.promote_title(document):
                self.promote_subtitle(document)
        # Always record document['title'], even without promotion.
        self.set_metadata()
class SectionSubTitle(TitlePromoter):
    """
    Promote subsection titles to section subtitles, e.g. ::

        <section>
            <title>
                Title
            <section>
                <title>
                    Subtitle
                ...

    becomes ::

        <section>
            <title>
                Title
            <subtitle>
                Subtitle
            ...

    For details refer to the docstring of DocTitle.
    """

    default_priority = 350

    def apply(self):
        settings = self.document.settings
        if not settings.setdefault('sectsubtitle_xform', True):
            return
        for candidate in self.document._traverse(nodes.section):
            # Mutations only touch the not-yet-visited part of the tree,
            # so the iterator returned by _traverse() stays valid.
            self.promote_subtitle(candidate)
class DocInfo(Transform):
    """
    This transform is specific to the reStructuredText_ markup syntax;
    see "Bibliographic Fields" in the `reStructuredText Markup
    Specification`_ for a high-level description.  This transform
    should be run *after* the `DocTitle` transform.

    Given a field list as the first non-comment element after the
    document title and subtitle (if present), registered bibliographic
    field names are transformed to the corresponding DTD elements,
    becoming child elements of the "docinfo" element (except for a
    dedication and/or an abstract, which become "topic" elements after
    "docinfo").

    For example, given this document fragment after parsing::

        <document>
            <title>
                Document Title
            <field_list>
                <field>
                    <field_name>
                        Author
                    <field_body>
                        <paragraph>
                            A. Name
                <field>
                    <field_name>
                        Status
                    <field_body>
                        <paragraph>
                            $RCSfile$
        ...

    After running the bibliographic field list transform, the
    resulting document tree would look like this::

        <document>
            <title>
                Document Title
            <docinfo>
                <author>
                    A. Name
                <status>
                    frontmatter.py
        ...

    The "Status" field contained an expanded RCS keyword, which is
    normally (but optionally) cleaned up by the transform.  The sole
    contents of the field body must be a paragraph containing an
    expanded RCS keyword of the form "$keyword: expansion text $".  Any
    RCS keyword can be processed in any bibliographic field.  The
    dollar signs and leading RCS keyword name are removed.  Extra
    processing is done for the following RCS keywords:

    - "RCSfile" expands to the name of the file in the RCS or CVS
      repository, which is the name of the source file with a ",v"
      suffix appended.  The transform will remove the ",v" suffix.

    - "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC
      time zone).  The RCS Keywords transform will extract just the
      date itself and transform it to an ISO 8601 format date, as in
      "2000-12-31".

    (Since the source file for this text is itself stored under CVS,
    we can't show an example of the "Date" RCS keyword because we
    can't prevent any RCS keywords used in this explanation from
    being expanded.  Only the "RCSfile" keyword is stable; its
    expansion text changes only if the file name changes.)

    .. _reStructuredText: http://docutils.sf.net/rst.html
    .. _reStructuredText Markup Specification:
       http://docutils.sf.net/docs/ref/rst/restructuredtext.html
    """
    default_priority = 340
    biblio_nodes = {
          'author': nodes.author,
          'authors': nodes.authors,
          'organization': nodes.organization,
          'address': nodes.address,
          'contact': nodes.contact,
          'version': nodes.version,
          'revision': nodes.revision,
          'status': nodes.status,
          'date': nodes.date,
          'copyright': nodes.copyright,
          'dedication': nodes.topic,
          'abstract': nodes.topic}
    """Canonical field name (lowcased) to node class name mapping for
    bibliographic fields (field_list)."""
    def apply(self):
        # Honour the (settable) docinfo_xform switch; default: enabled.
        if not self.document.settings.setdefault('docinfo_xform', True):
            return
        document = self.document
        index = document.first_child_not_matching_class(
              nodes.PreBibliographic)
        if index is None:
            return
        candidate = document[index]
        if isinstance(candidate, nodes.field_list):
            # Insert the docinfo after title/subtitle and decorations.
            biblioindex = document.first_child_not_matching_class(
                  (nodes.Titular, nodes.Decorative))
            nodelist = self.extract_bibliographic(candidate)
            del document[index]         # untransformed field list (candidate)
            document[biblioindex:biblioindex] = nodelist
    def extract_bibliographic(self, field_list):
        """
        Convert recognized fields of `field_list` into docinfo children.

        Returns a list of nodes to splice into the document: a docinfo
        element (if any field was recognized) followed by dedication
        and/or abstract topics.  Unrecognized or malformed fields are
        kept inside the docinfo as plain fields (via TransformError).
        """
        docinfo = nodes.docinfo()
        bibliofields = self.language.bibliographic_fields
        labels = self.language.labels
        # At most one dedication and one abstract are allowed.
        topics = {'dedication': None, 'abstract': None}
        for field in field_list:
            try:
                name = field[0][0].astext()
                normedname = nodes.fully_normalize_name(name)
                # A bibliographic field must have exactly (name, body)
                # and a registered, non-empty field name.
                if not (len(field) == 2 and normedname in bibliofields
                        and self.check_empty_biblio_field(field, name)):
                    raise TransformError
                canonical = bibliofields[normedname]
                biblioclass = self.biblio_nodes[canonical]
                if issubclass(biblioclass, nodes.TextElement):
                    # Simple fields: body must be a single paragraph.
                    if not self.check_compound_biblio_field(field, name):
                        raise TransformError
                    utils.clean_rcs_keywords(
                          field[1][0], self.rcs_keyword_substitutions)
                    docinfo.append(biblioclass('', '', *field[1][0]))
                elif issubclass(biblioclass, nodes.authors):
                    # "Authors" supports several body layouts.
                    self.extract_authors(field, name, docinfo)
                elif issubclass(biblioclass, nodes.topic):
                    # Dedication/abstract become standalone topics.
                    if topics[canonical]:
                        field[-1] += self.document.reporter.warning(
                            'There can only be one "%s" field.' % name,
                            base_node=field)
                        raise TransformError
                    title = nodes.title(name, labels[canonical])
                    title[0].rawsource = labels[canonical]
                    topics[canonical] = biblioclass(
                        '', title, classes=[canonical], *field[1].children)
                else:
                    docinfo.append(biblioclass('', *field[1].children))
            except TransformError:
                # Fall back: keep the field as-is (but still clean RCS
                # keywords in a lone paragraph and tag it with a class).
                if len(field[-1]) == 1 \
                       and isinstance(field[-1][0], nodes.paragraph):
                    utils.clean_rcs_keywords(
                        field[-1][0], self.rcs_keyword_substitutions)
                # if normedname not in bibliofields:
                classvalue = nodes.make_id(normedname)
                if classvalue:
                    field['classes'].append(classvalue)
                docinfo.append(field)
        nodelist = []
        if len(docinfo) != 0:
            nodelist.append(docinfo)
        for name in ('dedication', 'abstract'):
            if topics[name]:
                nodelist.append(topics[name])
        return nodelist
    def check_empty_biblio_field(self, field, name):
        # Returns a true value iff the field body is non-empty;
        # otherwise attaches a warning and returns None.
        if len(field[-1]) < 1:
            field[-1] += self.document.reporter.warning(
                  'Cannot extract empty bibliographic field "%s".' % name,
                  base_node=field)
            return None
        return 1
    def check_compound_biblio_field(self, field, name):
        # Returns a true value iff the field body is exactly one
        # paragraph; otherwise attaches a warning and returns None.
        if len(field[-1]) > 1:
            field[-1] += self.document.reporter.warning(
                  'Cannot extract compound bibliographic field "%s".' % name,
                  base_node=field)
            return None
        if not isinstance(field[-1][0], nodes.paragraph):
            field[-1] += self.document.reporter.warning(
                  'Cannot extract bibliographic field "%s" containing '
                  'anything other than a single paragraph.' % name,
                  base_node=field)
            return None
        return 1
    # (pattern, replacement) pairs applied by utils.clean_rcs_keywords:
    # "Date" keeps only an ISO date; "RCSfile" drops the ",v" suffix;
    # any other "$keyword: text $" keeps just the text.
    rcs_keyword_substitutions = [
          (re.compile(r'\$' r'Date: (\d\d\d\d)[-/](\d\d)[-/](\d\d)[ T][\d:]+'
                      r'[^$]* \$', re.IGNORECASE), r'\1-\2-\3'),
          (re.compile(r'\$' r'RCSfile: (.+),v \$', re.IGNORECASE), r'\1'),
          (re.compile(r'\$[a-zA-Z]+: (.+) \$'), r'\1'),]
    def extract_authors(self, field, name, docinfo):
        """
        Append an `authors` node built from `field` to `docinfo`.

        Accepted body layouts: one paragraph (authors separated by a
        locale-dependent separator), a bullet list (one author per
        item), or multiple paragraphs (one author each).  On failure a
        warning is attached and TransformError is re-raised.
        """
        try:
            if len(field[1]) == 1:
                if isinstance(field[1][0], nodes.paragraph):
                    authors = self.authors_from_one_paragraph(field)
                elif isinstance(field[1][0], nodes.bullet_list):
                    authors = self.authors_from_bullet_list(field)
                else:
                    raise TransformError
            else:
                authors = self.authors_from_paragraphs(field)
            authornodes = [nodes.author('', '', *author)
                           for author in authors if author]
            if len(authornodes) >= 1:
                docinfo.append(nodes.authors('', *authornodes))
            else:
                raise TransformError
        except TransformError:
            field[-1] += self.document.reporter.warning(
                  'Bibliographic field "%s" incompatible with extraction: '
                  'it must contain either a single paragraph (with authors '
                  'separated by one of "%s"), multiple paragraphs (one per '
                  'author), or a bullet list with one paragraph (one author) '
                  'per item.'
                  % (name, ''.join(self.language.author_separators)),
                  base_node=field)
            raise
    def authors_from_one_paragraph(self, field):
        """Return list of Text nodes for authornames.

        The set of separators is locale dependent (default: ";"- or ",").
        """
        # @@ keep original formatting? (e.g. ``:authors: A. Test, *et-al*``)
        text = ''.join(unicode(node)
                       for node in field[1].traverse(nodes.Text))
        if not text:
            raise TransformError
        for authorsep in self.language.author_separators:
            # don't split at escaped `authorsep`:
            pattern = '(?<!\x00)%s' % authorsep
            authornames = re.split(pattern, text)
            if len(authornames) > 1:
                # First separator that actually splits the text wins.
                break
        authornames = (name.strip() for name in authornames)
        authors = [[nodes.Text(name, utils.unescape(name, True))]
                   for name in authornames if name]
        return authors
    def authors_from_bullet_list(self, field):
        # One author per list item; each item must be a lone paragraph.
        authors = []
        for item in field[1][0]:
            if isinstance(item, nodes.comment):
                continue
            if len(item) != 1 or not isinstance(item[0], nodes.paragraph):
                raise TransformError
            authors.append(item[0].children)
        if not authors:
            raise TransformError
        return authors
    def authors_from_paragraphs(self, field):
        # One author per paragraph; comments are tolerated and skipped.
        for item in field[1]:
            if not isinstance(item, (nodes.paragraph, nodes.comment)):
                raise TransformError
        authors = [item.children for item in field[1]
                   if not isinstance(item, nodes.comment)]
        return authors
# ---------------------------------------------------------------------------
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for nova management.
"""
from __future__ import print_function
import argparse
import os
import sys
import netaddr
from oslo.config import cfg
from oslo import messaging
import six
from nova.api.ec2 import ec2utils
from nova import availability_zones
from nova.compute import flavors
from nova import config
from nova import context
from nova import db
from nova.db import migration
from nova import exception
from nova.openstack.common import cliutils
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import quota
from nova import rpc
from nova import servicegroup
from nova import utils
from nova import version
# Global oslo.config handle.  The options imported below are declared in
# other nova modules; importing them here makes them readable (and shows
# them in --help) for this CLI without redefining them.
CONF = cfg.CONF
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('service_down_time', 'nova.service')
CONF.import_opt('flat_network_bridge', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('multi_host', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('vlan_start', 'nova.network.manager')
CONF.import_opt('vpn_start', 'nova.network.manager')
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('public_interface', 'nova.network.linux_net')
# Quota engine singleton used by the quota-related commands below.
QUOTAS = quota.QUOTAS
# Decorators for actions
def args(*args, **kwargs):
    """Decorator factory that attaches argparse argument specs to a command.

    Each application prepends ``(args, kwargs)`` to the decorated
    function's ``args`` attribute; because decorators apply bottom-up,
    insertion at index 0 keeps the specs in source order.
    """
    def _decorator(func):
        arg_specs = func.__dict__.setdefault('args', [])
        arg_specs.insert(0, (args, kwargs))
        return func
    return _decorator
def param2id(object_id):
    """Convert a volume id of any accepted form to the internal id.

    args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
    """
    if '-' not in object_id:
        # Already a plain internal id; pass through unchanged.
        return object_id
    # EC2-style ids ('vol-...'/'volume-...') map to the internal uuid.
    return ec2utils.ec2_vol_id_to_uuid(object_id)
class VpnCommands(object):
    """Class for managing VPNs."""

    @args('--project', dest='project_id', metavar='<Project name>',
          help='Project name')
    @args('--ip', metavar='<IP Address>', help='IP Address')
    @args('--port', metavar='<Port>', help='Port')
    def change(self, project_id, ip, port):
        """Change the ip and port for a vpn.

        Every network associated with the project gets the new VPN
        address/port; whether updating *all* networks is the desired
        behavior is an open question (patches accepted).
        """
        # TODO(tr3buchet): perhaps this shouldn't update all networks
        # associated with a project in the future
        ctxt = context.get_admin_context()
        for network in db.project_get_networks(ctxt, project_id):
            db.network_update(ctxt,
                              network['id'],
                              {'vpn_public_address': ip,
                               'vpn_public_port': int(port)})
class ShellCommands(object):
    # Interactive-shell helpers; each named command just delegates to
    # run() with the preferred shell, which falls back down the chain
    # bpython -> ipython -> plain python when imports fail.
    def bpython(self):
        """Runs a bpython shell.

        Falls back to Ipython/python shell if unavailable
        """
        self.run('bpython')
    def ipython(self):
        """Runs an Ipython shell.

        Falls back to Python shell if unavailable
        """
        self.run('ipython')
    def python(self):
        """Runs a python shell.

        Falls back to Python shell if unavailable
        """
        self.run('python')
    @args('--shell', metavar='<bpython|ipython|python >',
          help='Python shell')
    def run(self, shell=None):
        """Runs a Python interactive interpreter.

        Tries the requested shell (default: bpython) and degrades to
        ipython and then the built-in ``code`` REPL on ImportError.
        """
        if not shell:
            shell = 'bpython'
        if shell == 'bpython':
            try:
                import bpython
                bpython.embed()
            except ImportError:
                # bpython not installed; degrade to ipython.
                shell = 'ipython'
        if shell == 'ipython':
            try:
                import IPython
                # Explicitly pass an empty list as arguments, because
                # otherwise IPython would use sys.argv from this script.
                # NOTE(review): IPython.Shell.IPShell is a legacy API
                # removed in modern IPython releases — confirm the
                # supported IPython version.
                shell = IPython.Shell.IPShell(argv=[])
                shell.mainloop()
            except ImportError:
                shell = 'python'
        if shell == 'python':
            import code
            try:
                # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try',
                # because we already know 'readline' was imported successfully.
                readline.parse_and_bind("tab:complete")
            code.interact()
    @args('--path', metavar='<path>', help='Script path')
    def script(self, path):
        """Runs the script from the specified path with flags set properly.

        arguments: path
        """
        # NOTE(review): locals() is passed as the *globals* mapping and
        # globals() as *locals*, the reverse of exec()'s documented
        # (globals, locals) order — looks intentional here but confirm.
        exec(compile(open(path).read(), path, 'exec'), locals(), globals())
def _db_error(caught_exception):
    """Print a DB error plus a creation hint, then exit with status 1."""
    hint = _("The above error may show that the database has not "
             "been created.\nPlease create a database using "
             "'nova-manage db sync' before running this command.")
    print(caught_exception)
    print(hint)
    exit(1)
class ProjectCommands(object):
    """Class for managing projects."""

    @args('--project', dest='project_id', metavar='<Project name>',
          help='Project name')
    @args('--user', dest='user_id', metavar='<User name>',
          help='User name')
    @args('--key', metavar='<key>', help='Key')
    @args('--value', metavar='<value>', help='Value')
    def quota(self, project_id, user_id=None, key=None, value=None):
        """Create, update or display quotas for project/user

        If no quota key is provided, the quota will be displayed.
        If a valid quota key is provided and it does not exist,
        it will be created. Otherwise, it will be updated.

        Returns 2 on invalid key/value input; None otherwise.
        """
        ctxt = context.get_admin_context()
        if user_id:
            quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id)
        else:
            user_id = None
            quota = QUOTAS.get_project_quotas(ctxt, project_id)
        # if key is None, that means we need to show the quotas instead
        # of updating them
        if key:
            settable_quotas = QUOTAS.get_settable_quotas(ctxt,
                                                         project_id,
                                                         user_id=user_id)
            if key in quota:
                minimum = settable_quotas[key]['minimum']
                maximum = settable_quotas[key]['maximum']
                if value.lower() == 'unlimited':
                    value = -1
                if int(value) < -1:
                    print(_('Quota limit must be -1 or greater.'))
                    return(2)
                if ((int(value) < minimum) and
                    (maximum != -1 or (maximum == -1 and int(value) != -1))):
                    print(_('Quota limit must be greater than %s.') % minimum)
                    return(2)
                if maximum != -1 and int(value) > maximum:
                    print(_('Quota limit must be less than %s.') % maximum)
                    return(2)
                try:
                    db.quota_create(ctxt, project_id, key, value,
                                    user_id=user_id)
                except exception.QuotaExists:
                    # Quota row already exists: update it instead.
                    db.quota_update(ctxt, project_id, key, value,
                                    user_id=user_id)
            else:
                print(_('%(key)s is not a valid quota key. Valid options are: '
                        '%(options)s.') % {'key': key,
                                           'options': ', '.join(quota)})
                return(2)
        print_format = "%-36s %-10s %-10s %-10s"
        print(print_format % (
                    _('Quota'),
                    _('Limit'),
                    _('In Use'),
                    _('Reserved')))
        # Retrieve the quota after update
        if user_id:
            quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id)
        else:
            quota = QUOTAS.get_project_quotas(ctxt, project_id)
        for key, value in quota.iteritems():
            # FIX: test for None *before* comparing with 0 — the old
            # order (`< 0 or is None`) evaluated `None < 0` first, which
            # raises TypeError on Python 3 and made the None check dead
            # code on Python 2.
            if value['limit'] is None or value['limit'] < 0:
                value['limit'] = 'unlimited'
            print(print_format % (key, value['limit'], value['in_use'],
                                  value['reserved']))

    @args('--project', dest='project_id', metavar='<Project name>',
          help='Project name')
    def scrub(self, project_id):
        """Deletes data associated with project.

        Disassociates the project's networks and destroys its security
        groups.
        """
        admin_context = context.get_admin_context()
        networks = db.project_get_networks(admin_context, project_id)
        for network in networks:
            db.network_disassociate(admin_context, network['id'])
        groups = db.security_group_get_by_project(admin_context, project_id)
        for group in groups:
            db.security_group_destroy(admin_context, group['id'])

# Backwards-compatible alias: "account" commands are project commands.
AccountCommands = ProjectCommands
class FixedIpCommands(object):
    """Class for managing fixed ip."""

    @args('--host', metavar='<host>', help='Host')
    def list(self, host=None):
        """Lists all fixed ips (optionally by host).

        Returns 2 when the host lookup fails; None otherwise.
        """
        ctxt = context.get_admin_context()
        try:
            if host is None:
                fixed_ips = db.fixed_ip_get_all(ctxt)
            else:
                fixed_ips = db.fixed_ip_get_by_host(ctxt, host)
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return(2)
        # Index instances by uuid for O(1) lookups in the loop below.
        instances = db.instance_get_all(context.get_admin_context())
        instances_by_uuid = {}
        for instance in instances:
            instances_by_uuid[instance['uuid']] = instance
        print("%-18s\t%-15s\t%-15s\t%s" % (_('network'),
                                           _('IP address'),
                                           _('hostname'),
                                           _('host')))
        all_networks = {}
        try:
            # use network_get_all to retrieve all existing networks
            # this is to ensure that IPs associated with deleted networks
            # will not throw exceptions.
            for network in db.network_get_all(context.get_admin_context()):
                all_networks[network.id] = network
        except exception.NoNetworksFound:
            # do not have any networks, so even if there are IPs, these
            # IPs should have been deleted ones, so return.
            print(_('No fixed IP found.'))
            return
        has_ip = False
        for fixed_ip in fixed_ips:
            hostname = None
            # NOTE(review): this rebinding shadows the `host` parameter
            # for the rest of the loop — presumably intentional since
            # the filter was already applied above; confirm.
            host = None
            network = all_networks.get(fixed_ip['network_id'])
            if network:
                has_ip = True
                if fixed_ip.get('instance_uuid'):
                    instance = instances_by_uuid.get(fixed_ip['instance_uuid'])
                    if instance:
                        hostname = instance['hostname']
                        host = instance['host']
                    else:
                        print(_('WARNING: fixed ip %s allocated to missing'
                                ' instance') % str(fixed_ip['address']))
                print("%-18s\t%-15s\t%-15s\t%s" % (
                        network['cidr'],
                        fixed_ip['address'],
                        hostname, host))
        if not has_ip:
            print(_('No fixed IP found.'))

    @args('--address', metavar='<ip address>', help='IP address')
    def reserve(self, address):
        """Mark fixed ip as reserved

        arguments: address
        """
        return self._set_reserved(address, True)

    @args('--address', metavar='<ip address>', help='IP address')
    def unreserve(self, address):
        """Mark fixed ip as free to use

        arguments: address
        """
        return self._set_reserved(address, False)

    def _set_reserved(self, address, reserved):
        # Shared implementation for reserve()/unreserve(); returns 2 on
        # unknown address, None on success.
        ctxt = context.get_admin_context()
        try:
            fixed_ip = db.fixed_ip_get_by_address(ctxt, address)
            if fixed_ip is None:
                raise exception.NotFound('Could not find address')
            db.fixed_ip_update(ctxt, fixed_ip['address'],
                               {'reserved': reserved})
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return(2)
class FloatingIpCommands(object):
    """Class for managing floating ip."""

    @staticmethod
    def address_to_hosts(addresses):
        """Iterate over hosts within an address range.

        If an explicit range specifier is missing, the parameter is
        interpreted as a specific individual address.

        Raises InvalidInput for ranges that are too small (< 4
        addresses) or unreasonably large (>= 1,000,000 addresses).
        """
        try:
            # A bare address parses directly; wrap it in a list so the
            # caller can always iterate.
            return [netaddr.IPAddress(addresses)]
        except ValueError:
            net = netaddr.IPNetwork(addresses)
            if net.size < 4:
                reason = _("/%s should be specified as single address(es) "
                           "not in cidr format") % net.prefixlen
                raise exception.InvalidInput(reason=reason)
            elif net.size >= 1000000:
                # NOTE(dripton): If we generate a million IPs and put them in
                # the database, the system will slow to a crawl and/or run
                # out of memory and crash. This is clearly a misconfiguration.
                reason = _("Too many IP addresses will be generated. Please "
                           "increase /%s to reduce the number generated."
                          ) % net.prefixlen
                raise exception.InvalidInput(reason=reason)
            else:
                return net.iter_hosts()

    @args('--ip_range', metavar='<range>', help='IP range')
    @args('--pool', metavar='<pool>', help='Optional pool')
    @args('--interface', metavar='<interface>', help='Optional interface')
    def create(self, ip_range, pool=None, interface=None):
        """Creates floating ips for zone by range.

        Returns 1 when any address in the range already exists.
        """
        admin_context = context.get_admin_context()
        if not pool:
            pool = CONF.default_floating_pool
        if not interface:
            interface = CONF.public_interface
        # Generator of row dicts for the bulk insert.
        ips = ({'address': str(address), 'pool': pool, 'interface': interface}
               for address in self.address_to_hosts(ip_range))
        try:
            db.floating_ip_bulk_create(admin_context, ips)
        except exception.FloatingIpExists as exc:
            # NOTE(simplylizz): Maybe logging would be better here
            # instead of printing, but logging isn't used here and I
            # don't know why.
            print('error: %s' % exc)
            return(1)

    @args('--ip_range', metavar='<range>', help='IP range')
    def delete(self, ip_range):
        """Deletes floating ips by range."""
        admin_context = context.get_admin_context()
        ips = ({'address': str(address)}
               for address in self.address_to_hosts(ip_range))
        db.floating_ip_bulk_destroy(admin_context, ips)

    @args('--host', metavar='<host>', help='Host')
    def list(self, host=None):
        """Lists all floating ips (optionally by host).

        Note: if host is given, only active floating IPs are returned
        """
        ctxt = context.get_admin_context()
        try:
            if host is None:
                floating_ips = db.floating_ip_get_all(ctxt)
            else:
                floating_ips = db.floating_ip_get_all_by_host(ctxt, host)
        except exception.NoFloatingIpsDefined:
            print(_("No floating IP addresses have been defined."))
            return
        for floating_ip in floating_ips:
            instance_uuid = None
            if floating_ip['fixed_ip_id']:
                # Resolve the associated instance through the fixed ip.
                fixed_ip = db.fixed_ip_get(ctxt, floating_ip['fixed_ip_id'])
                instance_uuid = fixed_ip['instance_uuid']
            print("%s\t%s\t%s\t%s\t%s" % (floating_ip['project_id'],
                                          floating_ip['address'],
                                          instance_uuid,
                                          floating_ip['pool'],
                                          floating_ip['interface']))
class NetworkCommands(object):
    """Class for managing networks."""

    def _using_valid_network_api_class(self):
        # nova-network commands make no sense under Neutron; refuse and
        # tell the operator which tool to use instead.
        if utils.is_neutron():
            print(_("ERROR: Network commands are not supported when using the "
                    "Neutron API. Use python-neutronclient instead."))
            return False
        return True

    @args('--label', metavar='<label>', help='Label for network (ex: public)')
    @args('--fixed_range_v4', dest='cidr', metavar='<x.x.x.x/yy>',
          help='IPv4 subnet (ex: 10.0.0.0/8)')
    @args('--num_networks', metavar='<number>',
          help='Number of networks to create')
    @args('--network_size', metavar='<number>',
          help='Number of IPs per network')
    @args('--vlan', dest='vlan_start', metavar='<vlan id>', help='vlan id')
    @args('--vpn', dest='vpn_start', help='vpn start')
    @args('--fixed_range_v6', dest='cidr_v6',
          help='IPv6 subnet (ex: fe80::/64')
    @args('--gateway', help='gateway')
    @args('--gateway_v6', help='ipv6 gateway')
    @args('--bridge', metavar='<bridge>',
          help='VIFs on this network are connected to this bridge')
    @args('--bridge_interface', metavar='<bridge interface>',
          help='the bridge is connected to this interface')
    @args('--multi_host', metavar="<'T'|'F'>",
          help='Multi host')
    @args('--dns1', metavar="<DNS Address>", help='First DNS')
    @args('--dns2', metavar="<DNS Address>", help='Second DNS')
    @args('--uuid', metavar="<network uuid>", help='Network UUID')
    @args('--fixed_cidr', metavar='<x.x.x.x/yy>',
          help='IPv4 subnet for fixed IPS (ex: 10.20.0.0/16)')
    @args('--project_id', metavar="<project id>",
          help='Project id')
    @args('--priority', metavar="<number>", help='Network interface priority')
    def create(self, label=None, cidr=None, num_networks=None,
               network_size=None, multi_host=None, vlan_start=None,
               vpn_start=None, cidr_v6=None, gateway=None,
               gateway_v6=None, bridge=None, bridge_interface=None,
               dns1=None, dns2=None, project_id=None, priority=None,
               uuid=None, fixed_cidr=None):
        """Creates fixed ips for host by range.

        Returns 2 when the Neutron API is in use.
        """
        if not self._using_valid_network_api_class():
            return(2)
        # Collect every non-empty CLI option into kwargs by inspecting
        # locals(); relies on this being the first statement so locals()
        # holds only the parameters (plus 'self', which is excluded).
        kwargs = dict(((k, v) for k, v in locals().iteritems()
                       if v and k != "self"))
        if multi_host is not None:
            # Convert the 'T'/'F' CLI flag into a real boolean.
            kwargs['multi_host'] = multi_host == 'T'
        net_manager = importutils.import_object(CONF.network_manager)
        net_manager.create_networks(context.get_admin_context(), **kwargs)

    def list(self):
        """List all created networks.

        Returns 2 when the Neutron API is in use.
        """
        if not self._using_valid_network_api_class():
            return(2)
        _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"
        print(_fmt % (_('id'),
                      _('IPv4'),
                      _('IPv6'),
                      _('start address'),
                      _('DNS1'),
                      _('DNS2'),
                      _('VlanID'),
                      _('project'),
                      _("uuid")))
        try:
            # Since network_get_all can throw exception.NoNetworksFound
            # for this command to show a nice result, this exception
            # should be caught and handled as such.
            networks = db.network_get_all(context.get_admin_context())
        except exception.NoNetworksFound:
            print(_('No networks found'))
        else:
            for network in networks:
                print(_fmt % (network.id,
                              network.cidr,
                              network.cidr_v6,
                              network.dhcp_start,
                              network.dns1,
                              network.dns2,
                              network.vlan,
                              network.project_id,
                              network.uuid))

    @args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to delete')
    @args('--uuid', metavar='<uuid>', help='UUID of network to delete')
    def delete(self, fixed_range=None, uuid=None):
        """Deletes a network.

        One of fixed_range or uuid must be given; with the
        NeutronManager only uuid is accepted.
        """
        if not self._using_valid_network_api_class():
            return(2)
        if fixed_range is None and uuid is None:
            raise Exception(_("Please specify either fixed_range or uuid"))
        net_manager = importutils.import_object(CONF.network_manager)
        if "NeutronManager" in CONF.network_manager:
            if uuid is None:
                raise Exception(_("UUID is required to delete "
                                  "Neutron Networks"))
            if fixed_range:
                raise Exception(_("Deleting by fixed_range is not supported "
                                  "with the NeutronManager"))
        # delete the network
        net_manager.delete_network(context.get_admin_context(),
                                   fixed_range, uuid)

    @args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to modify')
    @args('--project', metavar='<project name>',
          help='Project name to associate')
    @args('--host', metavar='<host>', help='Host to associate')
    @args('--disassociate-project', action="store_true", dest='dis_project',
          default=False, help='Disassociate Network from Project')
    @args('--disassociate-host', action="store_true", dest='dis_host',
          default=False, help='Disassociate Host from Project')
    def modify(self, fixed_range, project=None, host=None,
               dis_project=None, dis_host=None):
        """Associate/Disassociate Network with Project and/or Host

        arguments: network project host
        leave any field blank to ignore it

        Returns 2 when the Neutron API is in use; 1 on conflicting
        associate/disassociate arguments.
        """
        if not self._using_valid_network_api_class():
            return(2)
        admin_context = context.get_admin_context()
        network = db.network_get_by_cidr(admin_context, fixed_range)
        net = {}
        # User can choose the following actions each for project and host:
        # 1) Associate (set not None value given by project/host parameter)
        # 2) Disassociate (set None by disassociate parameter)
        # 3) Keep unchanged (project/host key is not added to 'net')
        if dis_project:
            net['project_id'] = None
        if dis_host:
            net['host'] = None
        # The --disassociate-X are boolean options, but if the user
        # mistakenly provides a value, it will be used as a positional argument
        # and be erroneously interpreted as some other parameter (e.g.
        # a project instead of host value). The safest thing to do is error-out
        # with a message indicating that there is probably a problem with
        # how the disassociate modifications are being used.
        if dis_project or dis_host:
            if project or host:
                error_msg = "ERROR: Unexpected arguments provided. Please " \
                    "use separate commands."
                print(error_msg)
                return(1)
            db.network_update(admin_context, network['id'], net)
            return
        if project:
            net['project_id'] = project
        if host:
            net['host'] = host
        db.network_update(admin_context, network['id'], net)
class VmCommands(object):
    """Class for managing VM instances."""
    @args('--host', metavar='<host>', help='Host')
    def list(self, host=None):
        """Show a list of all instances."""
        header_fmt = ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
                      " %-10s %-10s %-10s %-5s")
        print(header_fmt % (_('instance'), _('node'), _('type'),
                            _('state'), _('launched'), _('image'),
                            _('kernel'), _('ramdisk'), _('project'),
                            _('user'), _('zone'), _('index')))
        ctxt = context.get_admin_context()
        if host is None:
            instances = db.instance_get_all(ctxt)
        else:
            instances = db.instance_get_all_by_host(ctxt, host)
        row_fmt = ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
                   " %-10s %-10s %-10s %-5d")
        for inst in instances:
            # Flavor info is embedded in the instance record.
            inst_type = flavors.extract_flavor(inst)
            print(row_fmt % (inst['display_name'],
                             inst['host'],
                             inst_type['name'],
                             inst['vm_state'],
                             inst['launched_at'],
                             inst['image_ref'],
                             inst['kernel_id'],
                             inst['ramdisk_id'],
                             inst['project_id'],
                             inst['user_id'],
                             inst['availability_zone'],
                             inst['launch_index']))
class ServiceCommands(object):
    """Enable and disable running services."""
    @args('--host', metavar='<host>', help='Host')
    @args('--service', metavar='<service>', help='Nova service')
    def list(self, host=None, service=None):
        """Show a list of all running services. Filter by host & service
        name
        """
        servicegroup_api = servicegroup.API()
        ctxt = context.get_admin_context()
        services = db.service_get_all(ctxt)
        # Attach availability-zone info to each service row before filtering.
        services = availability_zones.set_availability_zones(ctxt, services)
        if host:
            services = [s for s in services if s['host'] == host]
        if service:
            services = [s for s in services if s['binary'] == service]
        print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
        print(print_format % (
            _('Binary'),
            _('Host'),
            _('Zone'),
            _('Status'),
            _('State'),
            _('Updated_At')))
        for svc in services:
            # State column: ":-)" if the servicegroup reports the service
            # up, "XXX" otherwise.
            alive = servicegroup_api.service_is_up(svc)
            art = (alive and ":-)") or "XXX"
            active = 'enabled'
            if svc['disabled']:
                active = 'disabled'
            print(print_format % (svc['binary'], svc['host'],
                                  svc['availability_zone'], active, art,
                                  svc['updated_at']))
    @args('--host', metavar='<host>', help='Host')
    @args('--service', metavar='<service>', help='Nova service')
    def enable(self, host, service):
        """Enable scheduling for a service."""
        ctxt = context.get_admin_context()
        try:
            svc = db.service_get_by_args(ctxt, host, service)
            db.service_update(ctxt, svc['id'], {'disabled': False})
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return(2)
        print((_("Service %(service)s on host %(host)s enabled.") %
               {'service': service, 'host': host}))
    @args('--host', metavar='<host>', help='Host')
    @args('--service', metavar='<service>', help='Nova service')
    def disable(self, host, service):
        """Disable scheduling for a service."""
        ctxt = context.get_admin_context()
        try:
            svc = db.service_get_by_args(ctxt, host, service)
            db.service_update(ctxt, svc['id'], {'disabled': True})
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return(2)
        print((_("Service %(service)s on host %(host)s disabled.") %
               {'service': service, 'host': host}))
    def _show_host_resources(self, context, host):
        """Shows the physical/usage resource given by hosts.
        :param context: security context
        :param host: hostname
        :returns:
            example format is below::
                {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
                D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
                'vcpus_used': 12, 'memory_mb_used': 10240,
                'local_gb_used': 64}
        """
        # Getting compute node info and related instances info
        service_ref = db.service_get_by_compute_host(context, host)
        instance_refs = db.instance_get_all_by_host(context,
                                                    service_ref['host'])
        # Getting total available/used resource
        # NOTE(review): assumes the service has at least one associated
        # compute_node record — an empty list would raise IndexError here.
        compute_ref = service_ref['compute_node'][0]
        resource = {'vcpus': compute_ref['vcpus'],
                    'memory_mb': compute_ref['memory_mb'],
                    'local_gb': compute_ref['local_gb'],
                    'vcpus_used': compute_ref['vcpus_used'],
                    'memory_mb_used': compute_ref['memory_mb_used'],
                    'local_gb_used': compute_ref['local_gb_used']}
        usage = dict()
        if not instance_refs:
            return {'resource': resource, 'usage': usage}
        # Getting usage resource per project
        project_ids = [i['project_id'] for i in instance_refs]
        project_ids = list(set(project_ids))
        for project_id in project_ids:
            # Sum each resource over this project's instances on the host.
            vcpus = [i['vcpus'] for i in instance_refs
                     if i['project_id'] == project_id]
            mem = [i['memory_mb'] for i in instance_refs
                   if i['project_id'] == project_id]
            root = [i['root_gb'] for i in instance_refs
                    if i['project_id'] == project_id]
            ephemeral = [i['ephemeral_gb'] for i in instance_refs
                         if i['project_id'] == project_id]
            usage[project_id] = {'vcpus': sum(vcpus),
                                 'memory_mb': sum(mem),
                                 'root_gb': sum(root),
                                 'ephemeral_gb': sum(ephemeral)}
        return {'resource': resource, 'usage': usage}
    @args('--host', metavar='<host>', help='Host')
    def describe_resource(self, host):
        """Describes cpu/memory/hdd info for host.
        :param host: hostname.
        """
        try:
            result = self._show_host_resources(context.get_admin_context(),
                                               host=host)
        except exception.NovaException as ex:
            print(_("error: %s") % ex)
            return 2
        if not isinstance(result, dict):
            print(_('An unexpected error has occurred.'))
            print(_('[Result]'), result)
        else:
            # Printing a total and used_now
            # (NOTE)The host name width 16 characters
            print('%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' % {"a": _('HOST'),
                                                         "b": _('PROJECT'),
                                                         "c": _('cpu'),
                                                         "d": _('mem(mb)'),
                                                         "e": _('hdd')})
            print(('%(a)-16s(total)%(b)26s%(c)8s%(d)8s' %
                   {"a": host,
                    "b": result['resource']['vcpus'],
                    "c": result['resource']['memory_mb'],
                    "d": result['resource']['local_gb']}))
            print(('%(a)-16s(used_now)%(b)23s%(c)8s%(d)8s' %
                   {"a": host,
                    "b": result['resource']['vcpus_used'],
                    "c": result['resource']['memory_mb_used'],
                    "d": result['resource']['local_gb_used']}))
            # Printing a used_max
            # used_max is the sum of resources requested by all instances,
            # as opposed to what the hypervisor currently reports in use.
            cpu_sum = 0
            mem_sum = 0
            hdd_sum = 0
            for p_id, val in result['usage'].items():
                cpu_sum += val['vcpus']
                mem_sum += val['memory_mb']
                hdd_sum += val['root_gb']
                hdd_sum += val['ephemeral_gb']
            print('%(a)-16s(used_max)%(b)23s%(c)8s%(d)8s' % {"a": host,
                                                             "b": cpu_sum,
                                                             "c": mem_sum,
                                                             "d": hdd_sum})
            # Finally, one row per project with its aggregate usage.
            for p_id, val in result['usage'].items():
                print('%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' % {
                    "a": host,
                    "b": p_id,
                    "c": val['vcpus'],
                    "d": val['memory_mb'],
                    "e": val['root_gb'] + val['ephemeral_gb']})
class HostCommands(object):
    """List hosts."""
    def list(self, zone=None):
        """Show a list of all physical hosts. Filter by zone.
        args: [zone]
        """
        fmt = "%-25s\t%-15s"
        print(fmt % (_('host'), _('zone')))
        ctxt = context.get_admin_context()
        services = availability_zones.set_availability_zones(
            ctxt, db.service_get_all(ctxt))
        if zone:
            services = [s for s in services if s['availability_zone'] == zone]
        # Keep only the first service row seen for each distinct host.
        seen_hosts = set()
        unique = []
        for srv in services:
            if srv['host'] not in seen_hosts:
                seen_hosts.add(srv['host'])
                unique.append(srv)
        for h in unique:
            print(fmt % (h['host'], h['availability_zone']))
class DbCommands(object):
    """Class for managing the database."""
    def __init__(self):
        pass
    @args('--version', metavar='<version>', help='Database version')
    def sync(self, version=None):
        """Sync the database up to the most recent version."""
        return migration.db_sync(version)
    def version(self):
        """Print the current database version."""
        print(migration.db_version())
    @args('--max_rows', metavar='<number>',
          help='Maximum number of deleted rows to archive')
    def archive_deleted_rows(self, max_rows):
        """Move up to max_rows deleted rows from production tables to shadow
        tables.
        """
        if max_rows is not None:
            max_rows = int(max_rows)
            # Reject negative limits before touching the database.
            if max_rows < 0:
                print(_("Must supply a positive value for max_rows"))
                return 1
        db.archive_deleted_rows(context.get_admin_context(), max_rows)
class FlavorCommands(object):
    """Class for managing flavors.
    Note instance type is a deprecated synonym for flavor.
    """
    def _print_flavors(self, val):
        """Print a one-line human-readable summary of a flavor dict."""
        # is_public is stored as 1/0; index into ('private', 'public').
        is_public = ('private', 'public')[val["is_public"] == 1]
        print(("%s: Memory: %sMB, VCPUS: %s, Root: %sGB, Ephemeral: %sGb, "
            "FlavorID: %s, Swap: %sMB, RXTX Factor: %s, %s, ExtraSpecs %s") % (
            val["name"], val["memory_mb"], val["vcpus"], val["root_gb"],
            val["ephemeral_gb"], val["flavorid"], val["swap"],
            val["rxtx_factor"], is_public, val["extra_specs"]))
    @args('--name', metavar='<name>',
          help='Name of flavor')
    @args('--memory', metavar='<memory size>', help='Memory size')
    @args('--cpu', dest='vcpus', metavar='<num cores>', help='Number cpus')
    @args('--root_gb', metavar='<root_gb>', help='Root disk size')
    @args('--ephemeral_gb', metavar='<ephemeral_gb>',
          help='Ephemeral disk size')
    @args('--flavor', dest='flavorid', metavar='<flavor id>',
          help='Flavor ID')
    @args('--swap', metavar='<swap>', help='Swap')
    @args('--rxtx_factor', metavar='<rxtx_factor>', help='rxtx_factor')
    @args('--is_public', metavar='<is_public>',
          help='Make flavor accessible to the public')
    def create(self, name, memory, vcpus, root_gb, ephemeral_gb=0,
               flavorid=None, swap=0, rxtx_factor=1.0, is_public=True):
        """Creates flavors.

        Returns a non-zero status code on failure: 1 for invalid input,
        2 for a duplicate flavor, 3 for any other error.
        """
        try:
            flavors.create(name, memory, vcpus, root_gb,
                           ephemeral_gb=ephemeral_gb, flavorid=flavorid,
                           swap=swap, rxtx_factor=rxtx_factor,
                           is_public=is_public)
        except exception.InvalidInput as e:
            print(_("Must supply valid parameters to create flavor"))
            print(e)
            return 1
        except exception.FlavorExists:
            print(_("Flavor exists."))
            print(_("Please ensure flavor name and flavorid are "
                    "unique."))
            print(_("Currently defined flavor names and flavorids:"))
            print()
            self.list()
            return 2
        except Exception:
            print(_("Unknown error"))
            return 3
        else:
            print(_("%s created") % name)
    @args('--name', metavar='<name>', help='Name of flavor')
    def delete(self, name):
        """Marks flavors as deleted.

        Returns 1 when the flavor does not exist, 2 on a DB error and
        3 for any other failure.
        """
        try:
            flavors.destroy(name)
        except exception.FlavorNotFound:
            print(_("Valid flavor name is required"))
            return 1
        except db_exc.DBError as e:
            print(_("DB Error: %s") % e)
            return 2
        except Exception:
            return 3
        else:
            print(_("%s deleted") % name)
    @args('--name', metavar='<name>', help='Name of flavor')
    def list(self, name=None):
        """Lists all active or specific flavors."""
        try:
            if name is None:
                inst_types = flavors.get_all_flavors()
            else:
                inst_types = flavors.get_flavor_by_name(name)
        except db_exc.DBError as e:
            _db_error(e)
        # get_all_flavors() returns a dict of flavor-dicts keyed by name,
        # while get_flavor_by_name() returns a single flavor dict; detect
        # which shape we got by inspecting the first value.
        # NOTE: list(...)[0] and .items() replace the Python 2-only
        # values()[0] indexing and .iteritems() so this also runs on py3.
        if isinstance(list(inst_types.values())[0], dict):
            for k, v in inst_types.items():
                self._print_flavors(v)
        else:
            self._print_flavors(inst_types)
    @args('--name', metavar='<name>', help='Name of flavor')
    @args('--key', metavar='<key>', help='The key of the key/value pair')
    @args('--value', metavar='<value>', help='The value of the key/value pair')
    def set_key(self, name, key, value=None):
        """Add key/value pair to specified flavor's extra_specs."""
        try:
            try:
                inst_type = flavors.get_flavor_by_name(name)
            except exception.FlavorNotFoundByName as e:
                print(e)
                return 2
            ctxt = context.get_admin_context()
            ext_spec = {key: value}
            db.flavor_extra_specs_update_or_create(
                ctxt,
                inst_type["flavorid"],
                ext_spec)
            print((_("Key %(key)s set to %(value)s on instance "
                     "type %(name)s") %
                   {'key': key, 'value': value, 'name': name}))
        except db_exc.DBError as e:
            _db_error(e)
    @args('--name', metavar='<name>', help='Name of flavor')
    @args('--key', metavar='<key>', help='The key to be deleted')
    def unset_key(self, name, key):
        """Delete the specified extra spec for flavor."""
        try:
            try:
                inst_type = flavors.get_flavor_by_name(name)
            except exception.FlavorNotFoundByName as e:
                print(e)
                return 2
            ctxt = context.get_admin_context()
            db.flavor_extra_specs_delete(
                ctxt,
                inst_type["flavorid"],
                key)
            print((_("Key %(key)s on flavor %(name)s unset") %
                   {'key': key, 'name': name}))
        except db_exc.DBError as e:
            _db_error(e)
class AgentBuildCommands(object):
    """Class for managing agent builds."""
    @args('--os', metavar='<os>', help='os')
    @args('--architecture', dest='architecture',
          metavar='<architecture>', help='architecture')
    @args('--version', metavar='<version>', help='version')
    @args('--url', metavar='<url>', help='url')
    @args('--md5hash', metavar='<md5hash>', help='md5hash')
    @args('--hypervisor', metavar='<hypervisor>',
          help='hypervisor(default: xen)')
    def create(self, os, architecture, version, url, md5hash,
               hypervisor='xen'):
        """Creates a new agent build."""
        ctxt = context.get_admin_context()
        db.agent_build_create(ctxt, {'hypervisor': hypervisor,
                                     'os': os,
                                     'architecture': architecture,
                                     'version': version,
                                     'url': url,
                                     'md5hash': md5hash})
    @args('--os', metavar='<os>', help='os')
    @args('--architecture', dest='architecture',
          metavar='<architecture>', help='architecture')
    @args('--hypervisor', metavar='<hypervisor>',
          help='hypervisor(default: xen)')
    def delete(self, os, architecture, hypervisor='xen'):
        """Deletes an existing agent build."""
        ctxt = context.get_admin_context()
        # Builds are uniquely identified by (hypervisor, os, architecture).
        agent_build_ref = db.agent_build_get_by_triple(ctxt,
                                                       hypervisor, os,
                                                       architecture)
        db.agent_build_destroy(ctxt, agent_build_ref['id'])
    @args('--hypervisor', metavar='<hypervisor>',
          help='hypervisor(default: None)')
    def list(self, hypervisor=None):
        """Lists all agent builds.
        arguments: <none>
        """
        fmt = "%-10s %-8s %12s %s"
        ctxt = context.get_admin_context()
        # Group builds by hypervisor so each hypervisor gets its own table.
        by_hypervisor = {}
        for agent_build in db.agent_build_get_all(ctxt):
            by_hypervisor.setdefault(agent_build.hypervisor,
                                     []).append(agent_build)
        # NOTE: .items() replaces the Python 2-only .iteritems() so this
        # command also works on Python 3.
        for key, buildlist in by_hypervisor.items():
            if hypervisor and key != hypervisor:
                continue
            print(_('Hypervisor: %s') % key)
            print(fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32))
            for agent_build in buildlist:
                print(fmt % (agent_build.os, agent_build.architecture,
                             agent_build.version, agent_build.md5hash))
                print(' %s' % agent_build.url)
            print()
    @args('--os', metavar='<os>', help='os')
    @args('--architecture', dest='architecture',
          metavar='<architecture>', help='architecture')
    @args('--version', metavar='<version>', help='version')
    @args('--url', metavar='<url>', help='url')
    @args('--md5hash', metavar='<md5hash>', help='md5hash')
    @args('--hypervisor', metavar='<hypervisor>',
          help='hypervisor(default: xen)')
    def modify(self, os, architecture, version, url, md5hash,
               hypervisor='xen'):
        """Update an existing agent build."""
        ctxt = context.get_admin_context()
        agent_build_ref = db.agent_build_get_by_triple(ctxt,
                                                       hypervisor, os,
                                                       architecture)
        db.agent_build_update(ctxt, agent_build_ref['id'],
                              {'version': version,
                               'url': url,
                               'md5hash': md5hash})
class GetLogCommands(object):
    """Get logging information."""
    def errors(self):
        """Get all of the errors from the log files."""
        error_found = 0
        if CONF.log_dir:
            logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
            # NOTE: 'filename' (not 'file') avoids shadowing the builtin, and
            # the context manager closes each handle promptly instead of
            # leaking it until garbage collection.
            for filename in logs:
                log_file = os.path.join(CONF.log_dir, filename)
                with open(log_file, "r") as f:
                    lines = [line.strip() for line in f]
                # Scan newest-first so line numbers count from the file end.
                lines.reverse()
                print_name = 0
                for index, line in enumerate(lines):
                    if line.find(" ERROR ") > 0:
                        error_found += 1
                        # Print the file header once, before its first error.
                        if print_name == 0:
                            print(log_file + ":-")
                            print_name = 1
                        linenum = len(lines) - index
                        print((_('Line %(linenum)d : %(line)s') %
                               {'linenum': linenum, 'line': line}))
        if error_found == 0:
            print(_('No errors in logfiles!'))
    @args('--num_entries', metavar='<number of entries>',
          help='number of entries(default: 10)')
    def syslog(self, num_entries=10):
        """Get <num_entries> of the nova syslog events."""
        entries = int(num_entries)
        count = 0
        # Prefer the Debian/Ubuntu location, falling back to RHEL's.
        if os.path.exists('/var/log/syslog'):
            log_file = '/var/log/syslog'
        elif os.path.exists('/var/log/messages'):
            log_file = '/var/log/messages'
        else:
            print(_('Unable to find system log file!'))
            return 1
        with open(log_file, "r") as f:
            lines = [line.strip() for line in f]
        lines.reverse()
        print(_('Last %s nova syslog entries:-') % (entries))
        for line in lines:
            if line.find("nova") > 0:
                count += 1
                print("%s" % (line))
                if count == entries:
                    break
        if count == 0:
            print(_('No nova entries in syslog!'))
class CellCommands(object):
    """Commands for managing cells."""
    @args('--name', metavar='<name>', help='Name for the new cell')
    @args('--cell_type', metavar='<parent|child>',
          help='Whether the cell is a parent or child')
    @args('--username', metavar='<username>',
          help='Username for the message broker in this cell')
    @args('--password', metavar='<password>',
          help='Password for the message broker in this cell')
    @args('--hostname', metavar='<hostname>',
          help='Address of the message broker in this cell')
    @args('--port', metavar='<number>',
          help='Port number of the message broker in this cell')
    @args('--virtual_host', metavar='<virtual_host>',
          help='The virtual host of the message broker in this cell')
    @args('--woffset', metavar='<float>')
    @args('--wscale', metavar='<float>')
    def create(self, name, cell_type='child', username=None, password=None,
               hostname=None, port=None, virtual_host=None,
               woffset=None, wscale=None):
        """Create a new cell record from the given broker/weight settings."""
        if cell_type not in ('parent', 'child'):
            print("Error: cell type must be 'parent' or 'child'")
            return 2
        # Build the transport URL describing this cell's message broker.
        broker = messaging.TransportHost(hostname=hostname,
                                         port=int(port),
                                         username=username,
                                         password=password)
        transport_url = rpc.get_transport_url()
        transport_url.hosts.append(broker)
        transport_url.virtual_host = virtual_host
        values = {
            'name': name,
            'is_parent': cell_type == 'parent',
            'transport_url': str(transport_url),
            'weight_offset': float(woffset),
            'weight_scale': float(wscale),
        }
        db.cell_create(context.get_admin_context(), values)
    @args('--cell_name', metavar='<cell_name>',
          help='Name of the cell to delete')
    def delete(self, cell_name):
        """Remove the named cell from the database."""
        db.cell_delete(context.get_admin_context(), cell_name)
    def list(self):
        """Print a table of all cells and their broker endpoints."""
        cells = db.cell_get_all(context.get_admin_context())
        fmt = "%3s %-10s %-6s %-10s %-15s %-5s %-10s"
        divider = fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
                         '-' * 5, '-' * 10)
        print(fmt % ('Id', 'Name', 'Type', 'Username', 'Hostname',
                     'Port', 'VHost'))
        print(divider)
        for cell in cells:
            url = rpc.get_transport_url(cell.transport_url)
            # Show the first broker host, or an empty placeholder when the
            # stored URL carries no hosts.
            host = url.hosts[0] if url.hosts else messaging.TransportHost()
            print(fmt % (cell.id, cell.name,
                         'parent' if cell.is_parent else 'child',
                         host.username, host.hostname,
                         host.port, url.virtual_host))
        print(divider)
# Maps each top-level nova-manage category name to the class implementing
# its subcommands; consumed by add_command_parsers() when building the CLI.
CATEGORIES = {
    'account': AccountCommands,
    'agent': AgentBuildCommands,
    'cell': CellCommands,
    'db': DbCommands,
    'fixed': FixedIpCommands,
    'flavor': FlavorCommands,
    'floating': FloatingIpCommands,
    'host': HostCommands,
    'logs': GetLogCommands,
    'network': NetworkCommands,
    'project': ProjectCommands,
    'service': ServiceCommands,
    'shell': ShellCommands,
    'vm': VmCommands,
    'vpn': VpnCommands,
    }
def methods_of(obj):
    """Get all callable methods of an object that don't start with underscore
    returns a list of tuples of the form (method_name, method)
    """
    return [(attr, getattr(obj, attr))
            for attr in dir(obj)
            if not attr.startswith('_') and callable(getattr(obj, attr))]
def add_command_parsers(subparsers):
    """Build the CLI parser tree: one subparser per category and action.

    Invoked by oslo.config through the SubCommandOpt handler. Action
    arguments are re-namespaced under an 'action_kwarg_' dest prefix so
    main() can later recover them from CONF.category.
    """
    parser = subparsers.add_parser('version')
    parser = subparsers.add_parser('bash-completion')
    parser.add_argument('query_category', nargs='?')
    for category in CATEGORIES:
        command_object = CATEGORIES[category]()
        parser = subparsers.add_parser(category)
        parser.set_defaults(command_object=command_object)
        category_subparsers = parser.add_subparsers(dest='action')
        for (action, action_fn) in methods_of(command_object):
            parser = category_subparsers.add_parser(action)
            action_kwargs = []
            # Each @args decorator stored (args, kwargs) tuples on the
            # action function; replay them onto the argparse parser.
            for args, kwargs in getattr(action_fn, 'args', []):
                # FIXME(markmc): hack to assume dest is the arg name without
                # the leading hyphens if no dest is supplied
                kwargs.setdefault('dest', args[0][2:])
                if kwargs['dest'].startswith('action_kwarg_'):
                    action_kwargs.append(
                        kwargs['dest'][len('action_kwarg_'):])
                else:
                    action_kwargs.append(kwargs['dest'])
                    kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
                parser.add_argument(*args, **kwargs)
            parser.set_defaults(action_fn=action_fn)
            parser.set_defaults(action_kwargs=action_kwargs)
            # Catch-all positional for values passed without option flags.
            parser.add_argument('action_args', nargs='*',
                                help=argparse.SUPPRESS)
# Top-level 'category' subcommand option registered with oslo.config in
# main(); add_command_parsers() populates its argparse subparsers.
category_opt = cfg.SubCommandOpt('category',
                                 title='Command categories',
                                 help='Available categories',
                                 handler=add_command_parsers)
def main():
    """Parse options and call the appropriate class/method.

    Returns a shell-style exit code: 0 on success, 1 for bad arguments,
    2 for config problems, otherwise whatever the invoked action returns.
    """
    CONF.register_cli_opt(category_opt)
    try:
        config.parse_args(sys.argv)
        logging.setup("nova")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                # Re-exec the entire command via sudo as the config file's
                # owning uid; on success this call never returns.
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_('sudo failed, continuing as if nothing happened'))
        print(_('Please re-run nova-manage as root.'))
        return(2)
    if CONF.category.name == "version":
        print(version.version_string_with_package())
        return(0)
    if CONF.category.name == "bash-completion":
        # Emit category or action names for shell tab-completion.
        if not CONF.category.query_category:
            print(" ".join(CATEGORIES.keys()))
        elif CONF.category.query_category in CATEGORIES:
            fn = CATEGORIES[CONF.category.query_category]
            command_object = fn()
            actions = methods_of(command_object)
            print(" ".join([k for (k, v) in actions]))
        return(0)
    fn = CONF.category.action_fn
    # NOTE(review): str.decode() is Python 2-only; on Python 3 argv values
    # are already text and these calls would raise AttributeError — confirm
    # the target interpreter before porting.
    fn_args = [arg.decode('utf-8') for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, 'action_kwarg_' + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = v.decode('utf-8')
        fn_kwargs[k] = v
    # call the action with the remaining arguments
    # check arguments
    try:
        cliutils.validate_args(fn, *fn_args, **fn_kwargs)
    except cliutils.MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print(e)
        return(1)
    try:
        ret = fn(*fn_args, **fn_kwargs)
        rpc.cleanup()
        return(ret)
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
| |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import hashlib
import logging
import os
import re
import selectors
import threading
import time
from contextlib import closing
from pants.base.build_environment import get_buildroot
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.nailgun_client import NailgunClient
from pants.pantsd.process_manager import FingerprintedProcessManager, ProcessGroup
from pants.util.collections import ensure_str_list
from pants.util.dirutil import read_file, safe_file_dump, safe_open
from pants.util.memo import memoized_classproperty
logger = logging.getLogger(__name__)
class NailgunProcessGroup(ProcessGroup):
    """Process group over the nailgun servers started by pants."""
    _NAILGUN_KILL_LOCK = threading.Lock()
    def __init__(self, metadata_base_dir=None):
        super().__init__(name="nailgun", metadata_base_dir=metadata_base_dir)
    # TODO: this should enumerate the .pids dir first, then fallback to ps enumeration (& warn).
    def _iter_nailgun_instances(self, everywhere=False):
        def is_pants_nailgun(proc):
            if proc.name() != NailgunExecutor._PROCESS_NAME:
                return False
            if everywhere:
                # Any pants-started nailgun, regardless of buildroot.
                return any(
                    arg.startswith(NailgunExecutor._PANTS_NG_ARG_PREFIX)
                    for arg in proc.cmdline()
                )
            # Only nailguns tagged with this buildroot.
            return NailgunExecutor._PANTS_NG_BUILDROOT_ARG in proc.cmdline()
        return self.iter_instances(is_pants_nailgun)
    def killall(self, everywhere=False):
        """Kills all nailgun servers started by pants.
        :param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;
                                otherwise restricts the nailguns killed to those started for the
                                current build root.
        """
        with self._NAILGUN_KILL_LOCK:
            for proc in self._iter_nailgun_instances(everywhere):
                logger.info("killing nailgun server pid={pid}".format(pid=proc.pid))
                proc.terminate()
# TODO: Once we integrate standard logging into our reporting framework, we can consider making
# some of the log.debug() below into log.info(). Right now it just looks wrong on the console.
class NailgunExecutor(Executor, FingerprintedProcessManager):
    """Executes java programs by launching them in nailgun server.
    If a nailgun is not available for a given set of jvm args and classpath, one is launched and re-
    used for the given jvm args and classpath on subsequent runs.
    """
    # 'NGServer 0.9.1 started on 127.0.0.1, port 53785.'
    # Extracts the listening port from the nailgun server's startup banner.
    _NG_PORT_REGEX = re.compile(r".*\s+port\s+(\d+)\.$")
    # Used to identify if we own a given nailgun server.
    FINGERPRINT_CMD_KEY = "-Dpants.nailgun.fingerprint"
    # JVM arg prefixes used to tag pants-started nailguns with their
    # buildroot and owning workdir so they can be found again later.
    _PANTS_NG_ARG_PREFIX = "-Dpants.buildroot"
    _PANTS_OWNER_ARG_PREFIX = "-Dpants.nailgun.owner"
    @memoized_classproperty
    def _PANTS_NG_BUILDROOT_ARG(cls):
        # Computed lazily and cached, since get_buildroot() is not free.
        return "=".join((cls._PANTS_NG_ARG_PREFIX, get_buildroot()))
    # Serializes fingerprint-check + spawn so concurrent runs don't race.
    _NAILGUN_SPAWN_LOCK = threading.Lock()
    # Process name nailgun servers appear under in ps output.
    _PROCESS_NAME = "java"
    def __init__(
        self,
        identity,
        workdir,
        nailgun_classpath,
        distribution,
        startup_timeout=10,
        connect_timeout=10,
        connect_attempts=5,
        metadata_base_dir=None,
    ):
        """
        :param identity: logical name of this executor; also used as the process metadata name.
        :param str workdir: directory holding the server's captured stdout/stderr files.
        :param nailgun_classpath: classpath entries for the nailgun server itself.
        :param distribution: the java Distribution used to launch the server.
        :param int startup_timeout: seconds to wait for the spawned server's pid.
        :param int connect_timeout: seconds to wait for the server to announce its port.
        :param int connect_attempts: connection retries before giving up.
        :param metadata_base_dir: base dir for process metadata files.
        """
        # NB: both bases are initialized explicitly (no cooperative super()
        # chain) since they take different constructor arguments.
        Executor.__init__(self, distribution=distribution)
        FingerprintedProcessManager.__init__(
            self,
            name=identity,
            process_name=self._PROCESS_NAME,
            metadata_base_dir=metadata_base_dir,
        )
        if not isinstance(workdir, str):
            raise ValueError(
                "Workdir must be a path string, not: {workdir}".format(workdir=workdir)
            )
        self._identity = identity
        self._workdir = workdir
        # The server's stdio capture files live under the workdir.
        self._ng_stdout = os.path.join(workdir, "stdout")
        self._ng_stderr = os.path.join(workdir, "stderr")
        self._nailgun_classpath = ensure_str_list(nailgun_classpath)
        self._startup_timeout = startup_timeout
        self._connect_timeout = connect_timeout
        self._connect_attempts = connect_attempts
def __str__(self):
return "NailgunExecutor({identity}, dist={dist}, pid={pid} socket={socket})".format(
identity=self._identity, dist=self._distribution, pid=self.pid, socket=self.socket
)
def _create_owner_arg(self, workdir):
# Currently the owner is identified via the full path to the workdir.
return "=".join((self._PANTS_OWNER_ARG_PREFIX, workdir))
def _create_fingerprint_arg(self, fingerprint):
return "=".join((self.FINGERPRINT_CMD_KEY, fingerprint))
@staticmethod
def _fingerprint(jvm_options, classpath, java_version):
"""Compute a fingerprint for this invocation of a Java task.
:param list jvm_options: JVM options passed to the java invocation
:param list classpath: The -cp arguments passed to the java invocation
:param Revision java_version: return value from Distribution.version()
:return: a hexstring representing a fingerprint of the java invocation
"""
digest = hashlib.sha1()
# TODO(John Sirois): hash classpath contents?
encoded_jvm_options = [option.encode() for option in sorted(jvm_options)]
encoded_classpath = [cp.encode() for cp in sorted(classpath)]
encoded_java_version = repr(java_version).encode()
for item in (encoded_jvm_options, encoded_classpath, encoded_java_version):
digest.update(str(item).encode())
return digest.hexdigest()
    def _runner(self, classpath, main, jvm_options, args):
        """Runner factory.
        Called via Executor.execute().
        """
        command = self._create_command(classpath, main, jvm_options, args)
        class Runner(self.Runner):
            # NB: methods deliberately name their instance parameter `this`
            # so that `self` keeps referring to the enclosing NailgunExecutor
            # via the closure (except `command`, which shadows it on purpose
            # since it only needs the captured `command` list).
            @property
            def executor(this):
                return self
            @property
            def command(self):
                # Return a copy so callers can't mutate the captured command.
                return list(command)
            def run(this, stdout=None, stderr=None, stdin=None, cwd=None):
                nailgun = None
                try:
                    nailgun = self._get_nailgun_client(
                        jvm_options, classpath, stdout, stderr, stdin
                    )
                    logger.debug(
                        "Executing via {ng_desc}: {cmd}".format(ng_desc=nailgun, cmd=this.cmd)
                    )
                    return nailgun.execute(main, cwd, *args)
                except (NailgunClient.NailgunError, self.InitialNailgunConnectTimedOut) as e:
                    # Any nailgun failure invalidates the server: kill it so
                    # the next run spawns a fresh one.
                    self.terminate()
                    raise self.Error(
                        "Problem launching via {ng_desc} command {main} {args}: {msg}".format(
                            ng_desc=nailgun or "<no nailgun connection>",
                            main=main,
                            args=" ".join(args),
                            msg=e,
                        )
                    )
        return Runner()
def _check_nailgun_state(self, new_fingerprint):
running = self.is_alive()
updated = self.needs_restart(new_fingerprint)
logging.debug(
"Nailgun {nailgun} state: updated={up!s} running={run!s} fingerprint={old_fp} "
"new_fingerprint={new_fp} distribution={old_dist} new_distribution={new_dist}".format(
nailgun=self._identity,
up=updated,
run=running,
old_fp=self.fingerprint,
new_fp=new_fingerprint,
old_dist=self.cmd,
new_dist=self._distribution.java,
)
)
return running, updated
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin):
"""This (somewhat unfortunately) is the main entrypoint to this class via the Runner.
It handles creation of the running nailgun server as well as creation of the client.
"""
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug(
"Found running nailgun server that needs updating, killing {server}".format(
server=self._identity
)
)
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(
new_fingerprint, jvm_options, classpath, stdout, stderr, stdin
)
return self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
    class InitialNailgunConnectTimedOut(Exception):
        """Raised when a freshly-spawned nailgun server fails to announce a port in time."""
        _msg_fmt = """Failed to read nailgun output after {timeout} seconds!
        Stdout:
        {stdout}
        Stderr:
        {stderr}"""
        def __init__(self, timeout, stdout, stderr):
            # Render the captured server output into the exception message.
            msg = self._msg_fmt.format(timeout=timeout, stdout=stdout, stderr=stderr)
            super(NailgunExecutor.InitialNailgunConnectTimedOut, self).__init__(msg)
    def _await_socket(self, timeout):
        """Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun
        stdout."""
        start_time = time.time()
        accumulated_stdout = ""
        def calculate_remaining_time():
            # NB: returns the time *past* the deadline — negative while time
            # still remains, positive once the timeout has elapsed.
            return time.time() - (start_time + timeout)
        def possibly_raise_timeout(remaining_time):
            # remaining_time > 0 means the deadline has passed (see above).
            if remaining_time > 0:
                stderr = read_file(self._ng_stderr, binary_mode=True)
                raise self.InitialNailgunConnectTimedOut(
                    timeout=timeout, stdout=accumulated_stdout, stderr=stderr,
                )
        # NB: We use PollSelector, rather than the more efficient DefaultSelector, because
        # DefaultSelector results in using the epoll() syscall on Linux, which does not work with
        # regular text files like ng_stdout. See https://stackoverflow.com/a/8645770.
        with selectors.PollSelector() as selector, safe_open(self._ng_stdout, "r") as ng_stdout:
            selector.register(ng_stdout, selectors.EVENT_READ)
            while 1:
                remaining_time = calculate_remaining_time()
                possibly_raise_timeout(remaining_time)
                # Negate to turn "time past deadline" back into a positive
                # select() timeout.
                events = selector.select(timeout=-1 * remaining_time)
                if events:
                    line = ng_stdout.readline()  # TODO: address deadlock risk here.
                    try:
                        return self._NG_PORT_REGEX.match(line).group(1)
                    except AttributeError:
                        # No port in this line; keep accumulating output for
                        # a possible timeout message.
                        pass
                    accumulated_stdout += line
def _create_ngclient(self, port, stdout, stderr, stdin):
return NailgunClient(port=port, ins=stdin, out=stdout, err=stderr)
def ensure_connectable(self, nailgun):
    """Ensures that a nailgun client is connectable or raises NailgunError."""
    attempt = 1
    while True:
        try:
            sock = nailgun.try_connect()
            try:
                logger.debug(
                    "Verified new ng server is connectable at {}".format(sock.getpeername())
                )
                return
            finally:
                # Close the probe socket whether or not getpeername() worked.
                sock.close()
        except nailgun.NailgunConnectionError:
            if attempt >= self._connect_attempts:
                logger.debug(
                    "Failed to connect to ng after {} attempts".format(self._connect_attempts)
                )
                raise  # Re-raise the NailgunConnectionError which provides more context to the user.
            attempt += 1
            time.sleep(self.WAIT_INTERVAL_SEC)
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr, stdin):
    """Synchronously spawn a new nailgun server.

    Blocks until the daemonized server has written its pid and printed its
    listening port, then returns a NailgunClient verified to be connectable.
    """
    # Truncate the nailguns stdout & stderr.
    safe_file_dump(self._ng_stdout, b"", mode="wb")
    safe_file_dump(self._ng_stderr, b"", mode="wb")
    # Tag the JVM command line with buildroot/owner/fingerprint marker args so
    # this exact server process can later be recognized and validated.
    jvm_options = jvm_options + [
        self._PANTS_NG_BUILDROOT_ARG,
        self._create_owner_arg(self._workdir),
        self._create_fingerprint_arg(fingerprint),
    ]
    post_fork_child_opts = dict(
        fingerprint=fingerprint,
        jvm_options=jvm_options,
        classpath=classpath,
        stdout=stdout,
        stderr=stderr,
    )
    logger.debug(
        "Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}".format(
            i=self._identity, f=fingerprint, j=jvm_options, cp=classpath
        )
    )
    self.daemon_spawn(post_fork_child_opts=post_fork_child_opts)
    # Wait for and write the port information in the parent so we can bail on exception/timeout.
    self.await_pid(self._startup_timeout)
    self.write_socket(self._await_socket(self._connect_timeout))
    logger.debug(
        "Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}".format(
            i=self._identity, f=fingerprint, pid=self.pid, port=self.socket
        )
    )
    client = self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
    # Fail fast if the freshly started server is not actually accepting connections.
    self.ensure_connectable(client)
    return client
def _check_process_buildroot(self, process):
"""Matches only processes started from the current buildroot."""
return self._PANTS_NG_BUILDROOT_ARG in process.cmdline()
def is_alive(self):
    """A ProcessManager.is_alive() override that ensures buildroot flags are present in the
    process command line arguments.

    Passes _check_process_buildroot as an extra predicate, so a live process
    only counts as "our" nailgun if its cmdline carries this buildroot's
    marker arg (presumably guarding against pid reuse — see _spawn_nailgun_server).
    """
    return super().is_alive(self._check_process_buildroot)
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr):
    """Post-fork() child callback for ProcessManager.daemon_spawn().

    Launches the NGServer JVM with stdin from /dev/null and stdout/stderr
    redirected to the on-disk ng log files (the parent polls those files in
    _await_socket), then records the child's pid.  The ":0" argument asks
    the server to bind an ephemeral port, which it reports on stdout.
    """
    java = SubprocessExecutor(self._distribution)
    subproc = java.spawn(
        classpath=classpath,
        main="com.martiansoftware.nailgun.NGServer",
        jvm_options=jvm_options,
        args=[":0"],
        stdin=safe_open("/dev/null", "r"),
        stdout=safe_open(self._ng_stdout, "w"),
        stderr=safe_open(self._ng_stderr, "w"),
        close_fds=True,
    )
    self.write_pid(subproc.pid)
| |
import logging
import re
import sys
from unidecode import unidecode
from sqlalchemy import func, Integer, distinct
from sqlalchemy.orm import subqueryload
from collections import defaultdict
from datetime import datetime
import suggestive.mstat as mstat
from suggestive.db.session import session_scope
from suggestive.db.model import Album, Track, Scrobble, LastfmTrackInfo
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Suggestion(object):
    """A single album recommendation paired with its ordering score."""

    def __init__(self, album, order=None):
        # 'order' is the score produced by the orderer chain; None = unscored.
        self.album, self.order = album, order
class OrderDecorator(object):
    """Base class for chainable album orderers.

    Subclasses implement order(), taking the previous {album: score}
    mapping and returning a new one.
    """

    # Spellings accepted as boolean true / as "unset" in user-typed options.
    TRUTHY = (True, 'True', 'TRUE', 'true', 1, 'yes')
    NONE = (None, 'None', 'null', '')

    def __repr__(self):
        cls_name = type(self).__name__
        return '<{}()>'.format(cls_name)

    def order(self, albums, session, mpd):
        """Return a new {album: score} mapping; subclasses must override."""
        raise NotImplementedError
class BaseOrder(OrderDecorator):
    """Initialize all albums with unity order (0 for ignored albums)."""

    def order(self, albums, session, mpd):
        # Eagerly load artists alongside the albums to avoid N+1 queries.
        db_albums = session.query(Album).\
            options(
                subqueryload(Album.artist),
            ).\
            all()
        scores = defaultdict(lambda: 1.0)
        for db_album in db_albums:
            scores[db_album] = 0 if db_album.ignored else 1.0
        return scores
class AlbumFilter(OrderDecorator):
    """Show albums whose name contains a string."""

    def __init__(self, *name_pieces):
        # The search text arrives pre-split (e.g. from a command); rejoin it.
        self.name = ' '.join(name_pieces)
        self.name_rgx = re.compile(self.name, re.I)

    def __repr__(self):
        return '<AlbumFilter({})>'.format(self.name)

    def order(self, albums, session, mpd):
        """Keep only matching albums; try the raw name first, then an
        ASCII-transliterated form for accented titles."""
        def matches(album):
            if self.name_rgx.search(album.name):
                return True
            return bool(self.name_rgx.search(unidecode(album.name)))
        return {album: score for album, score in albums.items() if matches(album)}
class ArtistFilter(OrderDecorator):
    """Show albums for which the artist name contains a string."""

    def __init__(self, *name_pieces):
        # The search text arrives pre-split (e.g. from a command); rejoin it.
        self.name = ' '.join(name_pieces)
        self.name_rgx = re.compile(self.name, re.I)

    def __repr__(self):
        return '<ArtistFilter({})>'.format(self.name)

    def order(self, albums, session, mpd):
        """Keep only albums whose artist matches; try the raw name first,
        then an ASCII-transliterated form for accented names."""
        def matches(album):
            if self.name_rgx.search(album.artist.name):
                return True
            return bool(self.name_rgx.search(unidecode(album.artist.name)))
        return {album: score for album, score in albums.items() if matches(album)}
class SortOrder(OrderDecorator):
    """Sort by 'Artist - Album'."""

    def __init__(self, ignore_artist_the=True, reverse=False):
        self.ignore_artist_the = bool(ignore_artist_the)
        # NB: stored INVERTED on purpose — presumably because the final
        # suggestion list is itself sorted descending by score, so the double
        # reversal yields the direction the user asked for; confirm before
        # "fixing".
        self.reverse = (not bool(reverse))

    def _format(self, album):
        """Build the 'Artist - Album' sort key, rotating a leading
        'The ' to the end of the artist name ('The X' -> 'X, The')."""
        artist = album.artist.name
        if self.ignore_artist_the and artist.lower().startswith('the '):
            artist = '{}, {}'.format(artist[4:], artist[:3])
        return '{} - {}'.format(artist, album.name)

    def order(self, albums, session, mpd):
        ranked = sorted(albums, key=self._format, reverse=self.reverse)
        return {album: rank for rank, album in enumerate(ranked, 1)}
class ModifiedOrder(OrderDecorator):
    """Sort by modified date (newest last, or first when reverse=True)."""

    # MPD's 'last-modified' timestamp format.
    FMT = '%Y-%m-%dT%H:%M:%SZ'

    def __init__(self, reverse=False):
        self.reverse = bool(reverse)

    @classmethod
    def get_date(cls, album, mpd):
        """Return the newest 'last-modified' datetime among the album's
        tracks in MPD, or None when MPD reports no tracks for the name."""
        track_info = mpd.search('album', album.name)
        if not track_info:
            return None
        dates = (datetime.strptime(info['last-modified'], cls.FMT)
                 for info in track_info)
        return max(dates)

    def order(self, albums, session, mpd):
        """Rank the albums 1..n by modification date.

        BUG FIX: get_date() returns None for albums MPD does not know, and
        Python 3 cannot order None against datetime, so sorting used to raise
        TypeError.  Fall back to datetime.min so unknown albums simply sort
        as the oldest.
        """
        sorted_albums = sorted(
            albums,
            key=lambda album: self.get_date(album, mpd) or datetime.min,
            reverse=self.reverse)
        return {album: i for i, album in enumerate(sorted_albums, 1)}
class FractionLovedOrder(OrderDecorator):
    """Order by fraction of tracks loved.

    Optional 'min'/'max' keywords bound the accepted loved fraction
    (albums outside the range are dropped); penalize_unloved demotes
    albums with no loved tracks at all.
    """

    def __init__(self, reverse=False, penalize_unloved=False, **kwargs):
        super(FractionLovedOrder, self).__init__()
        self.reverse = bool(reverse)
        hi = kwargs.pop('max', None)
        lo = kwargs.pop('min', None)
        if kwargs:
            arg = list(kwargs.keys())[0]
            raise TypeError("FractionLovedOrder got an unexpected keyword "
                            "argument '{}'".format(arg))
        self.penalize = penalize_unloved
        # Textual 'unset' spellings mean the full [0, 1] range.
        if lo in self.NONE:
            lo = 0
        if hi in self.NONE:
            hi = 1
        try:
            lo, hi = float(lo), float(hi)
        except (TypeError, ValueError) as err:
            raise TypeError(*err.args)
        if lo > hi:
            lo, hi = hi, lo
        # Clamp both bounds into [0, 1] while keeping f_min <= f_max.
        floor = min(max(lo, 0), 1)
        self.f_max = max(min(hi, 1), floor)
        self.f_min = min(max(lo, 0), self.f_max)

    def __repr__(self):
        return '<FractionLovedOrder({}, {}, {})>'.format(
            self.f_min, self.f_max, self.penalize)

    def order(self, albums, session, mpd):
        """Scale each album's score by its loved-track factor; drop albums
        whose loved fraction falls outside [f_min, f_max]."""
        results = session.query(Album).\
            join(Track).\
            outerjoin(LastfmTrackInfo).\
            add_columns(func.count(Track.id),
                        func.sum(LastfmTrackInfo.loved, type_=Integer)).\
            group_by(Album.id).\
            all()
        scores = defaultdict(lambda: 1.0, albums.items())
        for album, n_tracks, n_loved in results:
            if album not in scores or n_tracks == 0:
                continue
            # SUM over no loved rows comes back as NULL/None.
            n_loved = n_loved or 0
            fraction = n_loved / n_tracks
            if self.f_min <= fraction <= self.f_max:
                scores[album] *= self._order(n_loved, n_tracks)
            else:
                del scores[album]
        return scores

    def _order(self, n_loved, n_tracks):
        """Score multiplier: 1 + fraction when any track is loved; an
        optional 1/n_tracks penalty when none are; inverted for reverse."""
        if n_loved > 0:
            factor = 1 + n_loved / n_tracks
        elif self.penalize:
            factor = 1.0 / n_tracks
        else:
            factor = 1.0
        return 1 / factor if self.reverse else factor
class PlaycountOrder(OrderDecorator):
    """Order items based on playcount/scrobbles.

    Optional 'min'/'max' keywords bound the accepted plays-per-track rate;
    albums outside the range are dropped.
    """

    def __init__(self, reverse=False, **kwargs):
        self.reverse = bool(reverse)
        hi = kwargs.pop('max', None)
        lo = kwargs.pop('min', None)
        if kwargs:
            arg = list(kwargs.keys())[0]
            raise TypeError("PlaycountOrder got an unexpected keyword "
                            "argument '{}'".format(arg))
        # Textual 'unset' spellings mean no bound on that side.
        if lo in self.NONE:
            lo = 0
        if hi in self.NONE:
            hi = sys.maxsize
        try:
            lo, hi = float(lo), float(hi)
        except (TypeError, ValueError) as err:
            raise TypeError(*err.args)
        if lo > hi:
            lo, hi = hi, lo
        self.plays_min = max(0, lo)
        self.plays_max = hi

    def __repr__(self):
        return '<PlaycountOrder({}, {})>'.format(
            self.plays_min, self.plays_max)

    def order(self, albums, session, mpd):
        """Scale each album's score by 1 + plays-per-track; drop albums
        whose play rate falls outside [plays_min, plays_max]."""
        results = session.query(Album).\
            join(Track).\
            outerjoin(Scrobble).\
            add_columns(
                func.count(distinct(Track.id)),
                func.count(Scrobble.id)).\
            group_by(Album.id).\
            all()
        scores = defaultdict(lambda: 1.0, albums.items())
        for album, n_tracks, n_scrobbles in results:
            if album not in scores or n_tracks == 0:
                continue
            plays = n_scrobbles / n_tracks
            if not (self.plays_min <= plays <= self.plays_max):
                del scores[album]
                continue
            factor = 1.0 + plays
            if self.reverse:
                scores[album] /= factor
            else:
                scores[album] *= factor
        return scores
class Analytics(object):
    """Runs a chain of OrderDecorator objects over the album database and
    returns the resulting ranked Suggestion list."""

    def __init__(self, conf):
        # suggestive configuration; handed to mstat and session_scope below.
        self.conf = conf

    def order_albums(self, orderers=None):
        """Apply each orderer in sequence to the {album: score} mapping.

        :param orderers: iterable of OrderDecorator instances; defaults to a
          single BaseOrder (score 1.0 everywhere, 0 for ignored albums).
        :returns: list of Suggestion, highest score first.
        """
        mpd = mstat.initialize_mpd(self.conf)
        if orderers is None:
            orderers = [BaseOrder()]
        ordered = {}
        with session_scope(self.conf, commit=False) as session:
            for album_orderer in orderers:
                ordered = album_orderer.order(ordered, session, mpd)
        # Order by score, then by artist name, then by album name
        # NOTE(review): on tied scores the key compares Album objects
        # directly — this assumes the Album model defines ordering; verify.
        sorted_order = sorted(
            ordered.items(),
            reverse=True,
            key=lambda item: (item[1], item[0]))
        return [Suggestion(album, order) for album, order in sorted_order]
| |
#!/usr/bin/env python
# Copyright (c) 2015, Andre Lucas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hashlib
import logging
import os.path
import re
from stat import *
from idmapper import *
from exceptions import *
log = logging.getLogger()
class NotHashableException(Exception):
    """Raised when objects without a real content hash are compared."""
    pass


class UnsupportedFileTypeException(Exception):
    """Raised for filesystem objects that are not a file, dir or symlink."""
    pass


class LinkOperationOnNonLinkError(Exception):
    """Raised when a symlink-only operation is applied to a non-symlink."""
    pass


class PathMustBeAbsoluteError(Exception):
    """Raised when a relative path is given where an absolute one is required."""
    pass


class SymlinkPointsOutsideTreeError(Exception):
    """Raised when a symlink escapes the tree and absolute links are disabled."""
    pass


class ContentsReadOnNonFileError(Exception):
    """Raised on an attempt to read the contents of a non-regular file."""
    pass


class HashStringFormatError(Exception):
    """Raised when a serialized hash line cannot be parsed."""
    pass


class BadSymlinkFormatError(HashStringFormatError):
    """Raised when a serialized symlink entry lacks the 'link>>>target' form."""
    pass
# Filename patterns to skip: editor backup/swap files and stale NFS handles.
fileignore = [
    re.compile(r'(~|\.swp)$'),
    re.compile(r'\.nfs[0-9a-f]+$'),
]
# Directory names to skip: version-control metadata directories.
dirignore = [
    re.compile(r'^(?:CVS|\.git|\.svn)'),
]
class FileHash(object):
    """One filesystem object (regular file, directory or symlink) plus the
    metadata and SHA-256 content hash used to compare two file trees.

    Instances come from one of two alternate constructors:
    init_from_file() lstat()s a real local object; init_from_string()
    parses a serialized line in the format written by presentation_format().
    """

    # Placeholder hash for objects whose contents are not hashable
    # (directories, symlinks, unsupported types).
    blankhash = "0" * 64
    # Placeholder hash meaning 'file contents not read yet' (deferred read).
    notsethash = "F" * 64

    # Class-wide cache of uid<->user and gid<->group mappings.
    mapper = UidGidMapper()

    def __init__(self):
        # hash_safe flips to True once a constructor fully populates us.
        self.hash_safe = False
        self.associated_dest_object = None
        self.size_is_known = False

    @classmethod
    def init_from_file(cls, fpath, trim=False, root='', defer_read=False):
        """Build a FileHash by lstat()ing the local object at fpath.

        :param fpath: full path of the object.
        :param trim: when true, store fpath relative to root.
        :param root: tree root, used for trimming and symlink normalisation.
        :param defer_read: when true, don't hash file contents yet.
        :raises UnsupportedFileTypeException: for devices, pipes, sockets etc.
        """
        self = cls()
        self.is_local_file = True
        if not root.endswith("/"):
            root += "/"
        if log.isEnabledFor(logging.DEBUG):
            log.debug("init_from_file: '%s' trim=%s root='%s' defer_read=%s",
                      fpath, trim, root, defer_read)
        self.fullpath = fpath
        self.defer_read = defer_read
        self.has_read_contents = False
        if log.isEnabledFor(logging.DEBUG):
            log.debug("iff(): fullpath '%s'", self.fullpath)
        if trim:
            self.fpath = self.fullpath[len(root):]
        else:
            self.fpath = self.fullpath
        # lstat() so symlinks describe themselves, not their targets.
        self.stat = os.lstat(self.fullpath)
        mode = self.mode = self.stat.st_mode
        self.uid = self.stat.st_uid
        self.user = self.mapper.get_name_for_uid(self.uid)
        self.gid = self.stat.st_gid
        self.group = self.mapper.get_group_for_gid(self.gid)
        self.size = self.stat.st_size
        self.size_is_known = True
        self.mtime = int(self.stat.st_mtime)
        # Safe defaults.
        self.copy_by_creation = False
        self.copy_by_copying = False
        self.size_comparison_valid = False
        self.is_file = self.is_dir = self.is_link = False
        self.ignore = False
        self.dest_missing = False  # Makes sense for a real fs object.
        self.has_real_hash = False
        if S_ISDIR(mode):
            self.is_dir = True
            # Directories are recreated on the destination, never copied.
            self.copy_by_creation = True
            self.hashstr = self.blankhash[:]
        elif S_ISLNK(mode):
            self.is_link = True
            log.debug("Reading symlink '%s'", self.fullpath)
            self.link_target = os.readlink(self.fullpath)
            log.debug("Symlink '%s' -> '%s'", self.fullpath, self.link_target)
            # Recreate symlinks, copy is unlikely to work as the target
            # (if it's a web server) will probably not send the link
            # contents, more likely it will send the target contents.
            self.copy_by_creation = True
            # Size comparison is not valid, we may be under a different-
            # length path, so the target will be a different length.
            self.has_real_hash = False
            # For the same reason, the hash is meaningless.
            self.hashstr = self.blankhash[:]
            self.link_normalised = False
            if root:
                self.normalise_symlink(root)
        elif S_ISREG(mode):
            self.is_file = True
            if defer_read:
                # Contents hashed lazily later (see compare()).
                self.hashstr = self.notsethash
            else:
                self.read_file_contents()
            self.copy_by_copying = True
            self.has_real_hash = True
            # NOTE(review): init_from_string() sets this True for regular
            # files; the asymmetry looks deliberate but is unexplained —
            # confirm before changing.
            self.size_comparison_valid = False
        else:
            self.is_dir = False
            self.is_file = False
            self.hashstr = self.blankhash[:]
            raise UnsupportedFileTypeException(
                "%s: File type '%s' is unsupported" %
                (self.fullpath, self._type_to_string(mode)))
        self.strhash_value = self.hash_value = None
        self.hash_safe = True
        return self

    @classmethod
    def init_from_string(cls, string, trim=False, root=''):
        """Build a FileHash from one serialized line:

            '<hash> <octal mode> <user> <group> <mtime> <size> <path>'

        For symlinks the path field has the form '<link>>><target>'.

        :raises BadSymlinkFormatError: for malformed symlink path fields.
        :raises UnsupportedFileTypeException: for non file/dir/link modes.
        """
        self = cls()
        self.is_local_file = False
        if log.isEnabledFor(logging.DEBUG):
            log.debug("init_from_string: %s", string)
        (md, smode, user, group, mtime, size, fpath) = string.split(None, 6)
        self.hashstr = md
        mode = self.mode = int(smode, 8)
        self.user = user
        self.uid = self.mapper.get_uid_for_name(user)
        self.group = group
        self.gid = self.mapper.get_gid_for_group(group)
        # Robustness principle - old versions have float mtime.
        self.mtime = int(float(mtime))
        self.size = int(size)  # XXX int length?
        self.size_is_known = True
        self.fpath = fpath
        opath = fpath
        if trim:
            opath = os.path.join(root, opath)
        self.fullpath = opath
        # Safe defaults.
        self.copy_by_creation = False
        self.copy_by_copying = False
        self.size_comparison_valid = False
        self.ignore = False
        self.dest_missing = True  # Safe default.
        self.has_real_hash = False
        self.is_dir = self.is_link = self.is_file = False
        if S_ISDIR(mode):
            self.is_dir = True
            self.copy_by_creation = True
        elif S_ISLNK(mode):
            self.is_link = True
            self.has_real_hash = True
            if '>>>' not in fpath:
                raise BadSymlinkFormatError(
                    "%s: Expected '>>>' in symlink hash" % fpath)
            (link, target) = fpath.split('>>>', 1)
            if not link or not target:
                raise BadSymlinkFormatError(
                    "%s: Bogus symlink hash" % fpath)
            target = target.lstrip()  # Optional whitespace right of '>>>'.
            self.fpath = link
            self.link_target = target
            # Size comparisons are not valid because we are likely to be
            # copied into a different-length path, implying that the link
            # target has different length.
            self.has_real_hash = False
            # For the same reason, the hash is meaningless.
            self.hashstr = self.blankhash[:]
            # For the same reason again, we just create links.
            self.copy_by_creation = True
            if root:
                self.normalise_symlink(root)
        elif S_ISREG(mode):
            self.is_file = True
            self.has_real_hash = True
            self.copy_by_copying = True
            self.size_comparison_valid = True
        else:
            self.is_dir = False
            self.is_file = False
            self.hashstr = self.blankhash[:]
            raise UnsupportedFileTypeException(
                "%s: File type '%s' is unsupported" %
                (self.fullpath, self._type_to_string(mode)))
        self.strhash_value = self.hash_value = None
        self.hash_safe = True
        return self

    def read_file_contents(self):
        """Hash this regular file's contents now (used for deferred reads)."""
        if not self.is_file:
            # NOTE(review): ContentsReadOnNonFileError exists but is unused;
            # callers may rely on catching this type, so it is kept.
            raise UnsupportedFileTypeException(
                "%s: Attempt to read contents of non-file" % self.fullpath)
        log.debug("Reading file '%s' contents", self.fullpath)
        self.hash_file()
        self.has_read_contents = True

    def safe_to_skip(self, other):
        '''
        Determine if we are sufficiently similar to FileHandle object other
        to avoid reading the file again.

        Name, size and mtime must all match.
        '''
        log.debug("'%s': skip check", self.fpath)
        if self.fpath != other.fpath:
            log.debug("names differ, fail skip check")
            return False
        if self.size != other.size:
            log.debug("sizes differ, fail skip check")
            return False
        if self.mtime != other.mtime:
            log.debug("mtimes differ, fail skip check")
            return False
        log.debug("skip check pass")
        return True

    def inherit_attributes(self, other):
        '''
        Copy attributes from another FileHandle object, in an attempt to
        save on IO. Don't copy stuff we don't need to.
        '''
        log.debug("inherit_attributes('%s'): grabbing hash", self.fpath)
        self.hashstr = other.hashstr
        self.has_read_contents = True

    def _type_to_string(self, mode):
        """Return a human-readable name for a stat mode's file type."""
        ftype = 'UNKNOWN'
        if S_ISDIR(mode):
            ftype = 'dir'
        elif S_ISLNK(mode):
            ftype = 'symlink'
        elif S_ISREG(mode):
            ftype = 'file'
        elif S_ISCHR(mode) or S_ISBLK(mode):
            ftype = 'device'
        elif S_ISFIFO(mode):
            ftype = 'pipe'
        elif S_ISSOCK(mode):
            ftype = 'socket'
        return ftype

    def sha_hash(self):
        '''
        Provide a stable hash for this object, covering path, mode,
        ownership and contents hash.

        NOTE(review): feeding str objects to hashlib only works on
        Python 2; Python 3 requires bytes.
        '''
        assert self.hash_safe, "Hash not yet available"
        md = hashlib.sha256()
        md.update(self.fpath)
        md.update(str(self.mode))
        md.update(self.user)
        md.update(self.group)
        md.update(self.hashstr)
        return md.hexdigest()

    def hash_file(self):
        """SHA-256 the file's contents, reading in large blocks.

        Sets both the raw digest (contents_hash) and hex form (hashstr).
        """
        log.debug("File: %s", self.fullpath)
        block_size = 10 * 1000 * 1000
        md = hashlib.sha256()
        # BUG FIX: close the handle deterministically instead of leaking
        # it until the GC happens to collect it.
        f = open(self.fullpath, 'rb')
        try:
            while True:
                data = f.read(block_size)
                if not data:
                    break
                md.update(data)
        finally:
            f.close()
        log.debug("Hash for %s: %s", self.fpath, md.hexdigest())
        self.contents_hash = md.digest()
        self.hashstr = md.hexdigest()

    def presentation_format(self):
        """Serialize to the line format parsed by init_from_string()."""
        fpath = self.fpath
        if self.is_link:
            fpath += '>>> %s' % self.link_target
        return "%s %06o %s %s %s %s %s" % (
            self.hashstr, self.mode,
            self.user, self.group,
            int(self.mtime), self.size,
            fpath)

    def can_compare(self, other):
        """True when both sides carry a real, comparable content hash."""
        return self.has_real_hash and other.has_real_hash

    def compare_contents(self, other):
        """Compare content hashes; raises NotHashableException when either
        side has no real hash."""
        if not self.has_real_hash:
            raise NotHashableException(
                "%s (lhs) isn't comparable" % self.fpath)
        if not other.has_real_hash:
            raise NotHashableException(
                "%s (rhs) isn't comparable" % other.fpath)
        log.debug("compare_contents: %s, %s", self, other)
        return self.hashstr == other.hashstr

    def compare(self, other,
                ignore_name=False,
                ignore_uid_gid=False,
                ignore_mode=False,
                trust_mtime=True):
        '''
        Thorough object identity check. Return True if files are the same,
        otherwise False.

        With trust_mtime (the default), matching mtimes short-circuit the
        (potentially expensive) content comparison.
        '''
        # Since we're comparing, let's assume the remote at least has the
        # file.
        self.dest_missing = False
        differ_name = False
        differ_metadata = False
        differ_contents = False
        differ_mtime = False
        mtime_skip = False
        if log.isEnabledFor(logging.DEBUG):
            log.debug("compare: self %s other %s ignore_uid_gid %s "
                      "ignore_mode %s trust_mtime %s",
                      repr(self), repr(other),
                      ignore_uid_gid, ignore_mode, trust_mtime)
        if not ignore_name:
            if self.fpath != other.fpath:
                log.debug("Object names differ")
                differ_name = True
        if self.size_comparison_valid and self.size != other.size:
            # NOTE(review): a size mismatch is logged but never sets any
            # differ_* flag, so it cannot by itself fail the comparison —
            # confirm this is intended.
            log.debug("Object sizes differ")
        if not ignore_mode:
            if self.mode != other.mode:
                log.debug("Object modes differ")
                differ_metadata = True
        if not ignore_uid_gid:
            if self.uid != other.uid:
                log.debug("Object UIDs differ")
                differ_metadata = True
            if self.gid != other.gid:
                log.debug("Object GIDs differ")
                differ_metadata = True
        # If enabled, check the mtime and if it matches, consider ourselves
        # done.
        if trust_mtime:
            if self.mtime == other.mtime:
                log.debug("'%s': mtime match, assuming ok", self.fpath)
                mtime_skip = True
            else:
                log.debug("'%s': mtime mismatch, will check", self.fpath)
                differ_mtime = True
        if not mtime_skip:
            # If we're a local file, we may be able to save some time.
            if self.is_local_file:
                # BUG FIX: the old code called self.has_contents(), which is
                # not defined anywhere on this class (AttributeError on the
                # deferred-read path); use the has_read_contents flag that
                # read_file_contents() maintains.
                if self.defer_read and not self.has_read_contents:
                    self.hash_file()
                    self.has_read_contents = True
            if self.can_compare(other):
                if not self.compare_contents(other):
                    log.debug("Hash verification failed")
                    differ_contents = True
                else:
                    log.debug("Hash verified")
        log.debug("'%s': Identity check: name %s contents %s metadata %s "
                  "mtime %s", self.fpath, differ_name, differ_contents,
                  differ_metadata, differ_mtime)
        self.contents_differ = differ_contents
        self.metadata_differs = differ_metadata
        self.mtime_differs = differ_mtime
        differs = differ_contents or differ_metadata or \
            differ_mtime or differ_name
        return not differs

    def normalise_symlink(self, root):
        '''
        Given a root directory, make symlinks that point inside our tree
        relative, and mark links that point outside the tree as external.
        '''
        if not S_ISLNK(self.mode):
            raise LinkOperationOnNonLinkError("'%s' is not a symlink" %
                                              self.fpath)
        log.debug("normalise_symlink: %s", repr(self))
        self.link_normalised = True
        self.link_relpath = self.source_symlink_make_relative(srcdir=root)
        if self.link_relpath.startswith(os.sep):
            # log.warn() is a deprecated alias for warning().
            log.warning("'%s' (symlink to '%s') points outside the tree - "
                        "normalising to '%s'",
                        self.fpath, self.link_target, self.link_relpath)
            self.link_target = self.link_relpath
            self.link_is_external = True
        else:
            self.link_is_external = False

    def source_symlink_make_relative(self, srcdir, absolute_is_error=False):
        '''
        On the source side, find if a symlink is under srcdir, and
        if it is, make it relative. If not, make it absolute.
        Returns the best path we can make. If it starts with a '/', it's
        absolute and so is out of the tree.
        If absolute_is_error, throw an Error for non-absolute results.
        srcdir must be absolute. We need to start somewhere.
        Throws an Error if not a symlink.
        '''
        if not srcdir.startswith(os.sep):
            raise PathMustBeAbsoluteError(
                "'%s' must be an absolute path" % srcdir)
        if not S_ISLNK(self.mode):
            raise LinkOperationOnNonLinkError("'%s' is not a symlink" %
                                              self.fpath)
        log.debug("source_symlink_make_relative: %s, %s",
                  self.fpath, self.link_target)
        topdir = os.path.normpath(srcdir)
        if not topdir.endswith(os.sep):
            topdir += os.sep
        tpath = self.link_target
        spath_full = os.path.join(topdir, self.fpath)
        norm_spath_full = os.path.normpath(spath_full)
        norm_lpath_full = os.path.dirname(norm_spath_full)
        # This is the location of the symlink (dirname fpath) plus the link
        # target.
        tpath_full = os.path.join(topdir, norm_lpath_full, self.link_target)
        norm_tpath_full = os.path.normpath(tpath_full)
        if norm_tpath_full.startswith(topdir):
            # We're under the source path -> relative link.
            tpath_rel = os.path.relpath(norm_tpath_full, norm_lpath_full)
            log.debug("link relative: '%s' -> '%s'", tpath_full, tpath_rel)
            return tpath_rel
        else:
            if absolute_is_error:
                raise SymlinkPointsOutsideTreeError(
                    "'%s' points outside the file tree "
                    "('%s', normalised to '%s') "
                    "and absolute links are disabled" %
                    (self.fpath, self.link_target, norm_tpath_full))
            # We're not under the source path -> absolute link.
            log.debug("link cannot be made relative : '%s' -> '%s'",
                      tpath, norm_tpath_full)
            return norm_tpath_full

    def __str__(self):
        return self.presentation_format()

    def __repr__(self):
        if self.is_link:
            fpath = '%s>>>%s' % (self.fpath, self.link_target)
        else:
            fpath = self.fpath
        return "[FileHash: fullpath %s fpath %s size %d " \
            "mode %06o mtime %d uid %d gid %d hashstr %s]" % (
                self.fullpath, fpath,
                self.size, self.mode,
                self.mtime,
                self.uid, self.gid, self.hashstr)

    def _debug_repr(fh):
        # Convenient debugging representation.
        # NB: deliberately written without 'self'; the instance arrives
        # as 'fh'.
        return ','.join([fh.fpath, '%06o' % fh.mode, str(fh.mtime),
                         fh.user, fh.group, str(fh.size)])

    # These comparisons are for the purposes of object storage. They're
    # not effective for comparing the files when determining whether or
    # not to copy them; use compare() for that.

    def hash(self):
        """Return (and cache) an identity tuple: path, size, mode, mtime,
        ownership and contents hash."""
        if self.hash_value is None:
            self.hash_value = (self.fullpath, self.size, self.mode,
                               self.mtime, self.uid, self.gid, self.hashstr)
        return self.hash_value

    def strhash(self):
        """Return (and cache) a short printable digest of hash().

        NOTE(review): .encode('base64') exists only on Python 2 strings;
        this helper is unusable on Python 3 as written.
        """
        if self.strhash_value is None:
            h = hashlib.md5(str(self.hash()))
            self.strhash_value = h.digest().encode('base64')
        return self.strhash_value

    def __hash__(self):
        # BUG FIX: __hash__ must return an integer.  The old implementation
        # returned strhash()'s string, so placing a FileHash in a set or
        # using it as a dict key raised TypeError.  Hash the identity tuple
        # instead, which also keeps __hash__ consistent with __eq__.
        return hash(self.hash())

    def __eq__(self, other):
        return self.hash() == other.hash()
| |
#!/usr/bin/python2.7
import base64
import Tkinter
# This is a base64 encoded GIF image. One neat trick with using Python to create GUIs
# is you can embed your graphics into the code so anything you want to display doesn't
# need to be pre-loaded or downloaded separately. You can only use GIFs in this way.
gifBase = '''
R0lGODlhQABAAPcAAO3t7RoaGg8PDw4ODqOho9TS1NbV1q2sraemp6alprSztRIRFBMTFw8PERERExUVFxoaHA4ODxMTFBUVFhkZGg0OFRARFxgZHx8hKxka
HxwdIhcaJhASGg4PEx0fJxARFR0eIiIjJwsNFBMWIRodKBwfKhIVHxQZKBUXHRkbIQ8QExESFRYXGhcYGxgZHBobHh8gIwwOEyMlKhMVGSAiJhMUFhscHiQl
JxETFhcZHBgaHRESExwdHhgZGRkaGh0eHiEiIicoKCUmJsPExMHCwqWmpg8QD7u8uyIiHhoaGaurqpWVlP/2Tv/yWv7uY/3nS/7jO/zkV9a3JbqcGb2eIvHN
N/3eXRwbFx4dGcumJeW9KxoYEdeqGoVpEYttEqCAFsOdHLeRGo9zFZp7GKeFG4RqGdWsKm1ZF9mxNu3HUM6cEpRzEM+hGndcD4FkE2BLD3xiFE8+DXRcFVRD
FFxJFlhHFy4lDdOtQ9+4TCciExoXDiQhGcmSD8CPEItnDKuAEJFtDsmVFdCaGG5TDltFDMWTG8uYHpJvF0k4DYxsGmVQGVA/FDktD59+K0AzEk4/GEc5Fr2Y
PCkhDkE1GDowFi0mFDgwHBUTDhcVECspJLqHD6Z6FaV9IcybKltFE5BvJEs6FGBLGjMoDolrJ5h4LaiGN0k6GSQdDjUsFx8aDyEeF0c1Eo9wL2pTJHxiLVVD
IIltNUY5HjIpFikmIBwaFl5JIU4+ISokGRMRDxUTERcVExEQDxMSEScmJRoZGdbV1bOysuzs7Orq6unp6eHh4dra2tbW1tXV1dTU1NPT09HR0c7OzsvLy8nJ
ycbGxr6+vry8vLu7u7q6uri4uLe3t7W1ta+vr6ioqKWlpaSkpKOjo6KioqCgoJ2dnZmZmY+Pj35+fnl5eXV1dXJycm9vb2xsbGpqamRkZFpaWllZWSoqKiYm
JiUlJSQkJCIiIiEhIR8fHx4eHhwcHBsbGxcXFxYWFhUVFRMTExISEhEREQwMDAsLCwkJCQcHBwUFBf///yH5BAEAAP8ALAAAAABAAEAAAAj/AAEIHEiwoMGD
CBMqXMiwocOHECNKnEixosWLGDNq3HgwGLFm0EKKHEmypMmTKFNCI1IsmMBg0pZwm8lt27Zs2BBcu6bkwAFr1oApoEZt2jRp0kY+C3kEqbRpRKspAAb0gJKd
CbBls0mT25IlwgQKu0qtGtGj0aA9e0ZkyBBnzZoxW0ZXmbJkyJAd28sXWYFkdukuY8asmTNnQ4isfRbtaVRqB8INE8uzadrFbd/GnbvMbl5joIuJHk26mDED
xwAro0vYsFsiipc2jmZNslhr4L7p3v31q7ff3rp1m7mNgDac2BIgWI5AeYKs2bRp2zZTeLffvXvv3m0bgDAE9urZ/xtPvrz58+glnJfAXsK9e+7dv59P/x6+
cZO9b+PRzp3//+684987BApYIDwIJqjgggnG4yAPEMoDYYTyVGghD/SUk58w28zzToIfMrjggxOWyIOFKKY4z4osrhjAizC+WI+GYm1DQYQn5jhPihYGMI+P
QP4Y44sUBFBkkT1QQE8PS9LjpJP1PPmkBDTqN08QPwTBgzrxBBHPOvC0A4Q77byzTjxCyLOOmmryB4878AgYTztsrhPAOvOwI087PIToDg/uzNNOAO1QsE6G
+Q3DTQDsQPAOBOzwByg8PNjw5pwBwNPCOy7Q6Y48P7wgjw0Q/GDDp+34oKmn8rxToQ02vP+jqgvutAAPC+3QY06i3PQAgyWlgAJDAO5kioUrn+wRTwAaxFJK
KMFqQA88qsAChyujtEJKHiDQAwKwoEgLDz08XIGtKi680kglKaBSyioaUMlrADQowkQZNPxwZgi3oFGFJTDYkEIZTATCRCIpuECDLFVooUUVZkAxSQo21Htv
CjlECsMe/qKSQiNPzLFBKEy4kcIDVSrqLSWdLKIBBfAEwEMAn5CBBQSxjEJJHZXUQYkroLxwBSeHTAIJJY/knMcFLL/hCiQQ0DMPPZ9scgULoRhSCQqodOL0
KilzM48MijRRhgw80PDCDbfc8a8GjTAxxwkonGBKE2QsnAYaSIz/cMEIcdNBgiJRBIK3BjmEsIfblsD9RB2DF473rgKpbAMlhixyARatQB1AIjbnEEohkLRC
SeeFvIHC0IfYUkcstThyhipMG+KJIo7UAkkL9CRiNQt0aM21IXG8YUfYYyviRBkYTBJFFr20/S8IpERhSBOCOCGIFWSEIEsaWqRRBRpPTHKCDWSbfQIlTWQB
AxaMw93EHCN40gQcI3QQdj0vUMLGIijYQxjg0AIfHMJmKBjdHMqwiDLMoRCKWN0mDqGIM9ABDqh4BC1a4D8AqgIOdZgEJRLxhy18gBB8OEUM7BAIRHRgAMgj
2/JKQAkrUEFxjKNeFOZQghxgwBRW+IL3/9JghlmkQAMpIEUT6lCCUCxPBhDAwC3AdwctVGIGn3hCHOoXhTZoIB/76x8ZHmEJVIzhDKiQBRzEoAoUKIINstAB
PVwgCzacAQVYIMMYkhAAerTgjbJAgSO4MAcU1GMCmOhCGwDxh0vE4A1qkIQIRKEGFw6AcgBQ1DzUEUVTOGEMHphiETXgDiCwYocykIcMgDiGEFQCD1nIBAyA
8I5TLqIEtYgCHDTgAHjcIweYiIQVZzCKJyDCBIvoYgrAyKtpUSADqKDCHVWhRyz8SAeKoAIlXtADF0RzmtWEQDeziQoOMGIKceiAAO6Rjwbwwgtf0EMH3jAF
O1TgnHFQQQQwqf9JdciDk5mAgD97MSf/tOMHvYBAEDZJgUxQIAgQoFOZBASzHcCjAReFRz58KQ97UKAeDwiABLDQS4wK4B1GCFsP3uGhecDDpRKSUDzk8dKY
2TQAy4rHPE4kjwCEJx/2yAc98vGABtQDqPiQjwTwEdQJNIAeRpiAEeohAH5yox5MkpJWtVqPrpanqxLIxwD0QdaymvWsZ92HWte6D33so0ockpqQgDSkGFHg
rklSkj3wIYC++vWvfR2AYAdL2MISVh9w3QY95NEiF9UVRkeiBzsBS9m/GvayhUXshhTrWLoW6bE9OKoABkBZ0pp2tKPF7GENq9ka9aCmLo3tTHmaIyf/5UMC
RsBtWNnJznzcwwi/lYAAhHuPdQoAH0bIh3L5io8I3GMAz31u2HwgA/TZgEvqgEcINLCOdrCjUReoATzs4Q4JpOADL8BBBhiQgxkEQB7TIq95P3De9I6rp+99
wAVWkAIJvOMe7sBH2Pbwikq84hY2eG8AYjEJH4h0B6cAxSWIewlISGISdqCEKGThCCPsALrQxQQoTrEKSTjCDg0wggDyIQAHkDjCeviwdHnliiYUwgmJwMAL
gvA9NNyiBUoihRZTcA8NrKIJarACF8DnL1m4YAI6qEEK6NAEPlghyWCAAA7k4QsWzABkhhtECvDhDmZWjhuOIMMbxmiJW0Dg/wp/WMMuHGCEeUbyAy0+xRje
4IVBuGGRjVSBHhixCxXY4QuEWAOf3yAJSaw4FXaIwxgI8QVRqADEVm2BDFBQAk+OAQSngKUqZuDlYiKCyO5wgA06YAMVoCCYw8ziqVVwgRjUGgWpIOIedFGK
KojCBBW4QAPekY8yh40WqMDCLSjBhUHEABNfEEMqUoGJVNDTDh/ILXB924Bpe4EM8rz2B3YhCVyc4hKpOEW0cdEBOETyFLiQxC6KW1yrtiINm8B3Grrwgh0E
QA+RMEMptCAKClgUH8TWaAZScQcqqGIC9BBpAByQgUXo+w94+IIvFuACXZACDZzQNyFQcNKU8ooOUv/wgxn8wAU4fGAHK4D2FMbABUmsIB9HPao9HLCFVISB
DJi4uT1wkYoGrAARKc8CIMAgBkzkwgEL8AIbAJEFP0gBEVsQgD2qyit73CIfexAFHtrgAnzMowa02AIrzGCHHOCDBxvNxwtOcQcvqEIXH53ABFqBBkYIew8C
ADwWUhEJMtDiHqNgOxYagAVH4IEQOhBA2IzAAgeYUwtdYMA96lEDB/BiDARnAD7ogY8e4KMGktDCFySwgvHAxw1VcMQMGqAL2jdgB6kwAxi2sAPQ22EBDXAA
IqowCAYYwar4kEc+WmAHNAyi8j3ABCs4IYk83CMA+ZiHEeQhABekohNukCz/PewhAXpgohL5YAEjIjGIUrShFHLIQyoeUIMtnKIW7I+EIyqBiwDAkFdmdw8s
sApQMAYPIAEssAVZoAV5wAE1EADXdw/zIICiUAVhMH5OYg89gID3MAOeAAWaUAV9YIETsAJOIgEWcAggCAVvwAEOMA9mlkncYAQBIAA9gAuOkApDRQ8OYAei
IAFHBVUUIAA12ANGQAmn8ABMZR9BRQ82eAmOoAdRyAinMAG3ZR81kApR6Ai4QA8D0AP/d2b3AA8SEA8SkAESwAP2AF9e1gPzUA8dxQMS4EtmmAMSAF8B0ANu
uIb24EsX4AB/eAF3WA9GIjU1sF8XsAPxMIYCxiun/6VapgWJq6UPY1WJlHiJl2iJblWJbfVWmyUARqBioTiKlWVZqEVafiWJkFhWluiJNWIE+KBcsjiLtCiL
o3iLoKhipZhaqnhYicUPbMVW/DCMxFiM/NAPyJiMyriMyugPzoiM/rCMzhiNz+gPKZMA50AO2riN2jgO3uiN4hCO4RgO5FiO5niO5yiO4fiN3siN2lgO5HAO
4pAoSpANQBEUQkEU0hANa9EWcCEXdZEMybAXvwAaBnkMyCCQqzEYhYEYsQEN0hAVwEAVQIEA3TEMStAN0ZENRZAAPPET+GgW03AZbOEWhxEXKBkXh4EYibEW
0BANUFENE2kNPrETCKAVx3TRDd0hDNbwDb3xG9ZRE9mgHDvRE0AxkdVgFmdhFFARFVNBkVZxFdeQAFvBDdYBHF/xDeAQFgAQDMcADumADmI5lmRZlmZ5lmiZ
lmmZDuegBC4hFseQknI5l3RZl3Z5l3TJDMTAEXzZl375l4AZmII5mIEZEAA7
'''
# Our GUI is going to be made into a class. If you're writing something simpler you can
# do without the class structure.
class App:
    """Log-uploader window: checkboxes for log sets, a ticket-number entry,
    a free-form comment box, and Submit/Exit buttons.

    Constructing the class builds the window and immediately enters the Tk
    main loop, so ``App()`` runs the whole application.
    """

    def __init__(self):
        self.root = Tkinter.Tk()
        # Keep the window above all others.
        self.root.call('wm', 'attributes', '.', '-topmost', True)
        self.root.title("Log Uploader")
        # OS X-style window gray, applied to the root and to every widget so
        # the GUI blends in with native windows.
        bgColor = "#EDEDED"
        self.root.configure(bg=bgColor)
        # Checkbox name -> Tkinter.IntVar (0 or 1), read back in Submit().
        self.Values = {}
        # Keep the PhotoImage referenced on self: Tkinter does not hold its
        # own strong reference, and a garbage-collected image renders blank.
        # (The old `displayGif = ...grid(...)` bound None, since grid()
        # returns None.)
        self.gif = Tkinter.PhotoImage(data=gifBase)
        Tkinter.Label(self.root, image=self.gif, borderwidth=10,
                      bg=bgColor).grid(row=0, rowspan=6, columnspan=2)
        Tkinter.Label(self.root, text="Select the logs you wish to upload:",
                      font=("Helvetica Neue Bold", 14), bg=bgColor).grid(
                          row=0, column=2, columnspan=3, padx=(0, 10),
                          pady=(10, 5), sticky='w')
        # Each Checkbutton writes its state into the IntVar registered under
        # the matching key of self.Values.
        self.Values['BoxSync'] = Tkinter.IntVar()
        Tkinter.Checkbutton(self.root, text="Box Sync",
                            font=("Helvetica Neue", 14),
                            variable=self.Values['BoxSync'],
                            bg=bgColor).grid(row=1, column=2, sticky='w')
        self.Values['CrashPlan'] = Tkinter.IntVar()
        Tkinter.Checkbutton(self.root, text="CrashPlan PROe",
                            font=("Helvetica Neue", 14),
                            variable=self.Values['CrashPlan'],
                            bg=bgColor).grid(row=1, column=3, columnspan=2,
                                             padx=(0, 10), sticky='w')
        self.Values['HipChat'] = Tkinter.IntVar()
        Tkinter.Checkbutton(self.root, text="HipChat",
                            font=("Helvetica Neue", 14),
                            variable=self.Values['HipChat'],
                            bg=bgColor).grid(row=2, column=2, sticky='w')
        self.Values['Install'] = Tkinter.IntVar()
        Tkinter.Checkbutton(self.root, text="Install Logs",
                            font=("Helvetica Neue", 14),
                            variable=self.Values['Install'],
                            bg=bgColor).grid(row=2, column=3, columnspan=2,
                                             padx=(0, 10), sticky='w')
        self.Values['System'] = Tkinter.IntVar()
        Tkinter.Checkbutton(self.root, text="System",
                            font=("Helvetica Neue", 14),
                            variable=self.Values['System'],
                            bg=bgColor).grid(row=3, column=2, sticky='w')
        Tkinter.Label(self.root, text="Are you updating an existing ticket?",
                      font=("Helvetica Neue Bold", 14), bg=bgColor).grid(
                          row=4, column=2, columnspan=4, padx=10, sticky='w')
        Tkinter.Label(self.root, text="Ticket Number:",
                      font=("Helvetica Neue", 14), bg=bgColor).grid(
                          row=5, column=2, padx=10, sticky='w')
        # StringVar mirrors the Entry's contents; read back in Submit().
        self.TicketNumber = Tkinter.StringVar()
        Tkinter.Entry(self.root, textvariable=self.TicketNumber,
                      justify='right', width=10,
                      font=("Helvetica Neue", 14),
                      highlightbackground=bgColor).grid(
                          row=5, column=3, columnspan=2, sticky='w')
        Tkinter.Label(self.root, text="Add a comment:",
                      font=("Helvetica Neue Bold", 14), bg=bgColor).grid(
                          row=6, column=1, columnspan=4, padx=10, sticky='w')
        # The multi-line Text widget lives inside a sunken Frame so it does
        # not run edge to edge; pack() centers it within the frame while the
        # frame itself participates in the grid layout.
        Frame1 = Tkinter.Frame(self.root, borderwidth=1, relief='sunken')
        self.Comment = Tkinter.Text(Frame1, width=60, height=5,
                                    highlightbackground=bgColor,
                                    highlightcolor="#7BAEDC",
                                    wrap=Tkinter.WORD,
                                    font=("Helvetica Neue", 12))
        self.Comment.pack()
        Frame1.grid(row=7, column=1, columnspan=5, padx=5)
        # Action buttons, lower-right as is conventional on OS X.
        Tkinter.Button(self.root, text="Exit", highlightbackground=bgColor,
                       command=self.Exit).grid(row=8, column=3,
                                               pady=(5, 5), sticky='e')
        Tkinter.Button(self.root, text="Submit", highlightbackground=bgColor,
                       command=self.Submit).grid(row=8, column=4,
                                                 pady=(5, 5), sticky='e')
        # Hide the window, let Tk compute its requested size, then position
        # it before showing it: centered horizontally, a third of the way
        # down vertically.
        self.root.withdraw()
        self.root.update_idletasks()
        # int() keeps the geometry string valid when "/" yields a float
        # (Python 3 true division).
        x = int((self.root.winfo_screenwidth() - self.root.winfo_reqwidth()) / 2)
        y = int((self.root.winfo_screenheight() - self.root.winfo_reqheight()) / 3)
        self.root.geometry("+{0}+{1}".format(x, y))
        # Fixed-size window: we control the GUI's appearance.
        self.root.resizable(False, False)
        self.root.deiconify()
        self.root.mainloop()

    def Exit(self):
        """Close the window and end the main loop."""
        self.root.destroy()

    def Submit(self):
        """Report the user's selections to the Terminal.

        Replace the prints with the real upload logic.
        """
        print("The user clicked the 'Submit' button.")
        # items() instead of the Python-2-only iteritems().
        for key, value in self.Values.items():
            if key == 'BoxSync' and value.get():
                print("The user selected 'Box Sync' logs.")
            if key == 'CrashPlan' and value.get():
                print("The user selected 'CrashPlan PROe' logs.")
            if key == 'HipChat' and value.get():
                print("The user selected 'HipChat' logs.")
            if key == 'Install' and value.get():
                print("The user selected 'Install' logs.")
            if key == 'System' and value.get():
                print("The user selected 'System' logs.")
        if self.TicketNumber.get():
            print("The user entered a ticket number: {0}".format(self.TicketNumber.get()))
        # Text widgets are read by index range: '1.0' = line 1, character 0.
        TextField = self.Comment.get('1.0', 'end')
        if TextField.rstrip():
            print("The user entered the following comment:\n\n{0}".format(TextField))
# Instantiating App builds the window and blocks inside the Tk main loop
# until the user closes it, so this single call runs the whole application.
App()
| |
#!/usr/bin/env python
"""
This script is mainly for running autotests on the build server, however, it
can also be used by engineers to run the tests locally on their machines.
It takes as optional parameters the path to the folder containing the test
executables (which must have names ending in _tests), and a list of tests that
need to be skipped, this list must be comma separated and contain no spaces. E.g.:
./run_desktop_tests.py -f ./omim-build-release -e drape_tests,some_other_tests
The script outputs the console output of the tests. It also checks the error
code of each test suite, and after all the tests are executed, it prints the
list of the failed tests, passed tests, skipped tests and tests that could not
be found, i.e. the tests that were specified in the skip list, but do not exist.
"""
from __future__ import print_function
from optparse import OptionParser
from os import listdir, remove
from random import shuffle
import random
import socket
import subprocess
import testserver
import time
import urllib2
import logging
# Keys of the category dicts produced by TestRunner.categorize_tests() and
# TestRunner.run_tests().
TO_RUN = "to_run"
SKIP = "skip"
NOT_FOUND = "not_found"
FAILED = "failed"
PASSED = "passed"
WITH_SERVER = "with_server"
# Port the local test server listens on (used by TestRunner.stop_server()).
PORT = 34568
# Test suites that need the local test server running while they execute.
TESTS_REQUIRING_SERVER = ["downloader_tests", "storage_tests", "partners_api_tests"]
class TestRunner:
    """Discovers *_tests executables in a folder, runs them, reports results.

    Suites listed in TESTS_REQUIRING_SERVER are grouped so the local test
    server is started once before them and stopped afterwards.
    """

    def print_pretty(self, result, tests):
        """Logs a section header (e.g. "FAILED") and one bullet per test.

        Nothing is logged for an empty or None list.
        """
        if not tests:
            return
        logging.info("\n{result}".format(result=result.upper()))
        for test in tests:
            logging.info("- {test}".format(test=test))

    def set_global_vars(self):
        """Parses command-line options into attributes of this runner."""
        parser = OptionParser()
        parser.add_option("-o", "--output", dest="output", default="testlog.log", help="resulting log file. Default testlog.log")
        parser.add_option("-f", "--folder", dest="folder", default="omim-build-release/out/release", help="specify the folder where the tests reside (absolute path or relative to the location of this script)")
        parser.add_option("-d", "--data_path", dest="data_path", help="Path to data files (passed to the test executables as --data_path=<value>)")
        parser.add_option("-u", "--user_resource_path", dest="resource_path", help="Path to resources, styles and classificators (passed to the test executables as --user_resource_path=<value>)")
        parser.add_option("-i", "--include", dest="runlist", action="append", default=[], help="Include test into execution, comma separated list with no spaces or individual tests, or both. E.g.: -i one -i two -i three,four,five")
        parser.add_option("-e", "--exclude", dest="skiplist", action="append", default=[], help="Exclude test from execution, comma separated list with no spaces or individual tests, or both. E.g.: -i one -i two -i three,four,five")
        parser.add_option("-b", "--boost_tests", dest="boost_tests", action="store_true", default=False, help="Treat all the tests as boost tests (their output is different and it must be processed differently).")
        parser.add_option("-k", "--keep_alive", dest="keep_alive", action="store_true", default=False, help="Keep the server alive after the end of the test. Because the server sometimes fails to start, this reduces the probability of false test failures on CI servers.")
        (options, args) = parser.parse_args()
        self.skiplist = set()
        self.runlist = list()
        for tests in options.skiplist:
            for test in tests.split(","):
                self.skiplist.add(test)
        for tests in options.runlist:
            self.runlist.extend(tests.split(","))
        self.boost_tests = options.boost_tests
        if self.runlist:
            # logging.warn() is a deprecated alias of warning().
            logging.warning("-i or -b option found, the -e option will be ignored")
        self.workspace_path = options.folder
        self.logfile = options.output
        self.data_path = (" --data_path={0}".format(options.data_path) if options.data_path else "")
        self.user_resource_path = (" --user_resource_path={0}".format(options.resource_path) if options.resource_path else "")
        self.keep_alive = options.keep_alive

    def start_server(self):
        """Starts the local test server and gives it time to come up."""
        server = testserver.TestServer()
        server.start_serving()
        time.sleep(3)

    def stop_server(self):
        """Asks the test server to shut down, unless --keep_alive was given."""
        if self.keep_alive:
            return
        try:
            urllib2.urlopen('http://localhost:{port}/kill'.format(port=PORT), timeout=5)
        except (urllib2.URLError, socket.timeout):
            logging.info("Failed to stop the server...")

    def categorize_tests(self):
        """Splits the test binaries on disk into run/skip/not-found/server groups."""
        # Materialize as a list (not a lazy filter) so it can be mutated below.
        test_files_in_dir = [f for f in listdir(self.workspace_path) if f.endswith("_tests")]
        local_skiplist = list()
        if not self.runlist:
            local_skiplist = [t for t in self.skiplist if t in test_files_in_dir]
            not_found = [t for t in self.skiplist if t not in test_files_in_dir]
            tests_to_run = [t for t in test_files_in_dir if t not in local_skiplist]
        else:
            tests_to_run = [t for t in self.runlist if t in test_files_in_dir]
            shuffle(tests_to_run)
            not_found = [t for t in self.runlist if t not in test_files_in_dir]
        # Move the tests that need a server into their own group so the server
        # is started exactly once around them.
        tests_with_server = list(TESTS_REQUIRING_SERVER)
        for test in TESTS_REQUIRING_SERVER:
            if test in tests_to_run:
                tests_to_run.remove(test)
            else:
                tests_with_server.remove(test)
        return {TO_RUN: tests_to_run, SKIP: local_skiplist, NOT_FOUND: not_found, WITH_SERVER: tests_with_server}

    def test_file_with_keys(self, test_file):
        """Returns the command line for one test binary, with common flags appended."""
        boost_keys = " --report_format=xml --report_level=detailed --log_level=test_suite --log_format=xml " if self.boost_tests else ""
        return "{test_file}{boost_keys}{data}{resources}".format(test_file=test_file, boost_keys=boost_keys, data=self.data_path, resources=self.user_resource_path)

    def run_tests(self, tests_to_run):
        """Executes each test binary; returns {FAILED: [...], PASSED: [...]}."""
        failed = list()
        passed = list()
        for test_file in tests_to_run:
            self.log_exec_file(test_file)
            test_file_with_keys = self.test_file_with_keys(test_file)
            logging.info(test_file_with_keys)
            process = subprocess.Popen("{tests_path}/{test_file} 2>> {logfile}".
                                       format(tests_path=self.workspace_path, test_file=test_file_with_keys, logfile=self.logfile),
                                       shell=True,
                                       stdout=subprocess.PIPE)
            logging.info("Pid: {0}".format(process.pid))
            process.wait()
            # Any non-zero code is a failure; with shell=True a test killed by
            # a signal can also surface as a positive code, but "!= 0" is the
            # safe check either way ("> 0" misclassified negative codes).
            if process.returncode != 0:
                failed.append(test_file)
            else:
                passed.append(test_file)
            self.log_exec_file(test_file, result=process.returncode)
        return {FAILED: failed, PASSED: passed}

    def log_exec_file(self, filename, result=None):
        """Writes a BEGIN/END marker (with the exit code on END) to the log file."""
        if self.boost_tests:
            return
        # result can legitimately be 0, so compare against None explicitly.
        logstring = "BEGIN" if result is None else "END"
        resstring = (" | result: {returncode}".format(returncode=result) if result is not None else "")
        with open(self.logfile, "a") as logf:
            # Restored the {filename} placeholder; the previous literal never
            # interpolated the file name even though it was passed to format().
            logf.write("\n{logstring}: {filename}{resstring}\n".format(logstring=logstring, filename=filename, resstring=resstring))

    def rm_log_file(self):
        """Removes a stale log file from a previous run, if any."""
        try:
            remove(self.logfile)
        except OSError:
            pass

    def __init__(self):
        self.set_global_vars()
        self.rm_log_file()

    def merge_dicts_of_lists(self, one, two):
        """Merges two dicts of lists; lists under shared keys are concatenated.

        Neither input dict's lists are mutated.
        """
        if not one:
            return two
        if not two:
            return one
        ret = one.copy()
        for key, value in two.items():
            # Concatenate instead of the old `ret[key].append(value)`, which
            # stored None (append's return value) and nested the list.
            if key in ret:
                ret[key] = ret[key] + value
            else:
                ret[key] = value
        return ret

    def execute(self):
        """Runs all categorized tests (server group in random order) and reports."""
        categorized_tests = self.categorize_tests()
        to_run_and_with_server_keys = [TO_RUN, WITH_SERVER]
        random.shuffle(to_run_and_with_server_keys)
        results = dict()
        for key in to_run_and_with_server_keys:
            if key == WITH_SERVER and categorized_tests[WITH_SERVER]:
                self.start_server()
            results = self.merge_dicts_of_lists(results, self.run_tests(categorized_tests[key]))
            if key == WITH_SERVER and categorized_tests[WITH_SERVER]:
                self.stop_server()
        self.print_pretty("failed", results[FAILED])
        self.print_pretty("skipped", categorized_tests[SKIP])
        self.print_pretty("passed", results[PASSED])
        self.print_pretty("not found", categorized_tests[NOT_FOUND])
def tests_on_disk(path):
return filter(lambda x: x.endswith("_tests"), listdir(path))
# Script entry point: parse command-line options, run the test suites,
# then log the failed/skipped/passed/not-found summaries.
if __name__ == "__main__":
    runner = TestRunner()
    runner.execute()
| |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes to load KITTI and Cityscapes data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import re
from absl import logging
import numpy as np
import scipy.misc
# Dataset-specific cropping and temporal-sampling defaults.
CITYSCAPES_CROP_BOTTOM = True  # Crop bottom 25% to remove the car hood.
CITYSCAPES_CROP_PCT = 0.75
CITYSCAPES_SAMPLE_EVERY = 2  # Sample every 2 frames to match KITTI frame rate.
BIKE_SAMPLE_EVERY = 6  # 5fps, since the bike's motion is slower.
class Bike(object):
  """Load bike video frames.

  Frame ids have the form '<crop><video>/<filename>.jpg', where <crop> is
  'A', 'B' or 'C' — one of three vertical crops of the same source frame
  (see load_image_raw).
  """

  def __init__(self,
               dataset_dir,
               img_height=128,
               img_width=416,
               seq_length=3,
               sample_every=BIKE_SAMPLE_EVERY):
    # Root directory containing one sub-directory of .jpg frames per video.
    self.dataset_dir = dataset_dir
    self.img_height = img_height
    self.img_width = img_width
    # Number of frames in each training sequence.
    self.seq_length = seq_length
    # Temporal subsampling step between consecutive frames of a sequence.
    self.sample_every = sample_every
    self.frames = self.collect_frames()
    self.num_frames = len(self.frames)
    # Every collected frame is a training candidate.
    self.num_train = self.num_frames
    logging.info('Total frames collected: %d', self.num_frames)

  def collect_frames(self):
    """Create a list of unique ids for available frames."""
    video_list = os.listdir(self.dataset_dir)
    logging.info('video_list: %s', video_list)
    frames = []
    for video in video_list:
      im_files = glob.glob(os.path.join(self.dataset_dir, video, '*.jpg'))
      # natural_keys (defined elsewhere in this module) sorts numbered frame
      # files numerically rather than lexicographically.
      im_files = sorted(im_files, key=natural_keys)
      # Adding 3 crops of the video: the same frame appears once per crop
      # prefix, tripling the effective dataset.
      frames.extend(['A' + video + '/' + os.path.basename(f) for f in im_files])
      frames.extend(['B' + video + '/' + os.path.basename(f) for f in im_files])
      frames.extend(['C' + video + '/' + os.path.basename(f) for f in im_files])
    return frames

  def get_example_with_index(self, target_index):
    # Returns False (not an example dict) when no full sequence fits around
    # target_index; callers must check the return value.
    if not self.is_valid_sample(target_index):
      return False
    example = self.load_example(target_index)
    return example

  def load_intrinsics(self, unused_frame_idx, cy):
    """Load intrinsics."""
    # https://www.wired.com/2013/05/calculating-the-angular-view-of-an-iphone/
    # https://codeyarns.com/2015/09/08/how-to-compute-intrinsic-camera-matrix-for-a-camera/
    # https://stackoverflow.com/questions/39992968/how-to-calculate-field-of-view-of-the-camera-from-camera-intrinsic-matrix
    # # iPhone: These numbers are for images with resolution 720 x 1280.
    # Assuming FOV = 50.9 => fx = (1280 // 2) / math.tan(fov / 2) = 1344.8
    # The principal-point y (cy) varies with the crop chosen in
    # load_image_raw, so it is passed in per frame.
    intrinsics = np.array([[1344.8, 0, 1280 // 2],
                           [0, 1344.8, cy],
                           [0, 0, 1.0]])
    return intrinsics

  def is_valid_sample(self, target_index):
    """Checks whether we can find a valid sequence around this frame."""
    # Valid means: the whole sampled window stays inside the frame list and
    # inside a single (crop-prefixed) video.
    target_video, _ = self.frames[target_index].split('/')
    start_index, end_index = get_seq_start_end(target_index,
                                               self.seq_length,
                                               self.sample_every)
    if start_index < 0 or end_index >= self.num_frames:
      return False
    start_video, _ = self.frames[start_index].split('/')
    end_video, _ = self.frames[end_index].split('/')
    if target_video == start_video and target_video == end_video:
      return True
    return False

  def load_image_raw(self, frame_id):
    """Reads the image and crops it according to first letter of frame_id."""
    crop_type = frame_id[0]
    img_file = os.path.join(self.dataset_dir, frame_id[1:])
    # NOTE(review): scipy.misc.imread was removed in modern SciPy; this code
    # assumes an older SciPy (with Pillow) is installed.
    img = scipy.misc.imread(img_file)
    # Tallest crop that still matches the target aspect ratio.
    allowed_height = int(img.shape[1] * self.img_height / self.img_width)
    # Starting height for the middle crop.
    mid_crop_top = int(img.shape[0] / 2 - allowed_height / 2)
    # How much to go up or down to get the other two crops.
    height_var = int(mid_crop_top / 3)
    if crop_type == 'A':
      crop_top = mid_crop_top - height_var
      cy = allowed_height / 2 + height_var
    elif crop_type == 'B':
      crop_top = mid_crop_top
      cy = allowed_height / 2
    elif crop_type == 'C':
      crop_top = mid_crop_top + height_var
      cy = allowed_height / 2 - height_var
    else:
      raise ValueError('Unknown crop_type: %s' % crop_type)
    # NOTE(review): the +1 makes the crop one row taller than
    # allowed_height — confirm this is intentional.
    crop_bottom = crop_top + allowed_height + 1
    return img[crop_top:crop_bottom, :, :], cy

  def load_image_sequence(self, target_index):
    """Returns a list of images around target index."""
    start_index, end_index = get_seq_start_end(target_index,
                                               self.seq_length,
                                               self.sample_every)
    image_seq = []
    for idx in range(start_index, end_index + 1, self.sample_every):
      frame_id = self.frames[idx]
      img, cy = self.load_image_raw(frame_id)
      # Zoom factors (and the returned cy) are captured from the target
      # frame only; callers rely on is_valid_sample having ensured the
      # target lies inside the window.
      if idx == target_index:
        zoom_y = self.img_height / img.shape[0]
        zoom_x = self.img_width / img.shape[1]
      img = scipy.misc.imresize(img, (self.img_height, self.img_width))
      image_seq.append(img)
    return image_seq, zoom_x, zoom_y, cy

  def load_example(self, target_index):
    """Returns a sequence with requested target frame."""
    image_seq, zoom_x, zoom_y, cy = self.load_image_sequence(target_index)
    target_video, target_filename = self.frames[target_index].split('/')
    # Put A, B, C at the end for better shuffling.
    target_video = target_video[1:] + target_video[0]
    intrinsics = self.load_intrinsics(target_index, cy)
    intrinsics = self.scale_intrinsics(intrinsics, zoom_x, zoom_y)
    example = {}
    example['intrinsics'] = intrinsics
    example['image_seq'] = image_seq
    example['folder_name'] = target_video
    example['file_name'] = target_filename.split('.')[0]
    return example

  def scale_intrinsics(self, mat, sx, sy):
    # Rescales focal lengths and the principal point after an image resize;
    # the input matrix is not modified.
    out = np.copy(mat)
    out[0, 0] *= sx
    out[0, 2] *= sx
    out[1, 1] *= sy
    out[1, 2] *= sy
    return out
class KittiRaw(object):
  """Reads KITTI raw data files.

  Frame ids have the form '<drive> <cam_id> <frame_id>'. Frames listed in
  the static-frames file (car not moving) and drives belonging to the test
  split are excluded from training.
  """

  def __init__(self,
               dataset_dir,
               split,
               load_pose=False,
               img_height=128,
               img_width=416,
               seq_length=3):
    static_frames_file = 'dataset/kitti/static_frames.txt'
    test_scene_file = 'dataset/kitti/test_scenes_' + split + '.txt'
    with open(get_resource_path(test_scene_file), 'r') as f:
      test_scenes = f.readlines()
    # t[:-1] strips the trailing newline from each scene name.
    self.test_scenes = [t[:-1] for t in test_scenes]
    self.dataset_dir = dataset_dir
    self.img_height = img_height
    self.img_width = img_width
    self.seq_length = seq_length
    self.load_pose = load_pose
    # The two color cameras of the KITTI rig.
    self.cam_ids = ['02', '03']
    self.date_list = [
        '2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03'
    ]
    self.collect_static_frames(static_frames_file)
    self.collect_train_frames()

  def collect_static_frames(self, static_frames_file):
    """Reads the list of frames to exclude, one per camera id."""
    with open(get_resource_path(static_frames_file), 'r') as f:
      frames = f.readlines()
    self.static_frames = []
    for fr in frames:
      if fr == '\n':
        continue
      unused_date, drive, frame_id = fr.split(' ')
      # frame_id[:-1] drops the trailing newline. Builtin int() replaces
      # np.int, which was removed in NumPy 1.24.
      fid = '%.10d' % (int(frame_id[:-1]))
      for cam_id in self.cam_ids:
        self.static_frames.append(drive + ' ' + cam_id + ' ' + fid)

  def collect_train_frames(self):
    """Creates a list of training frames."""
    all_frames = []
    for date in self.date_list:
      date_dir = os.path.join(self.dataset_dir, date)
      drive_set = os.listdir(date_dir)
      for dr in drive_set:
        drive_dir = os.path.join(date_dir, dr)
        if os.path.isdir(drive_dir):
          # dr[:-5] strips the '_sync' suffix to match test-scene names.
          if dr[:-5] in self.test_scenes:
            continue
          for cam in self.cam_ids:
            img_dir = os.path.join(drive_dir, 'image_' + cam, 'data')
            num_frames = len(glob.glob(img_dir + '/*[0-9].png'))
            for i in range(num_frames):
              frame_id = '%.10d' % i
              all_frames.append(dr + ' ' + cam + ' ' + frame_id)
    # Remove frames where the car is static.
    for s in self.static_frames:
      try:
        all_frames.remove(s)
      except ValueError:
        pass
    self.train_frames = all_frames
    self.num_train = len(self.train_frames)

  def is_valid_sample(self, frames, target_index):
    """Checks whether we can find a valid sequence around this frame."""
    # Valid means the whole window lies inside the frame list, within one
    # drive and one camera.
    num_frames = len(frames)
    target_drive, cam_id, _ = frames[target_index].split(' ')
    start_index, end_index = get_seq_start_end(target_index, self.seq_length)
    if start_index < 0 or end_index >= num_frames:
      return False
    start_drive, start_cam_id, _ = frames[start_index].split(' ')
    end_drive, end_cam_id, _ = frames[end_index].split(' ')
    if (target_drive == start_drive and target_drive == end_drive and
        cam_id == start_cam_id and cam_id == end_cam_id):
      return True
    return False

  def get_example_with_index(self, target_index):
    # Returns False (not an example dict) when no valid sequence surrounds
    # target_index; callers must check for that.
    if not self.is_valid_sample(self.train_frames, target_index):
      return False
    example = self.load_example(self.train_frames, target_index)
    return example

  def load_image_sequence(self, frames, target_index):
    """Returns a sequence with requested target frame."""
    start_index, end_index = get_seq_start_end(target_index, self.seq_length)
    image_seq = []
    for index in range(start_index, end_index + 1):
      drive, cam_id, frame_id = frames[index].split(' ')
      img = self.load_image_raw(drive, cam_id, frame_id)
      # Zoom factors are captured from the target frame only.
      if index == target_index:
        zoom_y = self.img_height / img.shape[0]
        zoom_x = self.img_width / img.shape[1]
      img = scipy.misc.imresize(img, (self.img_height, self.img_width))
      image_seq.append(img)
    return image_seq, zoom_x, zoom_y

  def load_pose_sequence(self, frames, target_index):
    """Returns a sequence of pose vectors for frames around the target frame."""
    target_drive, _, target_frame_id = frames[target_index].split(' ')
    target_pose = self.load_pose_raw(target_drive, target_frame_id)
    # Bug fix: the window is defined by the integer list index, not by the
    # frame-id string (the old code passed target_frame_id here and below,
    # so the int/str comparison never skipped the target frame).
    start_index, end_index = get_seq_start_end(target_index, self.seq_length)
    pose_seq = []
    for index in range(start_index, end_index + 1):
      if index == target_index:
        continue
      drive, _, frame_id = frames[index].split(' ')
      pose = self.load_pose_raw(drive, frame_id)
      # From target to index.
      pose = np.dot(np.linalg.inv(pose), target_pose)
      pose_seq.append(pose)
    return pose_seq

  def load_example(self, frames, target_index):
    """Returns a sequence with requested target frame."""
    image_seq, zoom_x, zoom_y = self.load_image_sequence(frames, target_index)
    target_drive, target_cam_id, target_frame_id = (
        frames[target_index].split(' '))
    intrinsics = self.load_intrinsics_raw(target_drive, target_cam_id)
    intrinsics = self.scale_intrinsics(intrinsics, zoom_x, zoom_y)
    example = {}
    example['intrinsics'] = intrinsics
    example['image_seq'] = image_seq
    example['folder_name'] = target_drive + '_' + target_cam_id + '/'
    example['file_name'] = target_frame_id
    if self.load_pose:
      pose_seq = self.load_pose_sequence(frames, target_index)
      example['pose_seq'] = pose_seq
    return example

  def load_pose_raw(self, drive, frame_id):
    """Reads one pose file and returns it as a 4x4 homogeneous matrix."""
    date = drive[:10]
    pose_file = os.path.join(self.dataset_dir, date, drive, 'poses',
                             frame_id + '.txt')
    with open(pose_file, 'r') as f:
      pose = f.readline()
    pose = np.array(pose.split(' ')).astype(np.float32).reshape(3, 4)
    # Append the [0, 0, 0, 1] row to make the transform invertible.
    pose = np.vstack((pose, np.array([0, 0, 0, 1]).reshape((1, 4))))
    return pose

  def load_image_raw(self, drive, cam_id, frame_id):
    """Reads one raw image for the given drive/camera/frame."""
    date = drive[:10]
    img_file = os.path.join(self.dataset_dir, date, drive, 'image_' + cam_id,
                            'data', frame_id + '.png')
    img = scipy.misc.imread(img_file)
    return img

  def load_intrinsics_raw(self, drive, cam_id):
    """Returns the 3x3 intrinsics of the rectified camera `cam_id`."""
    date = drive[:10]
    calib_file = os.path.join(self.dataset_dir, date, 'calib_cam_to_cam.txt')
    filedata = self.read_raw_calib_file(calib_file)
    p_rect = np.reshape(filedata['P_rect_' + cam_id], (3, 4))
    intrinsics = p_rect[:3, :3]
    return intrinsics

  # From https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
  def read_raw_calib_file(self, filepath):
    """Read in a calibration file and parse into a dictionary."""
    data = {}
    with open(filepath, 'r') as f:
      for line in f:
        key, value = line.split(':', 1)
        # The only non-float values in these files are dates, which we don't
        # care about.
        try:
          data[key] = np.array([float(x) for x in value.split()])
        except ValueError:
          pass
    return data

  def scale_intrinsics(self, mat, sx, sy):
    """Rescales intrinsics after an image resize; `mat` is not modified."""
    out = np.copy(mat)
    out[0, 0] *= sx
    out[0, 2] *= sx
    out[1, 1] *= sy
    out[1, 2] *= sy
    return out
class KittiOdom(object):
  """Reads KITTI odometry data files.

  Frame ids have the form '<sequence> <frame>'; sequences 0-8 are used for
  training, 9-10 for testing.
  """

  def __init__(self, dataset_dir, img_height=128, img_width=416, seq_length=3):
    self.dataset_dir = dataset_dir
    self.img_height = img_height
    self.img_width = img_width
    self.seq_length = seq_length
    self.train_seqs = [0, 1, 2, 3, 4, 5, 6, 7, 8]
    self.test_seqs = [9, 10]
    self.collect_test_frames()
    self.collect_train_frames()

  def collect_test_frames(self):
    """Builds the frame-id list for the held-out sequences."""
    self.test_frames = []
    for sequence in self.test_seqs:
      image_dir = os.path.join(self.dataset_dir, 'sequences',
                               '%.2d' % sequence, 'image_2')
      frame_count = len(glob.glob(os.path.join(image_dir, '*.png')))
      self.test_frames.extend(
          '%.2d %.6d' % (sequence, n) for n in range(frame_count))
    self.num_test = len(self.test_frames)

  def collect_train_frames(self):
    """Builds the frame-id list for the training sequences."""
    self.train_frames = []
    for sequence in self.train_seqs:
      image_dir = os.path.join(self.dataset_dir, 'sequences',
                               '%.2d' % sequence, 'image_2')
      frame_count = len(glob.glob(os.path.join(image_dir, '*.png')))
      self.train_frames.extend(
          '%.2d %.6d' % (sequence, n) for n in range(frame_count))
    self.num_train = len(self.train_frames)

  def is_valid_sample(self, frames, target_frame_index):
    """Checks whether we can find a valid sequence around this frame."""
    first, last = get_seq_start_end(target_frame_index, self.seq_length)
    # The window must stay inside the frame list...
    if first < 0 or last >= len(frames):
      return False
    # ...and inside a single drive.
    target_drive = frames[target_frame_index].split(' ')[0]
    first_drive = frames[first].split(' ')[0]
    last_drive = frames[last].split(' ')[0]
    return first_drive == target_drive and last_drive == target_drive

  def load_image_sequence(self, frames, target_frame_index):
    """Loads, resizes and returns the images around the target frame,
    together with the zoom factors of the target frame."""
    first, last = get_seq_start_end(target_frame_index, self.seq_length)
    image_seq = []
    for frame_index in range(first, last + 1):
      drive, frame_id = frames[frame_index].split(' ')
      img = self.load_image(drive, frame_id)
      if frame_index == target_frame_index:
        # Zoom factors are taken from the target frame only.
        zoom_y = self.img_height / img.shape[0]
        zoom_x = self.img_width / img.shape[1]
      img = scipy.misc.imresize(img, (self.img_height, self.img_width))
      image_seq.append(img)
    return image_seq, zoom_x, zoom_y

  def load_example(self, frames, target_frame_index):
    """Returns the example dict for the requested target frame."""
    image_seq, zoom_x, zoom_y = self.load_image_sequence(frames,
                                                         target_frame_index)
    drive, frame_id = frames[target_frame_index].split(' ')
    intrinsics = self.scale_intrinsics(
        self.load_intrinsics(drive, frame_id), zoom_x, zoom_y)
    return {
        'intrinsics': intrinsics,
        'image_seq': image_seq,
        'folder_name': drive,
        'file_name': frame_id,
    }

  def get_example_with_index(self, target_frame_index):
    # Returns False when no valid sequence surrounds the index.
    if not self.is_valid_sample(self.train_frames, target_frame_index):
      return False
    return self.load_example(self.train_frames, target_frame_index)

  def load_image(self, drive, frame_id):
    """Reads one raw image of the given sequence."""
    img_file = os.path.join(self.dataset_dir, 'sequences',
                            '%s/image_2/%s.png' % (drive, frame_id))
    return scipy.misc.imread(img_file)

  def load_intrinsics(self, drive, unused_frame_id):
    """Returns the 3x3 intrinsics of camera 2 for the given sequence."""
    calib_file = os.path.join(self.dataset_dir, 'sequences',
                              '%s/calib.txt' % drive)
    proj_c2p, _ = self.read_calib_file(calib_file)
    return proj_c2p[:3, :3]

  def read_calib_file(self, filepath, cam_id=2):
    """Read in a calibration file and parse into a dictionary."""

    def to_matrix(line, shape):
      # First field is the matrix label; the rest are the values.
      fields = line.split()
      return np.array(fields[1:]).reshape(shape).astype(np.float32)

    with open(filepath, 'r') as f:
      rows = f.readlines()
    proj_c2p = to_matrix(rows[cam_id], shape=(3, 4))
    velo = to_matrix(rows[-1], shape=(3, 4))
    # Append [0, 0, 0, 1] to make the velo->cam transform 4x4.
    filler = np.array([0, 0, 0, 1]).reshape((1, 4))
    proj_v2c = np.concatenate((velo, filler), axis=0)
    return proj_c2p, proj_v2c

  def scale_intrinsics(self, mat, sx, sy):
    """Rescales intrinsics after an image resize; `mat` is not modified."""
    scaled = np.copy(mat)
    scaled[0, 0] *= sx
    scaled[0, 2] *= sx
    scaled[1, 1] *= sy
    scaled[1, 2] *= sy
    return scaled
class Cityscapes(object):
"""Reads Cityscapes data files."""
  def __init__(self,
               dataset_dir,
               split='train',
               crop_bottom=CITYSCAPES_CROP_BOTTOM,  # Crop the car logo.
               crop_pct=CITYSCAPES_CROP_PCT,
               sample_every=CITYSCAPES_SAMPLE_EVERY,
               img_height=128,
               img_width=416,
               seq_length=3):
    """Collects the frame ids of the requested Cityscapes split.

    Args:
      dataset_dir: Root directory of the Cityscapes download; frames are
        expected under 'leftImg8bit_sequence/<split>/<city>/'.
      split: Dataset split to load ('train' fills num_train, anything else
        fills num_test).
      crop_bottom: Whether the bottom of each image is cropped off.
      crop_pct: Fraction of the image height kept when cropping.
      sample_every: Temporal subsampling step between sequence frames.
      img_height: Output image height.
      img_width: Output image width.
      seq_length: Number of frames per training sequence.
    """
    self.dataset_dir = dataset_dir
    self.split = split
    self.crop_bottom = crop_bottom
    self.crop_pct = crop_pct
    self.sample_every = sample_every
    self.img_height = img_height
    self.img_width = img_width
    self.seq_length = seq_length
    self.frames = self.collect_frames(split)
    self.num_frames = len(self.frames)
    # Downstream readers query num_train or num_test depending on the split.
    if split == 'train':
      self.num_train = self.num_frames
    else:
      self.num_test = self.num_frames
    logging.info('Total frames collected: %d', self.num_frames)
def collect_frames(self, split):
img_dir = os.path.join(self.dataset_dir, 'leftImg8bit_sequence', split)
city_list = os.listdir(img_dir)
frames = []
for city in city_list:
img_files = glob.glob(os.path.join(img_dir, city, '*.png'))
for f in img_files:
frame_id = os.path.basename(f).split('leftImg8bit')[0]
frames.append(frame_id)
return frames
def get_example_with_index(self, target_index):
target_frame_id = self.frames[target_index]
if not self.is_valid_example(target_frame_id):
return False
example = self.load_example(self.frames[target_index])
return example
def load_intrinsics(self, frame_id, split):
"""Read intrinsics data for frame."""
city, seq, _, _ = frame_id.split('_')
camera_file = os.path.join(self.dataset_dir, 'camera', split, city,
city + '_' + seq + '_*_camera.json')
camera_file = glob.glob(camera_file)[0]
with open(camera_file, 'r') as f:
camera = json.load(f)
fx = camera['intrinsic']['fx']
fy = camera['intrinsic']['fy']
u0 = camera['intrinsic']['u0']
v0 = camera['intrinsic']['v0']
# Cropping the bottom of the image and then resizing it to the same
# (height, width) amounts to stretching the image's height.
if self.crop_bottom:
fy *= 1.0 / self.crop_pct
intrinsics = np.array([[fx, 0, u0],
[0, fy, v0],
[0, 0, 1]])
return intrinsics
def is_valid_example(self, target_frame_id):
"""Checks whether we can find a valid sequence around this frame."""
city, snippet_id, target_local_frame_id, _ = target_frame_id.split('_')
start_index, end_index = get_seq_start_end(
int(target_local_frame_id), self.seq_length, self.sample_every)
for index in range(start_index, end_index + 1, self.sample_every):
local_frame_id = '%.6d' % index
frame_id = '%s_%s_%s_' % (city, snippet_id, local_frame_id)
image_filepath = os.path.join(self.dataset_dir, 'leftImg8bit_sequence',
self.split, city,
frame_id + 'leftImg8bit.png')
if not os.path.exists(image_filepath):
return False
return True
def load_image_sequence(self, target_frame_id):
"""Returns a sequence with requested target frame."""
city, snippet_id, target_local_frame_id, _ = target_frame_id.split('_')
start_index, end_index = get_seq_start_end(
int(target_local_frame_id), self.seq_length, self.sample_every)
image_seq = []
for index in range(start_index, end_index + 1, self.sample_every):
local_frame_id = '%.6d' % index
frame_id = '%s_%s_%s_' % (city, snippet_id, local_frame_id)
image_filepath = os.path.join(self.dataset_dir, 'leftImg8bit_sequence',
self.split, city,
frame_id + 'leftImg8bit.png')
img = scipy.misc.imread(image_filepath)
if self.crop_bottom:
ymax = int(img.shape[0] * self.crop_pct)
img = img[:ymax]
raw_shape = img.shape
if index == int(target_local_frame_id):
zoom_y = self.img_height / raw_shape[0]
zoom_x = self.img_width / raw_shape[1]
img = scipy.misc.imresize(img, (self.img_height, self.img_width))
image_seq.append(img)
return image_seq, zoom_x, zoom_y
def load_example(self, target_frame_id):
"""Returns a sequence with requested target frame."""
image_seq, zoom_x, zoom_y = self.load_image_sequence(target_frame_id)
intrinsics = self.load_intrinsics(target_frame_id, self.split)
intrinsics = self.scale_intrinsics(intrinsics, zoom_x, zoom_y)
example = {}
example['intrinsics'] = intrinsics
example['image_seq'] = image_seq
example['folder_name'] = target_frame_id.split('_')[0]
example['file_name'] = target_frame_id[:-1]
return example
def scale_intrinsics(self, mat, sx, sy):
out = np.copy(mat)
out[0, 0] *= sx
out[0, 2] *= sx
out[1, 1] *= sy
out[1, 2] *= sy
return out
def get_resource_path(relative_path):
  """Maps a relative resource path to a usable path (identity here)."""
  return relative_path
def get_seq_start_end(target_index, seq_length, sample_every=1):
  """Returns absolute seq start and end indices for a given target frame.

  The target sits at (or just after) the middle of the sequence; indices
  step by `sample_every`.
  """
  half_offset = sample_every * ((seq_length - 1) // 2)
  end_index = target_index + half_offset
  start_index = end_index - sample_every * (seq_length - 1)
  return start_index, end_index
def atoi(text):
  """Converts all-digit strings to int; returns anything else unchanged."""
  if text.isdigit():
    return int(text)
  return text
def natural_keys(text):
  """Splits `text` into a natural-sort key: digit runs become ints."""
  parts = re.split(r'(\d+)', text)
  return [atoi(part) for part in parts]
| |
__author__ = 'CwT'
import struct
import Util
import OatParse
class Symbol:
    """A 32-bit ELF symbol table entry (Elf32_Sym), read from a file."""
    def __init__(self):
        self.str = None  # resolved name string (filled in by the caller)
        self.name = 0  # index of name in the string table
        self.value = 0
        self.size = 0
        self.info = 0
        self.other = 0
        self.shndx = 0
        # On-disk record size: 3 words + 2 bytes + 1 half-word = 16 bytes.
        # NOTE(review): readfromfd() overwrites self.size with the symbol's
        # st_size field, so each Symbol instance must only be read once.
        self.size = 4 * 3 + 2 * 1 + 2
    def readfromfd(self, file):
        """Reads one Elf32_Sym record from the current file position."""
        # Layout: st_name, st_value, st_size, st_info, st_other, st_shndx.
        value = struct.unpack("IIIBBH", file.read(self.size))
        self.name = value[0]
        self.value = value[1]
        self.size = value[2]
        self.info = value[3]
        self.other = value[4]
        self.shndx = value[5]
    def dump(self):
        """Prints all fields to stdout (Python 2 print statements)."""
        print "index(or name):", self.name if not self.str else self.str
        print "value:", hex(self.value)
        print "size:", hex(self.size)
        print "info:", hex(self.info)
        print "other:", hex(self.other)
        print "section head index:", self.shndx
class Shdr:
    """A 32-bit ELF section header (Elf32_Shdr) plus SHT_*/SHF_* constants."""
    # Section type names keyed by sh_type value (display only).
    # NOTE(review): entries 14/15 look like typos for SHT_INIT_ARRAY /
    # SHT_FINI_ARRAY; the strings are only used for dump() output.
    TYPE = {
        0: 'SHT_NULL',  # No associated section
        1: 'SHT_PROGBITS',  # Program-defined contents
        2: 'SHT_SYMTAB',  # Symbol table
        3: 'SHT_STRTAB',  # String table
        4: 'SHT_RELA',  # Relocation entries; explicit addends
        5: 'SHT_HASH',  # Symbol hash table
        6: 'SHT_DYNAMIC',  # Information for dynamic linking
        7: 'SHT_NOTE',  # Information about the file
        8: 'SHT_NOBITS',  # Data occupies no space in the file
        9: 'SHT_REL',  # Relocation entries; no explicit addends
        10: 'SHT_SHLIB',  # Reserved
        11: 'SHT_DYNSYM',  # Symbol table
        14: 'SHI_INIT_ARRAY',  # Pointers to initialization functions
        15: 'SHT_FINT_ARRAY',  # Pointers to termination functions
        16: 'SHT_PREINIT_ARRAY',  # Pointers to pre-init functions
        17: 'SHT_GROUP',  # Section group
        18: 'SHT_SYMTAB_SHNDX',  # indices for SHN_XINDEX entries
        0x60000000: 'SHT_LOOS',  # Lowest operating system-specific type
        0x6ffffff5: 'SHT_GNU_ATTRIBUTES',  # object attributes
        0x6fffffd6: 'SHT_GNU_HASH',  # GNU-style hash table
        0x6ffffffd: 'SHT_GNU_verdef',  # GNU version definitions
        0x6ffffffe: 'SHT_GNU_verneed',  # GNU version references
        0x6fffffff: 'SHT_GNU_versym',  # GNU symbol versions table
        0x70000001: 'SHT_ARM_EXIDX',  # Exception index table
        0x70000002: 'SHT_ARM_PREEMPTMAP',  # BPABI DLL dynamic linking pre-emption map
        0x70000003: 'SHT_ARM_ATTRIBUTES',  # Object file compatibility attributes
        0x70000004: 'SHT_ARM_DEBUGOVERLAY',
        0x70000005: 'SHT_ARM_OVERLAYSECTIOn',
        0x70000006: 'SHT_HEX_ORDERED',
    }
    # sh_flags bits.
    SHF_WRITE = 1
    SHF_ALLOC = 2
    SHF_EXECINSTR = 0x4
    SHF_MERGE = 0x10
    SHF_STRINGS = 0x20
    SHF_INFO_LINK = 0x40
    SHF_LINK_ORDER = 0x80
    SHF_OS_NONCONFORMING = 0x100
    SHF_GROUP = 0x200
    SHF_TLS = 0x400
    SHF_EXCLUDE = 0x80000000
    def __init__(self):
        self.str = None  # resolved section name (set via setName())
        self.name = 0  # index of section header string table section
        self.type = 0
        self.flags = 0
        self.addr = 0  # address in memory
        self.offset = 0  # offset from the beginning of the file
        self.size = 0
        self.link = 0  # depend on its type
        self.info = 0
        self.addralign = 0
        self.entsize = 0  # size of records contained within this section
        # On-disk record size of an Elf32_Shdr (10 words).
        # NOTE(review): readfromfd() overwrites self.size with sh_size, so
        # each Shdr instance must only be read once.
        self.size = 40
    def setName(self, str):
        """Attaches the resolved name string from the shstrtab."""
        self.str = str
    def readfromfd(self, file):
        """Reads one Elf32_Shdr record from the current file position."""
        value = struct.unpack("IIIIIIIIII", file.read(self.size))
        self.name = value[0]
        self.type = value[1]
        self.flags = value[2]
        self.addr = value[3]
        self.offset = value[4]
        self.size = value[5]
        self.link = value[6]
        self.info = value[7]
        self.addralign = value[8]
        self.entsize = value[9]
    def dump(self):
        """Prints all fields to stdout (Python 2 print statements)."""
        print "Name index(or name):", self.name if not self.str else self.str
        print "type:", hex(self.type) if self.type not in Shdr.TYPE.keys() else Shdr.TYPE[self.type]
        print "flag:", hex(self.flags)
        print "address in memory", hex(self.addr)
        print "address in file", hex(self.offset)
        print "size in memory", hex(self.size)
        print "table link:", self.link
        print "section info:", self.info
        print "address alignment:", hex(self.addralign)
        print "size of records:", self.entsize
class Phdr:
    """A 32-bit ELF program header (Elf32_Phdr) plus PT_*/PF_* constants."""
    # Segment type names keyed by p_type value (display only).
    TYPE = {0: 'PT_NULL',  # unused
            1: 'PT_LOAD',  # loadable segment
            2: 'PT_DYNAMIC',  # dynamic link information
            3: 'PT_INTERP',  # interpreter pathname
            4: 'PT_NOTE',  # Auxiliary information
            5: 'PT_SHLIB',  # Reserved
            6: 'PT_PHDR',  # The program header table itself
            7: 'PT_TLS',  # The thread pool storage template
            0x60000000: 'PT_LOOS',  # Lowest operating system-specific pt entry
            0x6fffffff: 'PT_HIOS',  # Highest operation system-specific pt entry
            # 'PT_LOPROC': 0x70000000, # Lowest processor-specific program hdr entry
            0x7fffffff: 'PT_HIPROC',  # Highest ...
            # x86-64 program header types
            0x6474e550: 'PT_GNU_EH_FRAME',
            0x6474e551: 'PT_GNU_STACK',  # indicate stack executability
            0x6474e552: 'PT_GNU_RELRO',  # Read-only after relocation
            # ARM program header types
            0x70000000: 'PT_ARM_ARCHEXT',  # platform architecture compatibility info
            # These all contain stack unwind tables
            0x70000001: 'PT_ARM_EXIDX',  # same as PT_ARM_UNWIND
            }
    # p_flags permission bits.
    PF_X = 1  # execute
    PF_W = 2  # write
    PF_R = 4  # read
    def isReadable(self):
        """True if the segment has the PF_R permission bit set."""
        return self.flags & Phdr.PF_R != 0
    def isWritable(self):
        """True if the segment has the PF_W permission bit set."""
        return self.flags & Phdr.PF_W != 0
    def isExecutable(self):
        """True if the segment has the PF_X permission bit set."""
        return self.flags & Phdr.PF_X != 0
    def __init__(self):
        self.type = 0
        self.offset = 0
        self.vaddr = 0  # virtual address
        self.paddr = 0  # physical address
        self.filesz = 0  # size in file
        self.memsz = 0  # size in memory
        self.flags = 0
        self.align = 0
        self.size = 32  # on-disk record size of an Elf32_Phdr (8 words)
    def readfromfd(self, file):
        """Reads one Elf32_Phdr record from the current file position."""
        value = struct.unpack("IIIIIIII", file.read(self.size))
        self.type = value[0]
        self.offset = value[1]
        self.vaddr = value[2]
        self.paddr = value[3]
        self.filesz = value[4]
        self.memsz = value[5]
        self.flags = value[6]
        self.align = value[7]
    def dump(self):
        """Prints all fields to stdout (Python 2 print statements).

        NOTE(review): raises KeyError for p_type values missing from TYPE.
        """
        print "Type:", Phdr.TYPE[self.type]
        print "offset:", hex(self.offset)
        print "virtual address:", hex(self.vaddr)
        print "physical address:", hex(self.paddr)
        print "size in file:", hex(self.filesz)
        print "size in memory:", hex(self.memsz)
        print "Flags:", "PF_R" if self.isReadable() else '', "PF_W" if self.isWritable() else '', "PF_E" if self.isExecutable() else ''
        print "Alignment:", hex(self.align)
class Ehdr:
    """The 32-bit ELF file header (Elf32_Ehdr)."""
    def __init__(self):
        self.ident = []  # 16 bytes (e_ident: magic, class, endianness, ...)
        self.type = 0  # 0: unknown; 1: relocation 2: execute 3: share
        self.machine = 0
        self.version = 0  # version: 1
        self.entry = 0
        self.phoff = 0  # program header table offset
        self.shoff = 0  # section header table offset
        self.flags = 0
        self.ehsize = 0  # size of elf header
        self.phentsize = 0  # size of each program table entry
        self.phnum = 0  # number of program table entries
        self.shentsize = 0  # size of each section table entry
        self.shnum = 0  # number of section table entries
        self.shstrndx = 0  # section header string index
        self.size = 52  # total bytes of an Elf32_Ehdr
    def readfd(self, file):
        """Reads the ELF header from the start of `file` (seeks to 0)."""
        file.seek(0)
        value = struct.unpack("16sHHIIIIIHHHHHH", file.read(self.size))
        self.ident = value[0]
        self.type = value[1]
        self.machine = value[2]
        self.version = value[3]
        self.entry = value[4]
        self.phoff = value[5]
        self.shoff = value[6]
        self.flags = value[7]
        self.ehsize = value[8]
        self.phentsize = value[9]
        self.phnum = value[10]
        self.shentsize = value[11]
        self.shnum = value[12]
        self.shstrndx = value[13]
    def dump(self):
        """Prints all header fields to stdout (Python 2 print statements)."""
        print "ident:",
        for x in self.ident:
            print hex(ord(x)),
        print ''
        print "type:", self.type
        print "machine:", self.machine
        print "version:", self.version
        print "entry Addr:", hex(self.entry)
        print "program header table offset:", hex(self.phoff)
        print "section header table offset:", hex(self.shoff)
        print "flag", hex(self.flags)
        print "size of ELF header:", self.ehsize
        print "size of program header:", self.phentsize
        print "number of program header:", self.phnum
        print "size of section header:", self.shentsize
        print "number of section header:", self.shnum
        print "index of string table section:", self.shstrndx
class ELFile:
    """A lazily-parsed 32-bit ELF file: header, section table, symbol table."""
    def __init__(self):
        self.ehdr = Ehdr()
        self.shTab = None  # cached section header table (list of Shdr)
        self.symTab = None  # cached dynamic symbol table (list of Symbol)
    def readfd(self, file):
        """Parses the ELF header; must be called before the get*/dump* methods."""
        self.ehdr.readfd(file)
    def getShTab(self, file):
        """Returns the section header table, resolving names via shstrtab.

        The result is cached on first call.
        """
        if self.shTab is not None:
            return self.shTab
        file.seek(self.ehdr.shoff)
        shtab = []
        for i in range(self.ehdr.shnum):
            shdr = Shdr()
            shdr.readfromfd(file)
            shtab.append(shdr)
        # Resolve each section's name from the section-header string table.
        strtab = shtab[self.ehdr.shstrndx]
        for i in range(self.ehdr.shnum):
            shtab[i].setName(Util.getStrbyfd(file, shtab[i].name + strtab.offset))
        self.shTab = shtab
        return shtab
    def getSymTab(self, file):
        """Returns the .dynsym symbols with names resolved from .dynstr.

        The result is cached on first call.
        """
        if self.symTab is not None:
            return self.symTab
        shtab = self.getShTab(file)
        symtab = []
        symStr = None
        for shdr in shtab:
            if shdr.str == '.dynsym':  # DYNSYM
                file.seek(shdr.offset)
                # 16 is the on-disk size of an Elf32_Sym record.
                # NOTE(review): integer division here is Python 2 semantics;
                # under Python 3 `/` would produce a float and break range().
                for i in range(shdr.size / 16):
                    sym = Symbol()
                    sym.readfromfd(file)
                    symtab.append(sym)
            elif shdr.str == '.dynstr':
                symStr = shdr
        if symStr is not None:
            for sym in symtab:
                sym.str = Util.getStrbyfd(file, sym.name + symStr.offset)
        self.symTab = symtab
        return symtab
    def dump(self):
        """Prints the ELF header."""
        self.ehdr.dump()
    def dumpPhdr(self, file):
        """Prints every program header."""
        file.seek(self.ehdr.phoff)
        for i in range(self.ehdr.phnum):
            phdr = Phdr()
            phdr.readfromfd(file)
            phdr.dump()
            print ''
    def dumpShdr(self, file):
        """Prints every section header."""
        shtab = self.getShTab(file)
        for i in range(self.ehdr.shnum):
            shtab[i].dump()
            print ''
    def dumpSymTab(self, file):
        """Prints every dynamic symbol."""
        symtab = self.getSymTab(file)
        for sym in symtab:
            sym.dump()
            print ''
if __name__ == '__main__':
    # Smoke test: parse the local ELF file "test/c", dump its section
    # headers, then locate the OAT blob embedded in .rodata and list the
    # dex files it contains (Android OAT layout).
    elf = ELFile()
    with open("test/c", "rb") as file:
        elf.readfd(file)
        elf.dumpShdr(file)
        shtab = elf.getShTab(file)
        for shdr in shtab:
            if shdr.str == ".rodata":
                oatFile = OatParse.OATfile()
                oatFile.readfd(file, shdr.offset)
                for dex in oatFile.getDexFiles(file):
                    print dex.name
| |
#! /usr/bin/python
#
# This script builds an OpenDSA textbook according to a specified configuration file
# - Creates an ODSA_Config object from the specified configuration file
# - Validates the configuration file and sets appropriate defaults for omitted fields (see ODSA_Config.py for more information)
# - Makes it easy to reference configuration options
# - Optionally builds JSAV to make sure the library is up-to-date, if specified in the configuration file
# - Initializes the output directory
# - Creates the output directory and a source directory inside it
# - Copies _static directory to the source directory
# - Creates a copy of the config file in the _static directory for use by the gradebook page
# - Generates an index.html file in the output directory of the new book which redirects (via JavaScript) to the book_output_dir (html/)
# - Traverses the 'chapters' section of the configuration file
# - For each chapter and module, maps the name to its chapter or module number (used for numbering during postprocessing)
# - Keeps track of modules encountered and prints an error message if a duplicate module name is detected
# - Creates an ODSA_RST_Module object for each module (see ODSA_RST_Module for more information)
# - Keeps track of the ToDo directives, images, and missing exercises encountered, as well as requirements that are satisfied
# - Maps the module name and number to the chapter its part of (used for correcting the chapter number during postprocessing)
# - Prints out a list of any exercises encountered in RST source files that did not appear in the config file
# - Generates ToDo.rst, if any TODO directives were encountered when processing the book AND if the configuration file does not suppress them
# - Creates table.json and page_chapter.json which are used by Sphinx during the building process
# - Generates a Makefile and conf.py based on templates found in config_templates.py
# - conf.py is configured to point to the original ODSAextensions and _themes directories
# - CONTROLLING INCLUSION OF GLOBAL JS AND CSS FILES - conf.py contains a dictionary called html_context
#   which controls what JS and CSS files are included on ALL module pages, please see the associated comment
# for more information
# - Copies the images encountered while processing the book to the output source directory
# - Runs 'make' on the output directory to build the book using Sphinx
# - Calls update_TOC in postprocessor.py to update the chapter, section and module numbers
import sys
import os
import shutil
import distutils.dir_util
import distutils.file_util
import json
import collections
import re
import subprocess
import codecs
import datetime
from collections import Iterable
from optparse import OptionParser
from config_templates import *
from ODSA_RST_Module import ODSA_RST_Module
from ODSA_Config import ODSA_Config
from postprocessor import update_TOC, update_TermDef
# ---------------------------------------------------------------------------
# Module-level state accumulated while the book is processed (one build per
# process invocation; process_module() mutates these via `global`).
# ---------------------------------------------------------------------------
# List of exercises encountered in RST files that do not appear in the
# configuration file
missing_exercises = []
# List of modules that have been processed, do not allow multiple modules
# with the same name (would cause a conflict in the database)
processed_modules = []
# List of images encountered while processing module files, these will be
# copied from the RST/Images to Images in the output source directory
images = []
# Stores information about ToDo directives
todo_list = []
# List of fulfilled prerequisite topics
satisfied_requirements = []
# Maps the chapter name and number to each module, used for correcting the
# numbers during postprocessing
module_chap_map = {}
# Dictionary which stores a mapping of sections to modules, modules to
# their numbers, and figures, tables, theorems, and equations to their
# numbers
num_ref_map = {}
# Prints the given string to standard error
def print_err(err_msg):
    """Writes *err_msg* followed by a newline to standard error."""
    message = '%s\n' % err_msg
    sys.stderr.write(message)
# Processes a chapter or section of the book
#   - config - a dictionary containing all the configuration options
#   - section - a dictionary where all the keys are sections or modules
#   - index_rst - the index.rst file being generated
#   - depth - the depth of the recursion; 0 for the main chapters, 1 for any subsections, ..., N for the modules
#   - current_section_numbers - a list that contains the numbering scheme for each section or module ([chapter].[section].[...].[module])
#   - section_name - a string passed to modules for inclusion in the RST header that specifies which chapter / section the module belongs to
def process_section(config, section, index_rst, depth, current_section_numbers=[], section_name=''):
    """Recursively writes one chapter/section level into index.rst.

    NOTE(review): the mutable default for current_section_numbers persists
    across calls; generate_index_rst() drives a single traversal per
    process, so numbering is still correct — confirm if reused elsewhere.
    """
    # Initialize the section number for the current depth
    if depth >= len(current_section_numbers):
        current_section_numbers.append(config.start_chap_num)
    for subsect in section:
        # Parse the subsection name by eliminating the path and file extension
        # if it's a module
        subsect_name = os.path.splitext(os.path.basename(subsect))[0]
        num_ref_map[subsect_name] = -1  # Add the section name to num_ref_map
        if not isinstance(section[subsect], Iterable):
            continue
        # A dict with an 'exercises' key is a module; anything else is a
        # nested section.
        if 'exercises' in section[subsect]:
            process_module(config, index_rst, subsect, section[
                           subsect], depth, current_section_numbers, section_name)
        else:
            # List of characters Sphinx uses for headers, the depth of a
            # section header determines which character to use
            sphinx_header_chars = ['=', '-', '`', "'", '.', '*', '+', '^']
            print(" " * depth) + subsect
            index_rst.write(subsect + '\n')
            index_rst.write(
                (sphinx_header_chars[depth] * len(subsect)) + "\n\n")
            # if the chapter is hidden we use odsatoctree
            # the div wrapping the chapter and module will have the
            # 'hide-from-toc' class and will be deleted from the TOC
            if 'hidden' in section[subsect]:
                index_rst.write(".. odsatoctree::\n")
            else:
                index_rst.write(".. toctree::\n")
                index_rst.write("   :numbered:\n")
                index_rst.write("   :maxdepth: 3\n\n")
            process_section(config, section[
                            subsect], index_rst, depth + 1, current_section_numbers, subsect_name)
        # Increments the section count at the current depth
        current_section_numbers[depth] += 1
    # Reset the section number when done processing the current level
    if depth >= 0:
        current_section_numbers[depth] = config.start_chap_num
    index_rst.write("\n")
# Processes a module
#   - config - a dictionary containing all the configuration options
#   - index_rst - the index.rst file being generated
#   - mod_path - the path to the module relative to the RST/<lang> directory
#   - mod_attrib - dictionary containing the module data, 'exercises' is a mandatory field (even if its empty)
#   - depth - the depth of the recursion, used to determine the number of spaces to print before the module name to ensure proper indentation
#   - current_section_numbers - a list that contains the numbering scheme for each section or module ([chapter].[section].[...].[module])
#   - section_name - a string passed to modules for inclusion in the RST header that specifies which chapter / section the module belongs to
def process_module(config, index_rst, mod_path, mod_attrib={'exercises': {}}, depth=0, current_section_numbers=[], section_name=''):
    """Processes one module and accumulates its data in the module globals.

    NOTE(review): mod_attrib and current_section_numbers have mutable
    defaults; they appear to be read-only here, so sharing is harmless —
    confirm before mutating them.
    """
    global todo_list
    global images
    global missing_exercises
    global satisfied_requirements
    global module_chap_map
    global num_ref_map
    global cmap_map
    # Parse the name of the module from mod_path and remove the file extension
    # if it exists
    mod_name = os.path.splitext(os.path.basename(mod_path))[0]
    # Update the reference for each section to point to the first module in
    # the section
    if section_name != '' and num_ref_map[section_name] == -1:
        num_ref_map[section_name] = mod_name
    # Print error message and exit if duplicate module name is detected
    if mod_name in processed_modules:
        print_err(
            'ERROR: Duplicate module name detected, module: %s' % mod_name)
        sys.exit(1)
    # Add module to list of modules processed
    processed_modules.append(mod_name)
    print(" " * depth) + mod_name
    index_rst.write("   %s\n" % mod_name)
    # Initialize the module
    module = ODSA_RST_Module(
        config, mod_path, mod_attrib, satisfied_requirements, section_name, depth, current_section_numbers)
    # Append data from the processed module to the global variables
    todo_list += module.todo_list
    images += module.images
    missing_exercises += module.missing_exercises
    satisfied_requirements += module.requirements_satisfied
    # Python 2 dict merge (dict.items() returns lists under Python 2).
    num_ref_map = dict(num_ref_map.items() + module.num_ref_map.items())
    if len(module.cmap_dict['concepts']) > 0:
        cmap_map = module.cmap_dict
    # Maps the chapter name and number to each module, used for correcting the numbers during postprocessing
    # Have to ignore the last number because that is the module number (which
    # is already provided by Sphinx)
    module_chap_map[mod_name] = [
        section_name, '.'.join(str(i) for i in current_section_numbers[:-1])]
    # Hack to maintain the same numbering scheme as the old preprocessor
    mod_num = ''
    if len(current_section_numbers) > 0:
        mod_num = '%s.%d' % ('.'.join(
            str(j) for j in current_section_numbers[:-1]), (current_section_numbers[-1] + 1))
    num_ref_map[mod_name] = mod_num
def generate_index_rst(config, slides=False):
    """Generates the index.rst file, calls process_section() on config.chapters to recursively process all the modules in the book (in order), as each is processed it is added to the index.rst"""
    print "Generating index.rst\n"
    print "Processing..."
    # Header metadata for the generated index page.
    header_data = {}
    header_data['mod_name'] = 'index'
    header_data['dispModComp'] = 'false'
    header_data['long_name'] = 'Contents'
    header_data['mod_chapter'] = ''
    header_data['mod_date'] = str(datetime.datetime.now()).split('.')[0]
    header_data['mod_options'] = ''
    header_data['build_cmap'] = str(config.build_cmap).lower()
    header_data['unicode_directive'] = rst_header_unicode if not slides else ''
    # Generate the index.rst file
    with codecs.open(config.book_src_dir + 'index.rst', 'w+', "utf-8") as index_rst:
        index_rst.write(index_header.format(config.start_chap_num))
        index_rst.write(rst_header % header_data)
        # Process all the chapter and module information
        process_section(config, config.chapters, index_rst, 0)
        index_rst.write(".. toctree::\n")
        index_rst.write("   :maxdepth: 3\n\n")
        # Process the Gradebook and Registerbook as well
        if not slides:
            process_module(config, mod_path='Gradebook', index_rst=index_rst)
            process_module(config, mod_path='RegisterBook', index_rst=index_rst)
        # If a ToDo file will be generated, append it to index.rst
        if len(todo_list) > 0:
            index_rst.write("   ToDo\n")
        index_rst.write("\n")
        index_rst.write("* :ref:`genindex`\n")
        index_rst.write("* :ref:`search`\n")
def generate_todo_rst(config, slides=False):
    """Sorts the list of ToDo directives (generated while recursively processing each module) by type and writes them all out to a file"""
    print '\nGenerating ToDo file...'
    # Sort the list of todo items by type (module_name, type, todo_directive)
    sorted_todo_list = sorted(todo_list, key=lambda todo: todo[2])
    with open(''.join([config.book_src_dir, 'ToDo.rst']), 'w') as todo_file:
        # Header metadata for the generated ToDo page.
        header_data = {}
        header_data['mod_name'] = 'ToDo'
        header_data['long_name'] = 'ToDo'
        header_data['dispModComp'] = False
        header_data['mod_chapter'] = ''
        header_data['mod_date'] = str(datetime.datetime.now()).split('.')[0]
        header_data['mod_options'] = ''
        header_data['build_cmap'] = str(config.build_cmap).lower()
        header_data[
            'unicode_directive'] = rst_header_unicode if not slides else ''
        todo_file.write(rst_header % header_data)
        todo_file.write(todo_rst_template)
        current_type = ''
        for (todo_id, mod_name, todo_type, todo_directive) in sorted_todo_list:
            if todo_type == '':
                todo_type = 'No Category'
            # Whenever a new type is encountered, print a header for that type
            if current_type != todo_type:
                todo_file.write(
                    '.. raw:: html\n\n   <hr /><h1>%s</h1><hr />\n\n' % todo_type)
                current_type = todo_type
            # Write a header with the name of the file where the ToDo
            # originated that hyperlinks directly to the original ToDo
            todo_file.write('.. raw:: html\n\n   <h2><a href="' + mod_name +
                            '.html#' + todo_id + '">source: ' + mod_name + '</a></h2>\n\n')
            # Clean up and write the TODO directive itself
            todo_file.write(
                '\n'.join(todo_directive).encode('utf-8').strip() + '\n\n')
def initialize_output_directory(config):
    """Creates the output directory (if applicable) and copies the necessary files to it"""
    # Create the output directory if it doesn't exist
    # Will actually create '<build_dir>/<book_name>/source/Images/'
    distutils.dir_util.mkpath(config.book_src_dir + 'Images/')
    # Copy _static from RST/ to the book source directory
    distutils.dir_util.copy_tree(
        config.odsa_dir + 'RST/_static/', config.book_src_dir + '_static', update=1)
    # Copy config file to _static directory
    distutils.file_util.copy_file(
        config.config_file_path, config.book_src_dir + '_static/')
    # Copy translation file to _static directory
    distutils.file_util.copy_file(
        config.lang_file, config.book_src_dir + '_static/')
    # Create source/_static/config.js in the output directory
    # Used to set global settings for the client-side framework
    with open(config.book_src_dir + '_static/config.js', 'w') as config_js:
        config_js.writelines(config_js_template % config)
    # Create an index.html page in the book directory that redirects the user
    # to the book_output_dir
    with open(config.book_dir + 'index.html', 'w') as index_html:
        index_html.writelines(
            index_html_template % config.rel_book_output_path)
def initialize_conf_py_options(config, slides):
    """Initializes the options used to generate conf.py"""
    options = {}
    options['title'] = config.title
    options['book_name'] = config.book_name
    options['exercise_server'] = config.exercise_server
    options['logging_server'] = config.logging_server
    options['score_server'] = config.score_server
    options['module_origin'] = config.module_origin
    options['theme_dir'] = config.theme_dir
    options['theme'] = config.theme
    options['odsa_dir'] = config.odsa_dir
    options['book_dir'] = config.book_dir
    options['code_dir'] = config.code_dir
    options['tag'] = config.tag
    options['tabbed_code'] = config.tabbed_codeinc
    options['code_lang'] = json.dumps(config.code_lang)
    options['text_lang'] = json.dumps(config.lang)
    # Adding multiple tags: config.tag is a ';'-separated list which is
    # rewritten into repeated Sphinx '-t <tag>' flags.
    if config.tag:
        tags_string = ""
        tags_array = []
        tags_array += [a.strip() for a in config.tag.split(';')]
        for tag in tags_array:
            tags_string += " -t "+tag
        options["tag"] = tags_string
    else:
        options["tag"] = ""
    # convert the translation text into unicode strings (Python 2
    # dict.iteritems)
    tmpSTR = ''
    for k, v in config.text_translated.iteritems():
        tmpSTR = tmpSTR + '"%s":u"%s",' % (k, v)
    options['text_translated'] = tmpSTR
    options['av_root_dir'] = config.av_root_dir
    options['exercises_root_dir'] = config.exercises_root_dir
    # The relative path between the ebook output directory (where the HTML
    # files are generated) and the root ODSA directory
    options['eb2root'] = config.rel_build_to_odsa_path
    options['rel_book_output_path'] = config.rel_book_output_path
    options['slides_lib'] = 'hieroglyph' if slides else ''
    options['local_mode'] = str(config.local_mode).title()
    return options
def configure(config_file_path, options):
    """Configure an OpenDSA textbook based on a validated configuration file"""
    global satisfied_requirements
    slides = options.slides
    print "Configuring OpenDSA, using " + config_file_path
    # Load and validate the configuration
    config = ODSA_Config(config_file_path, options.output_directory)
    # Delete everything in the book's HTML directory, otherwise the
    # post-processor can sometimes append chapter numbers to the existing HTML
    # files, making the numbering incorrect
    html_dir = config.book_dir + config.rel_book_output_path
    if os.path.isdir(html_dir):
        print "Clearing HTML directory"
        shutil.rmtree(html_dir)
    # Add the list of topics the book assumes students know to the list of
    # fulfilled prereqs
    if config.assumes:
        satisfied_requirements += [a.strip()
                                   for a in config.assumes.split(';')]
    # Optionally rebuild JSAV
    if config.build_JSAV:
        print "Building JSAV\n"
        status = 0
        with open(os.devnull, "w") as fnull:
            status = subprocess.check_call(
                'make -s -C %s' % (config.odsa_dir + 'JSAV/'), shell=True, stdout=fnull)
        if status != 0:
            print_err("JSAV make failed")
            print_err(status)
            sys.exit(1)
    print "Writing files to " + config.book_dir + "\n"
    # local mode option
    config.local_mode = str(options.local).lower()
    # Initialize output directory, create index.rst, and process all of the
    # modules
    initialize_output_directory(config)
    generate_index_rst(config, slides)
    # Print out a list of any exercises found in RST files that do not appear
    # in the config file
    if len(missing_exercises) > 0:
        print_err("\nExercises Not Listed in Config File:")
        for exercise in missing_exercises:
            print_err('  ' + exercise)
        # Print an extra line to separate this section from any additional
        # errors
        print_err('')
    # Stop if we are just running a dry-run
    if options.dry_run:
        return
    # Entries are only added to todo_list if config.suppress_todo is False
    if len(todo_list) > 0:
        generate_todo_rst(config, slides)
    # Dump num_ref_map to table.json to be used by the Sphinx directives
    with open(config.book_dir + 'table.json', 'w') as num_ref_map_file:
        json.dump(num_ref_map, num_ref_map_file)
    # Dump module_chap_map to page_chapter.json to be used by the avmetadata directive
    # NOTE: avmetadata is deprecated (it was used to generate the concept map but is no longer used)
    # If avmetadata is eventually removed, we can stop writing this file
    with open(config.book_dir + 'page_chapter.json', 'w') as page_chapter_file:
        json.dump(module_chap_map, page_chapter_file)
    # Initialize options for conf.py
    # NOTE(review): `options` is rebound here from the CLI options object to
    # the conf.py template dict; the CLI values are no longer reachable below.
    options = initialize_conf_py_options(config, slides)
    # Create a Makefile in the output directory
    with open(config.book_dir + 'Makefile', 'w') as makefile:
        makefile.writelines(makefile_template % options)
    # Create conf.py file in output source directory
    with codecs.open(config.book_src_dir + 'conf.py', 'w', "utf-8") as conf_py:
        conf_py.writelines(conf % options)
    # Copy only the images used by the book from RST/Images/ to the book
    # source directory
    for image in images:
        distutils.file_util.copy_file(
            '%sRST/Images/%s' % (config.odsa_dir, image), config.book_src_dir + 'Images/')
    # Run make on the output directory
    print '\nBuilding textbook...'
    if slides:
        proc = subprocess.Popen(
            ['make', '-C', config.book_dir, 'slides'], stdout=subprocess.PIPE)
    else:
        proc = subprocess.Popen(
            ['make', '-C', config.book_dir], stdout=subprocess.PIPE)
    for line in iter(proc.stdout.readline, ''):
        print line.rstrip()
    # Calls the postprocessor to update chapter, section, and module numbers,
    # and glossary terms definition
    update_TOC(config.book_src_dir, config.book_dir +
               config.rel_book_output_path, module_chap_map)
    if 'Glossary' in processed_modules:
        update_TermDef(
            config.book_dir + config.rel_book_output_path + 'Glossary.html', cmap_map['concepts'])
    # Create the concept map definition file in _static html directory
    with codecs.open(config.book_dir + 'html/_static/GraphDefs.json', 'w', 'utf-8') as graph_defs_file:
        json.dump(cmap_map, graph_defs_file)
# Code to execute when run as a standalone program
if __name__ == "__main__":
    # Parse command-line flags (optparse; the single positional argument is
    # the path to the book configuration file).
    parser = OptionParser()
    parser.add_option("-s", "--slides", help="Causes configure.py to create slides",dest="slides", action="store_true", default=False)
    parser.add_option("--dry-run", help="Causes configure.py to configure the book but stop before compiling it",dest="dry_run", action="store_true", default=False)
    parser.add_option("--local", help="Causes the compiled book to work in local mode, which means no communication with the server",dest="local", action="store_true", default=False)
    parser.add_option("-o", help="Accepts a custom directory name instead of using the config file's name.",dest="output_directory", default=None)
    (options, args) = parser.parse_args()
    # Expose the slides choice to child processes (Sphinx extensions read it).
    if options.slides:
        os.environ['SLIDES'] = 'yes'
    else:
        os.environ['SLIDES'] = 'no'
    # Process script arguments
    if len(args) != 1:
        print_err(
            "Usage: " + sys.argv[0] + " [-s] [--dry-run] <config file path>")
        sys.exit(1)
    configure(args[0], options)
| |
# imaplib utilities
# Copyright (C) 2002-2015 John Goerzen & contributors
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import fcntl
import time
import subprocess
from sys import exc_info
import threading
from hashlib import sha1
import socket
import errno
from offlineimap.ui import getglobalui
from offlineimap import OfflineImapError
from offlineimap.imaplib2 import IMAP4, IMAP4_SSL, zlib, InternalDate, Mon2num
class UsefulIMAPMixIn(object):
    # Mix-in that augments an imaplib2 IMAP4 class (see WrappedIMAP4,
    # WrappedIMAP4_SSL and IMAP4_Tunnel below) with:
    #  * a select() that skips redundant SELECTs and maps failures to
    #    OfflineImapError, and
    #  * an open_socket() that tries every address family returned by
    #    getaddrinfo and retries connect() on EINTR.
    # NOTE: this is Python 2 code (old-style `except E, v` and the
    # three-expression `raise` below).
    def __getselectedfolder(self):
        # Name of the mailbox currently SELECTed on this connection, or
        # None when the connection is not in the SELECTED state.
        if self.state == 'SELECTED':
            return self.mailbox
        return None
    def select(self, mailbox='INBOX', readonly=False, force=False):
        """Selects a mailbox on the IMAP server

        Skips the server round-trip when `mailbox` is already selected
        with the same read-only mode, unless `force` is True.

        :returns: 'OK' on success, nothing if the folder was already
        selected or raises an :exc:`OfflineImapError`."""
        if self.__getselectedfolder() == mailbox and \
            self.is_readonly == readonly and \
            not force:
            # No change; return.
            return
        try:
            result = super(UsefulIMAPMixIn, self).select(mailbox, readonly)
        except self.readonly as e:
            # pass self.readonly to our callers
            raise
        except self.abort as e:
            # self.abort is raised when we are supposed to retry
            errstr = "Server '%s' closed connection, error on SELECT '%s'. Ser"\
                "ver said: %s" % (self.host, mailbox, e.args[0])
            severity = OfflineImapError.ERROR.FOLDER_RETRY
            # Python 2 re-raise preserving the original traceback.
            raise OfflineImapError(errstr, severity), None, exc_info()[2]
        if result[0] != 'OK':
            # in case of error, bail out with OfflineImapError
            errstr = "Error SELECTing mailbox '%s', server reply:\n%s" %\
                (mailbox, result)
            severity = OfflineImapError.ERROR.FOLDER
            raise OfflineImapError(errstr, severity)
        return result
    # Overrides private function from IMAP4 (@imaplib2); routes debug
    # output through the offlineimap UI (see new_mesg below).
    def _mesg(self, s, tn=None, secs=None):
        new_mesg(self, s, tn, secs)
    # Overrides private function from IMAP4 (@imaplib2)
    def open_socket(self):
        """open_socket()
        Open socket choosing first address family available."""
        msg = (-1, 'could not open socket')
        for res in socket.getaddrinfo(self.host, self.port, self.af, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                # use socket of our own, possibly socksified socket.
                s = self.socket(af, socktype, proto)
            except socket.error, msg:
                # Could not create a socket for this family; try the next.
                continue
            try:
                # Retry connect() once when interrupted by a signal
                # (EINTR); any other error aborts this address.
                for i in (0, 1):
                    try:
                        s.connect(sa)
                        break
                    except socket.error, msg:
                        if len(msg.args) < 2 or msg.args[0] != errno.EINTR:
                            raise
                else:
                    # Both attempts were interrupted; give up on this
                    # address with the last error.
                    raise socket.error(msg)
            except socket.error, msg:
                s.close()
                continue
            # Connected successfully; stop trying addresses.
            break
        else:
            # No address worked: re-raise the last failure (or the
            # placeholder if getaddrinfo returned nothing).
            raise socket.error(msg)
        return s
class IMAP4_Tunnel(UsefulIMAPMixIn, IMAP4):
    """IMAP4 client that reaches the server through a shell tunnel.

    Instantiate with: IMAP4_Tunnel(tunnelcmd)

    tunnelcmd -- shell command spawning the tunnel process.
    The resulting connection starts out in the PREAUTH stage."""
    def __init__(self, tunnelcmd, **kwargs):
        # A caller-supplied socket factory (e.g. a SOCKS-enabled one)
        # can be injected via the 'use_socket' keyword.
        if "use_socket" in kwargs:
            self.socket = kwargs.pop('use_socket')
        IMAP4.__init__(self, tunnelcmd, **kwargs)
    def open(self, host, port):
        """The tunnelcmd comes in on host!"""
        self.host = host
        self.process = subprocess.Popen(host, shell=True, close_fds=True,
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE)
        self.outfd = self.process.stdin
        self.infd = self.process.stdout
        # imaplib2 polls on this fd
        self.read_fd = self.infd.fileno()
        self.set_nonblocking(self.read_fd)
    def set_nonblocking(self, fd):
        """Put fd into blocking mode.

        NOTE(review): despite the historical name, this *clears*
        O_NONBLOCK (imaplib2 polls read_fd itself, see open() above) --
        confirm the name/behavior mismatch is intentional."""
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        # Drop the non-blocking bit from the current flag set.
        fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
    def read(self, size):
        """data = read(size)
        Read at most 'size' bytes from remote."""
        if self.decompressor is None:
            return os.read(self.read_fd, size)
        # COMPRESS=DEFLATE is active: consume any leftover compressed
        # input first; only then pull a fresh chunk from the tunnel.
        pending = self.decompressor.unconsumed_tail
        if not pending:
            pending = os.read(self.read_fd, 8192)
        return self.decompressor.decompress(pending, size)
    def send(self, data):
        # Compress outgoing data when COMPRESS=DEFLATE is enabled,
        # flushing so the server sees a complete sync block.
        if self.compressor is not None:
            data = self.compressor.compress(data)
            data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
        self.outfd.write(data)
    def shutdown(self):
        # Close both pipe ends, then reap the tunnel child process.
        self.infd.close()
        self.outfd.close()
        self.process.wait()
def new_mesg(self, s, tn=None, secs=None):
    # Replacement for IMAP4._mesg: routes imaplib2 debug output through
    # the offlineimap UI (getglobalui) instead of the default channel.
    timestamp = time.time() if secs is None else secs
    thread_name = threading.currentThread().getName() if tn is None else tn
    clock = time.strftime('%M:%S', time.localtime(timestamp))
    getglobalui().debug('imap', ' %s.%02d %s %s' %
                        (clock, (timestamp*100)%100, thread_name, s))
class WrappedIMAP4_SSL(UsefulIMAPMixIn, IMAP4_SSL):
    """Improved version of imaplib.IMAP4_SSL overriding select().

    Extra keyword arguments consumed here (all optional):
      af          -- address family for open_socket()
      use_socket  -- socket factory to use instead of socket.socket
      fingerprint -- expected server certificate SHA1 hex digest; a
                     single string or a list of acceptable values."""
    def __init__(self, *args, **kwargs):
        if "af" in kwargs:
            self.af = kwargs['af']
            del kwargs['af']
        if "use_socket" in kwargs:
            self.socket = kwargs['use_socket']
            del kwargs['use_socket']
        self._fingerprint = kwargs.get('fingerprint', None)
        # Normalize to a (possibly empty) list of fingerprints.  The
        # previous code wrapped a missing fingerprint as [None], which
        # is truthy: the "nothing configured" guard in open() never
        # fired and the fingerprint comparison always ran (and failed).
        if self._fingerprint is None:
            self._fingerprint = []
        elif not isinstance(self._fingerprint, list):
            self._fingerprint = [self._fingerprint]
        if 'fingerprint' in kwargs:
            del kwargs['fingerprint']
        super(WrappedIMAP4_SSL, self).__init__(*args, **kwargs)
    def open(self, host=None, port=None):
        """Open the SSL connection and verify the peer.

        :raises OfflineImapError: when neither CA certificates nor any
            fingerprint is configured, or when the server certificate's
            SHA1 fingerprint matches none of the configured ones."""
        if not self.ca_certs and not self._fingerprint:
            raise OfflineImapError("No CA certificates "
                   "and no server fingerprints configured. "
                   "You must configure at least something, otherwise "
                   "having SSL helps nothing.", OfflineImapError.ERROR.REPO)
        super(WrappedIMAP4_SSL, self).open(host, port)
        if self._fingerprint:
            # compare fingerprints
            fingerprint = sha1(self.sock.getpeercert(True)).hexdigest()
            if fingerprint not in self._fingerprint:
                raise OfflineImapError("Server SSL fingerprint '%s' "
                       "for hostname '%s' "
                       "does not match configured fingerprint(s) %s. "
                       "Please verify and set 'cert_fingerprint' accordingly "
                       "if not set yet."%
                       (fingerprint, host, self._fingerprint),
                       OfflineImapError.ERROR.REPO)
class WrappedIMAP4(UsefulIMAPMixIn, IMAP4):
    """Improved version of imaplib.IMAP4 overriding select()."""
    def __init__(self, *args, **kwargs):
        # Consume the optional 'af' (address family) and 'use_socket'
        # (socket factory) keywords; everything else goes to IMAP4.
        if "af" in kwargs:
            self.af = kwargs.pop("af")
        if "use_socket" in kwargs:
            self.socket = kwargs.pop("use_socket")
        IMAP4.__init__(self, *args, **kwargs)
def Internaldate2epoch(resp):
    """Convert IMAP4 INTERNALDATE to UT.

    Returns seconds since the epoch, or None when `resp` does not match
    the imaplib2 InternalDate pattern."""
    from calendar import timegm
    match = InternalDate.match(resp)
    if not match:
        return None
    group = match.group
    # Build a struct_time-style tuple (DST/weekday/yearday unknown).
    tt = (int(group('year')),
          Mon2num[group('mon')],
          int(group('day')),
          int(group('hour')),
          int(group('min')),
          int(group('sec')),
          -1, -1, -1)
    # INTERNALDATE timezone must be subtracted to get UT
    offset = (int(group('zoneh'))*60 + int(group('zonem')))*60
    if group('zonen') == '-':
        offset = -offset
    return timegm(tt) - offset
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta
import imp
import os
import routes
import webob.dec
import webob.exc
from quantum.api.v2 import attributes
from quantum.common import constants
from quantum.common import exceptions
import quantum.extensions
from quantum.manager import QuantumManager
from quantum.openstack.common import cfg
from quantum.openstack.common import log as logging
from quantum import wsgi
LOG = logging.getLogger('quantum.api.extensions')
class PluginInterface(object):
    __metaclass__ = ABCMeta
    @classmethod
    def __subclasshook__(cls, klass):
        """Duck-typed issubclass(klass, PluginInterface) support.

        A class qualifies as a subclass when it (or any base in its
        MRO) defines every method this interface marks with the
        abstractmethod decorator; otherwise defer to the default
        mechanism by returning NotImplemented.
        """
        def provided(method):
            # True when any class in the MRO defines `method` directly.
            return any(method in base.__dict__ for base in klass.__mro__)
        if all(provided(method) for method in cls.__abstractmethods__):
            return True
        return NotImplemented
class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions.

    Note that you don't have to derive from this class to have a valid
    extension; it is purely a convenience.
    """
    def get_name(self):
        """The name of the extension.

        e.g. 'Fox In Socks'
        """
        raise NotImplementedError()
    def get_alias(self):
        """The alias for the extension.

        e.g. 'FOXNSOX'
        """
        raise NotImplementedError()
    def get_description(self):
        """Friendly description for the extension.

        e.g. 'The Fox In Socks Extension'
        """
        raise NotImplementedError()
    def get_namespace(self):
        """The XML namespace for the extension.

        e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0'
        """
        raise NotImplementedError()
    def get_updated(self):
        """The timestamp when the extension was last updated.

        e.g. '2011-01-22T13:25:27-06:00'
        """
        # NOTE(justinsb): Not sure of the purpose of this is, vs the XML NS
        raise NotImplementedError()
    def get_resources(self):
        """List of extensions.ResourceExtension extension objects.

        Resources define new nouns, and are accessible through URLs.
        """
        return []
    def get_actions(self):
        """List of extensions.ActionExtension extension objects.

        Actions are verbs callable from the API.
        """
        return []
    def get_request_extensions(self):
        """List of extensions.RequestException extension objects.

        Request extensions are used to handle custom request data.
        """
        return []
    def get_extended_resources(self, version):
        """Retrieve extended resources or attributes for core resources.

        Extended attributes are implemented by a core plugin similarly
        to the attributes defined in the core, and can appear in
        request and response messages. Their names are scoped with the
        extension's prefix. The core API version is passed to this
        function, which must return a
        map[<resource_name>][<attribute_name>][<attribute_property>]
        specifying the extended resource attribute properties required
        by that API version.

        Extension can add resources and their attr definitions too.
        The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP.
        """
        return {}
    def get_plugin_interface(self):
        """Returns an abstract class which defines contract for the plugin.

        The abstract class should inherit from extensions.PluginInterface;
        methods in this abstract class should be decorated as abstractmethod.
        """
        return None
class ActionExtensionController(wsgi.Controller):
    """Dispatches extension-defined actions posted to a resource."""
    def __init__(self, application):
        self.application = application
        # action name -> handler callable
        self.action_handlers = {}
    def add_action(self, action_name, handler):
        self.action_handlers[action_name] = handler
    def action(self, request, id):
        # The request body names the action to perform.
        payload = self._deserialize(request.body,
                                    request.get_content_type())
        for name, handler in self.action_handlers.iteritems():
            if name in payload:
                return handler(payload, request, id)
        # no action handler found (bump to downstream application)
        return self.application
class RequestExtensionController(wsgi.Controller):
    # Runs registered request-extension handlers against the downstream
    # application's response.
    def __init__(self, application):
        self.application = application
        # List of handler callables taking (request, response).
        self.handlers = []
    def add_handler(self, handler):
        self.handlers.append(handler)
    def process(self, request, *args, **kwargs):
        # Get the downstream response first so handlers can augment it.
        res = request.get_response(self.application)
        # currently request handlers are un-ordered
        # NOTE(review): each handler receives the *original* response
        # 'res', only the last handler's return value is returned, and
        # an empty handler list would raise NameError on 'response'
        # below -- confirm this is the intended contract.
        for handler in self.handlers:
            response = handler(request, res)
        return response
class ExtensionController(wsgi.Controller):
    """REST controller exposing the loaded extensions themselves."""
    def __init__(self, extension_manager):
        self.extension_manager = extension_manager
    def _translate(self, ext):
        # Convert one extension object into its API dict representation.
        return {
            'name': ext.get_name(),
            'alias': ext.get_alias(),
            'description': ext.get_description(),
            'namespace': ext.get_namespace(),
            'updated': ext.get_updated(),
            'links': [],  # TODO(dprince): implement extension links
        }
    def index(self, request):
        # List all loaded extensions.
        listed = [self._translate(ext) for _alias, ext in
                  self.extension_manager.extensions.iteritems()]
        return dict(extensions=listed)
    def show(self, request, id):
        # NOTE(dprince): the extensions alias is used as the 'id' for show
        ext = self.extension_manager.extensions.get(id)
        if not ext:
            raise webob.exc.HTTPNotFound(
                _("Extension with alias %s does not exist") % id)
        return dict(extension=self._translate(ext))
    def delete(self, request, id):
        # Extensions are statically loaded; not removable over the API.
        raise webob.exc.HTTPNotFound()
    def create(self, request):
        # Extensions are statically loaded; not creatable over the API.
        raise webob.exc.HTTPNotFound()
class ExtensionMiddleware(wsgi.Middleware):
    """Extensions middleware for WSGI.

    Builds a routes mapper wiring every loaded extension's resources,
    actions and request extensions in front of the wrapped application.
    """
    def __init__(self, application,
                 ext_mgr=None):
        # Fall back to a plain ExtensionManager over the configured
        # extension path when no manager is supplied.
        self.ext_mgr = (ext_mgr
                        or ExtensionManager(
                        get_extensions_path()))
        mapper = routes.Mapper()
        # extended resources
        for resource in self.ext_mgr.get_resources():
            path_prefix = resource.path_prefix
            # Nest under the parent collection when one is declared.
            if resource.parent:
                path_prefix = (resource.path_prefix +
                               "/%s/{%s_id}" %
                               (resource.parent["collection_name"],
                                resource.parent["member_name"]))
            LOG.debug(_('Extended resource: %s'),
                      resource.collection)
            # Wire any extra collection-level actions, with and without
            # a trailing .:(format) suffix.
            for action, method in resource.collection_actions.iteritems():
                conditions = dict(method=[method])
                path = "/%s/%s" % (resource.collection, action)
                with mapper.submapper(controller=resource.controller,
                                      action=action,
                                      path_prefix=path_prefix,
                                      conditions=conditions) as submap:
                    submap.connect(path)
                    submap.connect("%s.:(format)" % path)
            # Standard RESTful routes for the resource itself.
            mapper.resource(resource.collection, resource.collection,
                            controller=resource.controller,
                            member=resource.member_actions,
                            parent_resource=resource.parent,
                            path_prefix=path_prefix)
        # extended actions
        action_controllers = self._action_ext_controllers(application,
                                                        self.ext_mgr, mapper)
        for action in self.ext_mgr.get_actions():
            LOG.debug(_('Extended action: %s'), action.action_name)
            controller = action_controllers[action.collection]
            controller.add_action(action.action_name, action.handler)
        # extended requests
        req_controllers = self._request_ext_controllers(application,
                                                        self.ext_mgr, mapper)
        for request_ext in self.ext_mgr.get_request_extensions():
            LOG.debug(_('Extended request: %s'), request_ext.key)
            controller = req_controllers[request_ext.key]
            controller.add_handler(request_ext.handler)
        # _dispatch (below) consults the routing match placed into the
        # environ by this RoutesMiddleware.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          mapper)
        super(ExtensionMiddleware, self).__init__(application)
    @classmethod
    def factory(cls, global_config, **local_config):
        """Paste factory."""
        # NOTE(review): global_config is passed positionally into
        # cls(app, global_config, ...), which lands in the ext_mgr
        # parameter of __init__ -- confirm this is intended (the
        # plugin_aware_extension_middleware_factory below avoids it).
        def _factory(app):
            return cls(app, global_config, **local_config)
        return _factory
    def _action_ext_controllers(self, application, ext_mgr, mapper):
        """Return a dict of ActionExtensionController-s by collection."""
        action_controllers = {}
        for action in ext_mgr.get_actions():
            # One shared controller per collection; routes registered
            # with and without the .:(format) suffix.
            if action.collection not in action_controllers.keys():
                controller = ActionExtensionController(application)
                mapper.connect("/%s/:(id)/action.:(format)" %
                               action.collection,
                               action='action',
                               controller=controller,
                               conditions=dict(method=['POST']))
                mapper.connect("/%s/:(id)/action" % action.collection,
                               action='action',
                               controller=controller,
                               conditions=dict(method=['POST']))
                action_controllers[action.collection] = controller
        return action_controllers
    def _request_ext_controllers(self, application, ext_mgr, mapper):
        """Returns a dict of RequestExtensionController-s by collection."""
        request_ext_controllers = {}
        for req_ext in ext_mgr.get_request_extensions():
            # One controller per (method, route) key, see
            # RequestExtension.key.
            if req_ext.key not in request_ext_controllers.keys():
                controller = RequestExtensionController(application)
                mapper.connect(req_ext.url_route + '.:(format)',
                               action='process',
                               controller=controller,
                               conditions=req_ext.conditions)
                mapper.connect(req_ext.url_route,
                               action='process',
                               controller=controller,
                               conditions=req_ext.conditions)
                request_ext_controllers[req_ext.key] = controller
        return request_ext_controllers
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Route the incoming request with router."""
        # Stash the wrapped app so _dispatch can fall back to it.
        req.environ['extended.app'] = self.application
        return self._router
    @staticmethod
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def _dispatch(req):
        """Dispatch the request.

        Returns the routed WSGI app's response or defers to the extended
        application.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            # No extension route matched: hand off to the wrapped app.
            return req.environ['extended.app']
        app = match['controller']
        return app
def plugin_aware_extension_middleware_factory(global_config, **local_config):
    """Paste factory wiring ExtensionMiddleware to the plugin-aware
    extension manager singleton."""
    def _factory(app):
        manager = PluginAwareExtensionManager.get_instance()
        return ExtensionMiddleware(app, ext_mgr=manager)
    return _factory
class ExtensionManager(object):
    """Load extensions from the configured extension path.

    See tests/unit/extensions/foxinsocks.py for an
    example extension implementation.
    """
    def __init__(self, path):
        LOG.info(_('Initializing extension manager.'))
        # Colon-separated list of directories to scan.
        self.path = path
        # alias -> extension object (see add_extension).
        self.extensions = {}
        self._load_all_extensions()
    def get_resources(self):
        """Returns a list of ResourceExtension objects."""
        resources = []
        # The extensions index itself is always exposed as a resource.
        resources.append(ResourceExtension('extensions',
                                           ExtensionController(self)))
        for ext in self.extensions.itervalues():
            try:
                resources.extend(ext.get_resources())
            except AttributeError:
                # NOTE(dprince): Extension aren't required to have resource
                # extensions
                pass
        return resources
    def get_actions(self):
        """Returns a list of ActionExtension objects."""
        actions = []
        for ext in self.extensions.itervalues():
            try:
                actions.extend(ext.get_actions())
            except AttributeError:
                # NOTE(dprince): Extension aren't required to have action
                # extensions
                pass
        return actions
    def get_request_extensions(self):
        """Returns a list of RequestExtension objects."""
        request_exts = []
        for ext in self.extensions.itervalues():
            try:
                request_exts.extend(ext.get_request_extensions())
            except AttributeError:
                # NOTE(dprince): Extension aren't required to have request
                # extensions
                pass
        return request_exts
    def extend_resources(self, version, attr_map):
        """Extend resources with additional resources or attributes.

        :param: attr_map, the existing mapping from resource name to
        attrs definition.

        After this function, we will extend the attr_map if an extension
        wants to extend this map.
        """
        for ext in self.extensions.itervalues():
            if not hasattr(ext, 'get_extended_resources'):
                continue
            try:
                extended_attrs = ext.get_extended_resources(version)
                # Merge into existing resource entries, or add brand-new
                # resources wholesale.
                for resource, resource_attrs in extended_attrs.iteritems():
                    if attr_map.get(resource, None):
                        attr_map[resource].update(resource_attrs)
                    else:
                        attr_map[resource] = resource_attrs
                # Record the extension's XML namespace for serializers.
                if extended_attrs:
                    attributes.EXT_NSES[ext.get_alias()] = ext.get_namespace()
            except AttributeError:
                LOG.exception(_("Error fetching extended attributes for "
                                "extension '%s'"), ext.get_name())
    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        try:
            # Each getter doubles as an existence check: a missing
            # method raises AttributeError and rejects the extension.
            LOG.debug(_('Ext name: %s'), extension.get_name())
            LOG.debug(_('Ext alias: %s'), extension.get_alias())
            LOG.debug(_('Ext description: %s'), extension.get_description())
            LOG.debug(_('Ext namespace: %s'), extension.get_namespace())
            LOG.debug(_('Ext updated: %s'), extension.get_updated())
        except AttributeError as ex:
            LOG.exception(_("Exception loading extension: %s"), unicode(ex))
            return False
        # Extensions may veto themselves based on the environment.
        if hasattr(extension, 'check_env'):
            try:
                extension.check_env()
            except exceptions.InvalidExtenstionEnv as ex:
                LOG.warn(_("Exception loading extension: %s"), unicode(ex))
                return False
        return True
    def _load_all_extensions(self):
        """Load extensions from the configured path.

        Load extensions from the configured path. The extension name is
        constructed from the module_name. If your extension module was named
        widgets.py the extension class within that module should be
        'Widgets'.

        See tests/unit/extensions/foxinsocks.py for an example
        extension implementation.
        """
        for path in self.path.split(':'):
            if os.path.exists(path):
                self._load_all_extensions_from_path(path)
            else:
                LOG.error(_("Extension path '%s' doesn't exist!"), path)
    def _load_all_extensions_from_path(self, path):
        # Import every non-underscore .py file in `path` and register
        # the capitalized class it is expected to define.
        for f in os.listdir(path):
            try:
                LOG.info(_('Loading extension file: %s'), f)
                mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
                ext_path = os.path.join(path, f)
                if file_ext.lower() == '.py' and not mod_name.startswith('_'):
                    # NOTE(review): imp.load_source executes arbitrary
                    # code from the extension directory at import time.
                    mod = imp.load_source(mod_name, ext_path)
                    ext_name = mod_name[0].upper() + mod_name[1:]
                    new_ext_class = getattr(mod, ext_name, None)
                    if not new_ext_class:
                        LOG.warn(_('Did not find expected name '
                                   '"%(ext_name)s" in %(file)s'),
                                 {'ext_name': ext_name,
                                  'file': ext_path})
                        continue
                    new_ext = new_ext_class()
                    self.add_extension(new_ext)
            except Exception as exception:
                # A broken extension file must not abort loading of the
                # remaining files.
                LOG.warn(_("Extension file %(f)s wasn't loaded due to "
                           "%(exception)s"), locals())
    def add_extension(self, ext):
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return
        alias = ext.get_alias()
        LOG.info(_('Loaded extension: %s'), alias)
        # Aliases must be unique across all loaded extensions.
        if alias in self.extensions:
            raise exceptions.Error(_("Found duplicate extension: %s") %
                                   alias)
        self.extensions[alias] = ext
class PluginAwareExtensionManager(ExtensionManager):
    """ExtensionManager that additionally requires the loaded plugins to
    support and implement each extension before accepting it."""
    _instance = None
    def __init__(self, path, plugins):
        # plugin name -> plugin object mapping, consulted by the
        # _plugins_* checks below.
        self.plugins = plugins
        super(PluginAwareExtensionManager, self).__init__(path)
    def _check_extension(self, extension):
        """Checks if any of plugins supports extension and implements the
        extension contract."""
        base_ok = super(PluginAwareExtensionManager,
                        self)._check_extension(extension)
        if not base_ok:
            return False
        return (self._plugins_support(extension) and
                self._plugins_implement_interface(extension))
    def _plugins_support(self, extension):
        # At least one plugin must advertise the extension's alias.
        alias = extension.get_alias()
        for plugin in self.plugins.values():
            if (hasattr(plugin, "supported_extension_aliases") and
                    alias in plugin.supported_extension_aliases):
                return True
        LOG.warn(_("Extension %s not supported by any of loaded plugins"),
                 alias)
        return False
    def _plugins_implement_interface(self, extension):
        # Extensions without a plugin interface have no contract to
        # enforce; accept them as-is.
        if (not hasattr(extension, "get_plugin_interface") or
                extension.get_plugin_interface() is None):
            return True
        for plugin in self.plugins.values():
            if isinstance(plugin, extension.get_plugin_interface()):
                return True
        LOG.warn(_("Loaded plugins do not implement extension %s interface"),
                 extension.get_alias())
        return False
    @classmethod
    def get_instance(cls):
        # Lazily create the singleton bound to the configured extension
        # path and the loaded service plugins.
        if cls._instance is None:
            cls._instance = cls(get_extensions_path(),
                                QuantumManager.get_service_plugins())
        return cls._instance
class RequestExtension(object):
    """Extend requests and responses of core Quantum OpenStack API controllers.

    Provide a way to add data to responses and handle custom request data
    that is sent to core Quantum OpenStack API controllers.
    """
    def __init__(self, method, url_route, handler):
        # 'key' uniquely identifies this (HTTP method, route) pair.
        self.url_route = url_route
        self.handler = handler
        self.conditions = {'method': [method]}
        self.key = "%s-%s" % (method, url_route)
class ActionExtension(object):
    """Add custom actions to core Quantum OpenStack API controllers."""
    def __init__(self, collection, action_name, handler):
        # Requests naming 'action_name' on members of 'collection' are
        # dispatched to 'handler'.
        self.collection, self.action_name, self.handler = (
            collection, action_name, handler)
class ResourceExtension(object):
    """Add top level resources to the OpenStack API in Quantum.

    :param collection: collection (URL) name for the resource
    :param controller: controller handling the resource
    :param parent: optional dict describing the parent resource
        ('collection_name' / 'member_name' keys, see ExtensionMiddleware)
    :param path_prefix: URL prefix prepended to the resource's routes
    :param collection_actions: extra collection-level action name ->
        HTTP method mapping
    :param member_actions: extra member-level actions mapping
    :param attr_map: extended attribute map for the resource
    """
    def __init__(self, collection, controller, parent=None, path_prefix="",
                 collection_actions=None, member_actions=None, attr_map=None):
        # None sentinels instead of mutable {} defaults: the previous
        # signature shared one dict object across every instance.
        self.collection = collection
        self.controller = controller
        self.parent = parent
        self.collection_actions = (collection_actions
                                   if collection_actions is not None else {})
        self.member_actions = (member_actions
                               if member_actions is not None else {})
        self.path_prefix = path_prefix
        self.attr_map = attr_map if attr_map is not None else {}
# Compute the extension search path from the api_extensions_path config
# entry plus the __path__ of the quantum.extensions package.
def get_extensions_path():
    paths = ':'.join(quantum.extensions.__path__)
    if cfg.CONF.api_extensions_path:
        return ':'.join([cfg.CONF.api_extensions_path, paths])
    return paths
| |
import copy
from pytest import raises
from invoke.parser import Argument, Context
from invoke.tasks import task
from invoke.collection import Collection
class Context_:
"ParserContext" # meh
def may_have_a_name(self):
c = Context(name="taskname")
assert c.name == "taskname"
def may_have_aliases(self):
c = Context(name="realname", aliases=("othername", "yup"))
assert "othername" in c.aliases
def may_give_arg_list_at_init_time(self):
a1 = Argument("foo")
a2 = Argument("bar")
c = Context(name="name", args=(a1, a2))
assert c.args["foo"] is a1
# TODO: reconcile this sort of test organization with the .flags oriented
# tests within 'add_arg'. Some of this behavior is technically driven by
# add_arg.
class args:
def setup(self):
self.c = Context(
args=(
Argument("foo"),
Argument(names=("bar", "biz")),
Argument("baz", attr_name="wat"),
)
)
def exposed_as_dict(self):
assert "foo" in self.c.args.keys()
def exposed_as_Lexicon(self):
assert self.c.args.bar == self.c.args["bar"]
def args_dict_includes_all_arg_names(self):
for x in ("foo", "bar", "biz"):
assert x in self.c.args
def argument_attr_names_appear_in_args_but_not_flags(self):
# Both appear as "Python-facing" args
for x in ("baz", "wat"):
assert x in self.c.args
# But attr_name is for Python access only and isn't shown to the
# parser.
assert "wat" not in self.c.flags
class add_arg:
def setup(self):
self.c = Context()
def can_take_Argument_instance(self):
a = Argument(names=("foo",))
self.c.add_arg(a)
assert self.c.args["foo"] is a
def can_take_name_arg(self):
self.c.add_arg("foo")
assert "foo" in self.c.args
def can_take_kwargs_for_single_Argument(self):
self.c.add_arg(names=("foo", "bar"))
assert "foo" in self.c.args and "bar" in self.c.args
def raises_ValueError_on_duplicate(self):
self.c.add_arg(names=("foo", "bar"))
with raises(ValueError):
self.c.add_arg(name="bar")
def adds_flaglike_name_to_dot_flags(self):
"adds flaglike name to .flags"
self.c.add_arg("foo")
assert "--foo" in self.c.flags
def adds_all_names_to_dot_flags(self):
"adds all names to .flags"
self.c.add_arg(names=("foo", "bar"))
assert "--foo" in self.c.flags
assert "--bar" in self.c.flags
def adds_true_bools_to_inverse_flags(self):
self.c.add_arg(name="myflag", default=True, kind=bool)
assert "--myflag" in self.c.flags
assert "--no-myflag" in self.c.inverse_flags
assert self.c.inverse_flags["--no-myflag"] == "--myflag"
def inverse_flags_works_right_with_task_driven_underscored_names(self):
# Use a Task here instead of creating a raw argument, we're partly
# testing Task.get_arguments()' transform of underscored names
# here. Yes that makes this an integration test, but it's nice to
# test it here at this level & not just in cli tests.
@task
def mytask(c, underscored_option=True):
pass
self.c.add_arg(mytask.get_arguments()[0])
flags = self.c.inverse_flags["--no-underscored-option"]
assert flags == "--underscored-option"
def turns_single_character_names_into_short_flags(self):
self.c.add_arg("f")
assert "-f" in self.c.flags
assert "--f" not in self.c.flags
def adds_positional_args_to_positional_args(self):
self.c.add_arg(name="pos", positional=True)
assert self.c.positional_args[0].name == "pos"
def positional_args_empty_when_none_given(self):
assert len(self.c.positional_args) == 0
def positional_args_filled_in_order(self):
self.c.add_arg(name="pos1", positional=True)
assert self.c.positional_args[0].name == "pos1"
self.c.add_arg(name="abc", positional=True)
assert self.c.positional_args[1].name == "abc"
def positional_arg_modifications_affect_args_copy(self):
self.c.add_arg(name="hrm", positional=True)
assert self.c.args["hrm"].value == self.c.positional_args[0].value
self.c.positional_args[0].value = 17
assert self.c.args["hrm"].value == self.c.positional_args[0].value
class deepcopy:
"__deepcopy__"
def setup(self):
self.arg = Argument("--boolean")
self.orig = Context(
name="mytask", args=(self.arg,), aliases=("othername",)
)
self.new = copy.deepcopy(self.orig)
def returns_correct_copy(self):
assert self.new is not self.orig
assert self.new.name == "mytask"
assert "othername" in self.new.aliases
def includes_arguments(self):
assert len(self.new.args) == 1
assert self.new.args["--boolean"] is not self.arg
def modifications_to_copied_arguments_do_not_touch_originals(self):
new_arg = self.new.args["--boolean"]
new_arg.value = True
assert new_arg.value
assert not self.arg.value
class help_for:
def setup(self):
# Normal, non-task/collection related Context
self.vanilla = Context(
args=(Argument("foo"), Argument("bar", help="bar the baz"))
)
# Task/Collection generated Context
# (will expose flags n such)
@task(help={"otherarg": "other help"}, optional=["optval"])
def mytask(c, myarg, otherarg, optval, intval=5):
pass
col = Collection(mytask)
self.tasked = col.to_contexts()[0]
def raises_ValueError_for_non_flag_values(self):
with raises(ValueError):
self.vanilla.help_for("foo")
def vanilla_no_helpstr(self):
assert self.vanilla.help_for("--foo") == ("--foo=STRING", "")
def vanilla_with_helpstr(self):
result = self.vanilla.help_for("--bar")
assert result == ("--bar=STRING", "bar the baz")
def task_driven_with_helpstr(self):
result = self.tasked.help_for("--otherarg")
assert result == ("-o STRING, --otherarg=STRING", "other help")
# Yes, the next 3 tests are identical in form, but technically they
# test different behaviors. HERPIN' AN' DERPIN'
def task_driven_no_helpstr(self):
result = self.tasked.help_for("--myarg")
assert result == ("-m STRING, --myarg=STRING", "")
def short_form_before_long_form(self):
result = self.tasked.help_for("--myarg")
assert result == ("-m STRING, --myarg=STRING", "")
def equals_sign_for_long_form_only(self):
result = self.tasked.help_for("--myarg")
assert result == ("-m STRING, --myarg=STRING", "")
def kind_to_placeholder_map(self):
# Strings
helpfor = self.tasked.help_for("--myarg")
assert helpfor == ("-m STRING, --myarg=STRING", "")
# Ints
helpfor = self.tasked.help_for("--intval")
assert helpfor == ("-i INT, --intval=INT", "")
# TODO: others
def shortflag_inputs_work_too(self):
m = self.tasked.help_for("-m")
myarg = self.tasked.help_for("--myarg")
assert m == myarg
def optional_values_use_brackets(self):
result = self.tasked.help_for("--optval")
assert result == ("-p [STRING], --optval[=STRING]", "")
def underscored_args(self):
c = Context(args=(Argument("i_have_underscores", help="yup"),))
result = c.help_for("--i-have-underscores")
assert result == ("--i-have-underscores=STRING", "yup")
def true_default_args(self):
c = Context(args=(Argument("truthy", kind=bool, default=True),))
assert c.help_for("--truthy") == ("--[no-]truthy", "")
class help_tuples:
def returns_list_of_help_tuples(self):
# Walks own list of flags/args, ensures resulting map to help_for()
# TODO: consider redoing help_for to be more flexible on input --
# arg value or flag; or even Argument objects. ?
@task(help={"otherarg": "other help"})
def mytask(c, myarg, otherarg):
pass
c = Collection(mytask).to_contexts()[0]
expected = [c.help_for("--myarg"), c.help_for("--otherarg")]
assert c.help_tuples() == expected
def _assert_order(self, name_tuples, expected_flag_order):
c = Context(args=[Argument(names=x) for x in name_tuples])
expected = [c.help_for(x) for x in expected_flag_order]
assert c.help_tuples() == expected
def sorts_alphabetically_by_shortflag_first(self):
# Where shortflags exist, they take precedence
self._assert_order(
[("zarg", "a"), ("arg", "z")], ["--zarg", "--arg"]
)
def case_ignored_during_sorting(self):
self._assert_order(
[("a",), ("B",)],
# In raw cmp() uppercase would come before lowercase,
# and we'd get ['-B', '-a']
["-a", "-B"],
)
def lowercase_wins_when_values_identical_otherwise(self):
self._assert_order([("V",), ("v",)], ["-v", "-V"])
def sorts_alphabetically_by_longflag_when_no_shortflag(self):
# Where no shortflag, sorts by longflag
self._assert_order(
[("otherarg",), ("longarg",)], ["--longarg", "--otherarg"]
)
def sorts_heterogenous_help_output_with_longflag_only_options_first(
self
): # noqa
# When both of the above mix, long-flag-only options come first.
# E.g.:
# --alpha
# --beta
# -a, --aaaagh
# -b, --bah
# -c
self._assert_order(
[("c",), ("a", "aaagh"), ("b", "bah"), ("beta",), ("alpha",)],
["--alpha", "--beta", "-a", "-b", "-c"],
)
def mixed_corelike_options(self):
self._assert_order(
[
("V", "version"),
("c", "collection"),
("h", "help"),
("l", "list"),
("r", "root"),
],
["-c", "-h", "-l", "-r", "-V"],
)
class missing_positional_args:
    def represents_positional_args_missing_values(self):
        # Two positional args plus one non-positional; only positionals
        # lacking a value should ever be reported as missing.
        first = Argument("arg1", positional=True)
        keyword_only = Argument("arg2", positional=False)
        last = Argument("arg3", positional=True)
        ctx = Context(name="foo", args=(first, keyword_only, last))
        assert ctx.missing_positional_args == [first, last]
        # Fill positionals in one at a time; the missing list shrinks.
        ctx.positional_args[0].value = "wat"
        assert ctx.missing_positional_args == [last]
        ctx.positional_args[1].value = "hrm"
        assert ctx.missing_positional_args == []
class str:
    "__str__"

    def with_no_args_output_is_simple(self):
        # str() here resolves to the builtin, not this test class.
        assert str(Context("foo")) == "<parser/Context 'foo'>"

    def args_show_as_repr(self):
        rendered = str(Context("bar", args=[Argument("arg1")]))
        expected = "<parser/Context 'bar': {'arg1': <Argument: arg1>}>"
        assert rendered == expected  # noqa
| |
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from django.shortcuts import render
from django.views import defaults
from django.http import HttpResponse
from rest_framework import viewsets, mixins, status
from .models import Arch, SigKey, Label
from . import viewsets as pdc_viewsets
from .serializers import LabelSerializer, ArchSerializer, SigKeySerializer
from .filters import LabelFilter, SigKeyFilter
from . import handlers
class LabelViewSet(pdc_viewsets.PDCModelViewSet):
    """
    ##Overview##
    This page shows the usage of the **Label API**, please see the
    following for more details.
    ##Test tools##
    You can use ``curl`` in terminal, with -X _method_ (GET|POST|PUT|PATCH|DELETE),
    -d _data_ (a json string). or GUI plugins for
    browsers, such as ``RESTClient``, ``RESTConsole``.
    """
    # NOTE(review): the %(SERIALIZER)s / $LINK:...$ / $URL:...$ tokens in the
    # method docstrings below are presumably expanded by PDC's doc machinery
    # before being rendered in the browsable API -- confirm before editing them.
    serializer_class = LabelSerializer
    # Explicit ordering keeps pagination stable across requests.
    queryset = Label.objects.all().order_by('id')
    filter_class = LabelFilter

    def create(self, request, *args, **kwargs):
        """
        ### CREATE
        __Method__:
        POST
        __URL__: $LINK:label-list$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        __Response__:
        %(SERIALIZER)s
        __Example__:
        curl -H "Content-Type: application/json" -X POST -d '{"name": "label1", "description": "label1 description"}' $URL:label-list$
        # output
        {"url": "$URL:label-detail:1$", "name": "label1", "description": "label1 description"}
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(LabelViewSet, self).create(request, *args, **kwargs)

    def list(self, request, *args, **kwargs):
        """
        ### LIST
        __Method__:
        GET
        __URL__: $LINK:label-list$
        __Query Params__:
        %(FILTERS)s
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        __Example__:
        curl -H "Content-Type: application/json" -X GET $URL:label-list$
        # output
        {
            "count": 284,
            "next": "$URL:label-list$?page=2",
            "previous": null,
            "results": [
                {
                    "url": "$URL:label-detail:1$",
                    "name": "label1",
                    "description": "label1 description"
                },
                {
                    "url": "$URL:label-detail:2$",
                    "name": "label2",
                    "description": "label2 description"
                },
                ...
            ]
        }
        With query params:
        curl -H "Content-Type: application/json" -G $URL:label-list$ -d name=label1
        {
            "count": 1,
            "next": null,
            "previous": null,
            "results": [
                {
                    "url": "$URL:label-list:1$",
                    "name": "label1",
                    "description": "label1 description"
                }
            ]
        }
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(LabelViewSet, self).list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """
        ### RETRIEVE
        __Method__:
        GET
        __URL__: $LINK:label-detail:instance_pk$
        __Response__:
        %(SERIALIZER)s
        __Example__:
        curl -H "Content-Type: application/json" $URL:label-detail:1$
        # output
        {"url": "$URL:label-detail:1$", "name": "label1", "description": "label1 description"}
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(LabelViewSet, self).retrieve(request, *args, **kwargs)

    def update(self, request, *args, **kwargs):
        """
        ### UPDATE
        __Method__: `PUT`, `PATCH`
        __URL__: $LINK:label-detail:instance_pk$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        __Response__:
        %(SERIALIZER)s
        __Example__:
        PUT:
        curl -X PUT -d '{"name": "new_name", "description": "new_description"}' -H "Content-Type: application/json" $URL:label-detail:1$
        # output
        {"url": "$URL:label-detail:1$", "name": "new_name", "description": "new_description"}
        PATCH:
        curl -X PATCH -d '{"description": "new_description"}' -H "Content-Type: application/json" $URL:label-detail:1$
        # output
        {"url": "$URL:label-detail:1$", "name": "label1", "description": "new_description"}
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(LabelViewSet, self).update(request, *args, **kwargs)

    def destroy(self, request, *args, **kwargs):
        """
        ### DELETE
        __Method__: `DELETE`
        __URL__: $LINK:label-detail:instance_pk$
        __Response__:
        STATUS: 204 NO CONTENT
        __Example__:
        curl -X DELETE -H "Content-Type: application/json" $URL:label-detail:1$
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(LabelViewSet, self).destroy(request, *args, **kwargs)
class ArchViewSet(pdc_viewsets.ChangeSetCreateModelMixin,
                  pdc_viewsets.StrictQueryParamMixin,
                  mixins.ListModelMixin,
                  viewsets.GenericViewSet):
    """
    ##Overview##
    This page shows the usage of the **Arch API**, please see the
    following for more details.
    ##Test tools##
    You can use ``curl`` in terminal, with -X _method_ (GET|POST|PUT|PATCH|DELETE),
    -d _data_ (a json string). or GUI plugins for
    browsers, such as ``RESTClient``, ``RESTConsole``.
    """
    # Only list + create are exposed (no retrieve/update/destroy mixins).
    serializer_class = ArchSerializer
    # Explicit ordering keeps pagination stable across requests.
    queryset = Arch.objects.all().order_by('id')
    # Arches are addressed by name rather than numeric pk.
    lookup_field = 'name'

    def list(self, request, *args, **kwargs):
        """
        ### LIST
        __Method__:
        GET
        __URL__: $LINK:arch-list$
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        __Example__:
        curl -H "Content-Type: application/json" -X GET $URL:arch-list$
        # output
        {
            "count": 47,
            "next": "$URL:arch-list$?page=2",
            "previous": null,
            "results": [
                {
                    "name": "alpha"
                },
                {
                    "name": "alphaev4",
                },
                ...
            ]
        }
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(ArchViewSet, self).list(request, *args, **kwargs)

    def create(self, request, *args, **kwargs):
        """
        ### CREATE
        __Method__:
        POST
        __URL__: $LINK:arch-list$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        __Response__:
        %(SERIALIZER)s
        __Example__:
        curl -H "Content-Type: application/json" -X POST -d '{"name": "arm"}' $URL:arch-list$
        # output
        {"name": "arm"}
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(ArchViewSet, self).create(request, *args, **kwargs)
class SigKeyViewSet(pdc_viewsets.StrictQueryParamMixin,
                    mixins.ListModelMixin,
                    mixins.RetrieveModelMixin,
                    pdc_viewsets.ChangeSetCreateModelMixin,
                    pdc_viewsets.ChangeSetUpdateModelMixin,
                    viewsets.GenericViewSet):
    """
    ##Overview##
    This page shows the usage of the **SigKey API**, please see the
    following for more details.
    ##Test tools##
    You can use ``curl`` in terminal, with -X _method_ (GET|POST|PUT|PATCH|DELETE),
    -d _data_ (a json string). or GUI plugins for
    browsers, such as ``RESTClient``, ``RESTConsole``.
    """
    # list/retrieve/create/update are exposed; no destroy mixin, so keys
    # cannot be deleted through this API.
    serializer_class = SigKeySerializer
    # Explicit ordering keeps pagination stable across requests.
    queryset = SigKey.objects.all().order_by('id')
    filter_class = SigKeyFilter
    # Signature keys are addressed by their key_id rather than numeric pk.
    lookup_field = 'key_id'

    def list(self, request, *args, **kwargs):
        """
        ### LIST
        __Method__:
        GET
        __URL__: $LINK:sigkey-list$
        __Query Params__:
        %(FILTERS)s
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(SigKeyViewSet, self).list(request, *args, **kwargs)

    def retrieve(self, request, *args, **kwargs):
        """
        ### RETRIEVE
        __Method__:
        GET
        __URL__: $LINK:sigkey-detail:key_id$
        __Response__:
        %(SERIALIZER)s
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(SigKeyViewSet, self).retrieve(request, *args, **kwargs)

    def update(self, request, *args, **kwargs):
        """
        ### UPDATE
        __Method__: `PUT`, `PATCH`
        %(WRITABLE_SERIALIZER)s
        All keys are optional for `PATCH` request, but at least one must be
        specified.
        __URL__: $LINK:sigkey-detail:key_id$
        __Response__:
        %(SERIALIZER)s
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(SigKeyViewSet, self).update(request, *args, **kwargs)

    def create(self, request, *args, **kwargs):
        """
        ### CREATE
        __Method__:
        POST
        __URL__: $LINK:sigkey-list$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        __Response__:
        %(SERIALIZER)s
        """
        # Overridden only to attach the API docs above; behavior is inherited.
        return super(SigKeyViewSet, self).create(request, *args, **kwargs)
def home(request):
    """Render the static landing page."""
    template_name = "home/index.html"
    return render(request, template_name)
def handle404(request):
    """Serve 404s as JSON for JSON clients, HTML for everyone else.

    Clients advertising ``application/json`` in their Accept header get a
    JSON error body; all other clients get Django's default 404 page.
    """
    accept_header = request.META.get('HTTP_ACCEPT', '')
    if 'application/json' not in accept_header:
        return defaults.page_not_found(request)
    body = json.dumps(handlers.NOT_FOUND_JSON_RESPONSE)
    return HttpResponse(body,
                        status=status.HTTP_404_NOT_FOUND,
                        content_type='application/json')
| |
import json
import logging
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from socketio import packet
from socketio import server
@mock.patch('engineio.Server')
class TestServer(unittest.TestCase):
    """Unit tests for socketio.server.Server.

    engineio.Server is patched for every test (the ``eio`` argument each
    test receives), so all Engine.IO traffic lands on mocks.  Many tests
    feed raw Socket.IO packet strings into ``_handle_eio_message``; the
    leading digit is the packet type (e.g. 2 is EVENT, per the
    ``packet.EVENT`` usage in test_custom_json below) -- see the Socket.IO
    protocol for the full encoding.
    """

    def tearDown(self):
        # restore JSON encoder, in case a test changed it
        packet.Packet.json = json

    def test_create(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(mgr, binary=True, foo='bar')
        mgr.assert_called_once_with(s)
        # unknown kwargs are forwarded verbatim to engineio.Server
        eio.assert_called_once_with(**{'foo': 'bar'})
        self.assertEqual(s.eio.on.call_count, 3)
        self.assertEqual(s.binary, True)

    def test_on_event(self, eio):
        s = server.Server()

        # handlers can be attached via decorator...
        @s.on('connect')
        def foo():
            pass

        # ...or by passing the callable directly, optionally per-namespace
        def bar():
            pass
        s.on('disconnect', bar)
        s.on('disconnect', bar, namespace='/foo')

        self.assertEqual(s.handlers['/']['connect'], foo)
        self.assertEqual(s.handlers['/']['disconnect'], bar)
        self.assertEqual(s.handlers['/foo']['disconnect'], bar)

    def test_emit(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.emit('my event', {'foo': 'bar'}, 'room', '123', namespace='/foo',
               callback='cb')
        s.manager.emit.assert_called_once_with('my event', {'foo': 'bar'},
                                               '/foo', 'room', '123', 'cb')

    def test_emit_default_namespace(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.emit('my event', {'foo': 'bar'}, 'room', '123', callback='cb')
        s.manager.emit.assert_called_once_with('my event', {'foo': 'bar'}, '/',
                                               'room', '123', 'cb')

    def test_send(self, eio):
        # send() is sugar for emit('message', ...)
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.send('foo', 'room', '123', namespace='/foo', callback='cb')
        s.manager.emit.assert_called_once_with('message', 'foo', '/foo',
                                               'room', '123', 'cb')

    def test_enter_room(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.enter_room('123', 'room', namespace='/foo')
        s.manager.enter_room.assert_called_once_with('123', '/foo', 'room')

    def test_enter_room_default_namespace(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.enter_room('123', 'room')
        s.manager.enter_room.assert_called_once_with('123', '/', 'room')

    def test_leave_room(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.leave_room('123', 'room', namespace='/foo')
        s.manager.leave_room.assert_called_once_with('123', '/foo', 'room')

    def test_leave_room_default_namespace(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.leave_room('123', 'room')
        s.manager.leave_room.assert_called_once_with('123', '/', 'room')

    def test_close_room(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.close_room('room', namespace='/foo')
        s.manager.close_room.assert_called_once_with('/foo', 'room')

    def test_close_room_default_namespace(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.close_room('room')
        s.manager.close_room.assert_called_once_with('/', 'room')

    def test_rooms(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.rooms('123', namespace='/foo')
        s.manager.get_rooms.assert_called_once_with('123', '/foo')

    def test_rooms_default_namespace(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.rooms('123')
        s.manager.get_rooms.assert_called_once_with('123', '/')

    def test_handle_request(self, eio):
        # WSGI requests are delegated straight to the Engine.IO server
        s = server.Server()
        s.handle_request('environ', 'start_response')
        s.eio.handle_request.assert_called_once_with('environ',
                                                     'start_response')

    def test_emit_internal(self, eio):
        s = server.Server()
        s._emit_internal('123', 'my event', 'my data', namespace='/foo')
        s.eio.send.assert_called_once_with('123',
                                           '2/foo,["my event","my data"]',
                                           binary=False)

    def test_emit_internal_with_callback(self, eio):
        s = server.Server()
        id = s.manager._generate_ack_id('123', '/foo', 'cb')
        # the ack id is encoded after the namespace ("...,1[...")
        s._emit_internal('123', 'my event', 'my data', namespace='/foo', id=id)
        s.eio.send.assert_called_once_with('123',
                                           '2/foo,1["my event","my data"]',
                                           binary=False)

    def test_emit_internal_default_namespace(self, eio):
        s = server.Server()
        s._emit_internal('123', 'my event', 'my data')
        s.eio.send.assert_called_once_with('123', '2["my event","my data"]',
                                           binary=False)

    def test_emit_internal_binary(self, eio):
        # binary payloads are sent as a header packet plus one attachment
        s = server.Server(binary=True)
        s._emit_internal('123', u'my event', b'my binary data')
        self.assertEqual(s.eio.send.call_count, 2)

    def test_transport(self, eio):
        s = server.Server()
        s.eio.transport = mock.MagicMock(return_value='polling')
        s._handle_eio_connect('foo', 'environ')
        self.assertEqual(s.transport('foo'), 'polling')
        s.eio.transport.assert_called_once_with('foo')

    def test_handle_connect(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        handler = mock.MagicMock()
        s.on('connect', handler)
        s._handle_eio_connect('123', 'environ')
        handler.assert_called_once_with('123', 'environ')
        s.manager.connect.assert_called_once_with('123', '/')
        s.eio.send.assert_called_once_with('123', '0', binary=False)

    def test_handle_connect_namespace(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        handler = mock.MagicMock()
        s.on('connect', handler, namespace='/foo')
        s._handle_eio_connect('123', 'environ')
        # '0/foo' is the client's CONNECT request for the /foo namespace
        s._handle_eio_message('123', '0/foo')
        handler.assert_called_once_with('123', 'environ')
        s.manager.connect.assert_any_call('123', '/')
        s.manager.connect.assert_any_call('123', '/foo')
        s.eio.send.assert_any_call('123', '0/foo', binary=False)

    def test_handle_connect_rejected(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        # returning False from a connect handler rejects the connection
        handler = mock.MagicMock(return_value=False)
        s.on('connect', handler)
        s._handle_eio_connect('123', 'environ')
        handler.assert_called_once_with('123', 'environ')
        self.assertEqual(s.manager.connect.call_count, 1)
        self.assertEqual(s.manager.disconnect.call_count, 1)
        s.eio.send.assert_called_once_with('123', '4', binary=False)

    def test_handle_connect_namespace_rejected(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        handler = mock.MagicMock(return_value=False)
        s.on('connect', handler, namespace='/foo')
        s._handle_eio_connect('123', 'environ')
        s._handle_eio_message('123', '0/foo')
        self.assertEqual(s.manager.connect.call_count, 2)
        self.assertEqual(s.manager.disconnect.call_count, 1)
        s.eio.send.assert_any_call('123', '4/foo', binary=False)

    def test_handle_disconnect(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        handler = mock.MagicMock()
        s.on('disconnect', handler)
        s._handle_eio_connect('123', 'environ')
        s._handle_eio_disconnect('123')
        handler.assert_called_once_with('123')
        s.manager.disconnect.assert_called_once_with('123', '/')
        # disconnect must also drop the stored WSGI environ
        self.assertEqual(s.environ, {})

    def test_handle_disconnect_namespace(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.manager.get_namespaces = mock.MagicMock(return_value=['/', '/foo'])
        handler = mock.MagicMock()
        s.on('disconnect', handler)
        handler_namespace = mock.MagicMock()
        s.on('disconnect', handler_namespace, namespace='/foo')
        s._handle_eio_connect('123', 'environ')
        s._handle_eio_message('123', '0/foo')
        s._handle_eio_disconnect('123')
        handler.assert_called_once_with('123')
        handler_namespace.assert_called_once_with('123')
        self.assertEqual(s.environ, {})

    def test_handle_disconnect_only_namespace(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s.manager.get_namespaces = mock.MagicMock(return_value=['/', '/foo'])
        handler = mock.MagicMock()
        s.on('disconnect', handler)
        handler_namespace = mock.MagicMock()
        s.on('disconnect', handler_namespace, namespace='/foo')
        s._handle_eio_connect('123', 'environ')
        s._handle_eio_message('123', '0/foo')
        # '1/foo' disconnects only the /foo namespace, not the client
        s._handle_eio_message('123', '1/foo')
        self.assertEqual(handler.call_count, 0)
        handler_namespace.assert_called_once_with('123')
        self.assertEqual(s.environ, {'123': 'environ'})

    def test_handle_disconnect_unknown_client(self, eio):
        # disconnecting a client that never connected must not raise
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        s._handle_eio_disconnect('123')

    def test_handle_event(self, eio):
        s = server.Server()
        handler = mock.MagicMock()
        s.on('my message', handler)
        s._handle_eio_message('123', '2["my message","a","b","c"]')
        handler.assert_called_once_with('123', 'a', 'b', 'c')

    def test_handle_event_with_namespace(self, eio):
        s = server.Server()
        handler = mock.MagicMock()
        s.on('my message', handler, namespace='/foo')
        s._handle_eio_message('123', '2/foo,["my message","a","b","c"]')
        handler.assert_called_once_with('123', 'a', 'b', 'c')

    def test_handle_event_binary(self, eio):
        s = server.Server()
        handler = mock.MagicMock()
        s.on('my message', handler)
        # '52-' announces a binary EVENT with two attachments; the
        # placeholders are replaced by the byte frames that follow
        s._handle_eio_message('123', '52-["my message","a",'
                                     '{"_placeholder":true,"num":1},'
                                     '{"_placeholder":true,"num":0}]')
        self.assertEqual(s._attachment_count, 2)
        s._handle_eio_message('123', b'foo')
        self.assertEqual(s._attachment_count, 1)
        s._handle_eio_message('123', b'bar')
        self.assertEqual(s._attachment_count, 0)
        handler.assert_called_once_with('123', 'a', b'bar', b'foo')

    def test_handle_event_binary_ack(self, eio):
        s = server.Server()
        s._handle_eio_message('123', '61-1["my message","a",'
                                     '{"_placeholder":true,"num":0}]')
        self.assertEqual(s._attachment_count, 1)
        self.assertRaises(ValueError, s._handle_eio_message, '123', b'foo')

    def test_handle_event_with_ack(self, eio):
        s = server.Server()
        handler = mock.MagicMock(return_value='foo')
        s.on('my message', handler)
        s._handle_eio_message('123', '21000["my message","foo"]')
        handler.assert_called_once_with('123', 'foo')
        # the handler's return value goes back in an ACK ('3...') packet
        s.eio.send.assert_called_once_with('123', '31000["foo"]',
                                           binary=False)

    def test_handle_event_with_ack_tuple(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        handler = mock.MagicMock(return_value=(1, '2', True))
        s.on('my message', handler)
        s._handle_eio_message('123', '21000["my message","a","b","c"]')
        handler.assert_called_once_with('123', 'a', 'b', 'c')
        s.eio.send.assert_called_once_with('123', '31000[1,"2",true]',
                                           binary=False)

    def test_handle_event_with_ack_list(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr)
        handler = mock.MagicMock(return_value=[1, '2', True])
        s.on('my message', handler)
        s._handle_eio_message('123', '21000["my message","a","b","c"]')
        handler.assert_called_once_with('123', 'a', 'b', 'c')
        s.eio.send.assert_called_once_with('123', '31000[1,"2",true]',
                                           binary=False)

    def test_handle_event_with_ack_binary(self, eio):
        mgr = mock.MagicMock()
        s = server.Server(client_manager_class=mgr, binary=True)
        handler = mock.MagicMock(return_value=b'foo')
        s.on('my message', handler)
        s._handle_eio_message('123', '21000["my message","foo"]')
        handler.assert_any_call('123', 'foo')

    def test_handle_error_packet(self, eio):
        # clients are not allowed to send ERROR ('4') packets
        s = server.Server()
        self.assertRaises(ValueError, s._handle_eio_message, '123', '4')

    def test_handle_invalid_packet(self, eio):
        s = server.Server()
        self.assertRaises(ValueError, s._handle_eio_message, '123', '9')

    def test_send_with_ack(self, eio):
        s = server.Server()
        s._handle_eio_connect('123', 'environ')
        cb = mock.MagicMock()
        id1 = s.manager._generate_ack_id('123', '/', cb)
        id2 = s.manager._generate_ack_id('123', '/', cb)
        s._emit_internal('123', 'my event', ['foo'], id=id1)
        s._emit_internal('123', 'my event', ['bar'], id=id2)
        # the client acks only id 1; the callback fires exactly once
        s._handle_eio_message('123', '31["foo",2]')
        cb.assert_called_once_with('foo', 2)

    def test_send_with_ack_namespace(self, eio):
        s = server.Server()
        s._handle_eio_connect('123', 'environ')
        s._handle_eio_message('123', '0/foo')
        cb = mock.MagicMock()
        id = s.manager._generate_ack_id('123', '/foo', cb)
        s._emit_internal('123', 'my event', ['foo'], namespace='/foo',
                         id=id)
        s._handle_eio_message('123', '3/foo,1["foo",2]')
        cb.assert_called_once_with('foo', 2)

    def test_disconnect(self, eio):
        s = server.Server()
        s._handle_eio_connect('123', 'environ')
        s.disconnect('123')
        s.eio.send.assert_any_call('123', '1', binary=False)

    def test_disconnect_namespace(self, eio):
        s = server.Server()
        s._handle_eio_connect('123', 'environ')
        s._handle_eio_message('123', '0/foo')
        s.disconnect('123', namespace='/foo')
        s.eio.send.assert_any_call('123', '1/foo', binary=False)

    def test_logger(self, eio):
        # logger=False -> ERROR level, logger=True -> INFO level (unless
        # the level was already customized), any other value is used as-is
        s = server.Server(logger=False)
        self.assertEqual(s.logger.getEffectiveLevel(), logging.ERROR)
        s.logger.setLevel(logging.NOTSET)
        s = server.Server(logger=True)
        self.assertEqual(s.logger.getEffectiveLevel(), logging.INFO)
        s.logger.setLevel(logging.WARNING)
        s = server.Server(logger=True)
        self.assertEqual(s.logger.getEffectiveLevel(), logging.WARNING)
        s.logger.setLevel(logging.NOTSET)
        s = server.Server(logger='foo')
        self.assertEqual(s.logger, 'foo')

    def test_engineio_logger(self, eio):
        server.Server(engineio_logger='foo')
        eio.assert_called_once_with(**{'logger': 'foo'})

    def test_custom_json(self, eio):
        # Warning: this test cannot run in parallel with other tests, as it
        # changes the JSON encoding/decoding functions
        class CustomJSON(object):
            @staticmethod
            def dumps(*args, **kwargs):
                return '*** encoded ***'

            @staticmethod
            def loads(*args, **kwargs):
                return '+++ decoded +++'

        server.Server(json=CustomJSON)
        eio.assert_called_once_with(**{'json': CustomJSON})
        pkt = packet.Packet(packet_type=packet.EVENT,
                            data={six.text_type('foo'): six.text_type('bar')})
        self.assertEqual(pkt.encode(), '2*** encoded ***')
        pkt2 = packet.Packet(encoded_packet=pkt.encode())
        self.assertEqual(pkt2.data, '+++ decoded +++')
        # restore the default JSON module
        packet.Packet.json = json
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Implements Docker operator
"""
import json
import ast
from docker import APIClient, tls
from airflow.hooks.docker_hook import DockerHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.file import TemporaryDirectory
class DockerOperator(BaseOperator):
    """
    Execute a command inside a docker container.

    A temporary directory is created on the host and
    mounted into a container to allow storing files
    that together exceed the default disk size of 10GB in a container.
    The path to the mounted directory can be accessed
    via the environment variable ``AIRFLOW_TMP_DIR``.

    If a login to a private registry is required prior to pulling the image, a
    Docker connection needs to be configured in Airflow and the connection ID
    be provided with the parameter ``docker_conn_id``.

    :param image: Docker image from which to create the container.
        If image tag is omitted, "latest" will be used.
    :type image: str
    :param api_version: Remote API version. Set to ``auto`` to automatically
        detect the server's version.
    :type api_version: str
    :param auto_remove: Auto-removal of the container on daemon side when the
        container's process exits.
        The default is False.
    :type auto_remove: bool
    :param command: Command to be run in the container. (templated)
    :type command: str or list
    :param container_name: Name of the container. Optional (templated)
    :type container_name: str or None
    :param cpus: Number of CPUs to assign to the container.
        This value gets multiplied with 1024. See
        https://docs.docker.com/engine/reference/run/#cpu-share-constraint
    :type cpus: float
    :param dns: Docker custom DNS servers
    :type dns: list[str]
    :param dns_search: Docker custom DNS search domain
    :type dns_search: list[str]
    :param docker_url: URL of the host running the docker daemon.
        Default is unix://var/run/docker.sock
    :type docker_url: str
    :param environment: Environment variables to set in the container. (templated)
    :type environment: dict
    :param force_pull: Pull the docker image on every run. Default is False.
    :type force_pull: bool
    :param mem_limit: Maximum amount of memory the container can use.
        Either a float value, which represents the limit in bytes,
        or a string like ``128m`` or ``1g``.
    :type mem_limit: float or str
    :param host_tmp_dir: Specify the location of the temporary directory on the host which will
        be mapped to tmp_dir. If not provided defaults to using the standard system temp directory.
    :type host_tmp_dir: str
    :param network_mode: Network mode for the container.
    :type network_mode: str
    :param tls_ca_cert: Path to a PEM-encoded certificate authority
        to secure the docker connection.
    :type tls_ca_cert: str
    :param tls_client_cert: Path to the PEM-encoded certificate
        used to authenticate docker client.
    :type tls_client_cert: str
    :param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.
    :type tls_client_key: str
    :param tls_hostname: Hostname to match against
        the docker server certificate or False to disable the check.
    :type tls_hostname: str or bool
    :param tls_ssl_version: Version of SSL to use when communicating with docker daemon.
    :type tls_ssl_version: str
    :param tmp_dir: Mount point inside the container to
        a temporary directory created on the host by the operator.
        The path is also made available via the environment variable
        ``AIRFLOW_TMP_DIR`` inside the container.
    :type tmp_dir: str
    :param user: Default user inside the docker container.
    :type user: int or str
    :param volumes: List of volumes to mount into the container, e.g.
        ``['/host/path:/container/path', '/host/path2:/container/path2:ro']``.
    :type volumes: list
    :param working_dir: Working directory to
        set on the container (equivalent to the -w switch the docker client)
    :type working_dir: str
    :param xcom_push: Does the stdout will be pushed to the next step using XCom.
        The default is False.
    :type xcom_push: bool
    :param xcom_all: Push all the stdout or just the last line.
        The default is False (last line).
    :type xcom_all: bool
    :param docker_conn_id: ID of the Airflow connection to use
    :type docker_conn_id: str
    :param shm_size: Size of ``/dev/shm`` in bytes. The size must be
        greater than 0. If omitted uses system default.
    :type shm_size: int
    :param tty: Allocate pseudo-TTY to the container
        This needs to be set see logs of the Docker container.
    :type tty: bool
    """
    template_fields = ('command', 'environment', 'container_name')
    template_ext = ('.sh', '.bash',)

    @apply_defaults
    def __init__(
            self,
            image,
            api_version=None,
            command=None,
            container_name=None,
            cpus=1.0,
            docker_url='unix://var/run/docker.sock',
            environment=None,
            force_pull=False,
            mem_limit=None,
            host_tmp_dir=None,
            network_mode=None,
            tls_ca_cert=None,
            tls_client_cert=None,
            tls_client_key=None,
            tls_hostname=None,
            tls_ssl_version=None,
            tmp_dir='/tmp/airflow',
            user=None,
            volumes=None,
            working_dir=None,
            xcom_push=False,
            xcom_all=False,
            docker_conn_id=None,
            dns=None,
            dns_search=None,
            auto_remove=False,
            shm_size=None,
            tty=False,
            *args,
            **kwargs):
        super(DockerOperator, self).__init__(*args, **kwargs)
        self.api_version = api_version
        self.auto_remove = auto_remove
        self.command = command
        self.container_name = container_name
        self.cpus = cpus
        self.dns = dns
        self.dns_search = dns_search
        self.docker_url = docker_url
        self.environment = environment or {}
        self.force_pull = force_pull
        self.image = image
        self.mem_limit = mem_limit
        self.host_tmp_dir = host_tmp_dir
        self.network_mode = network_mode
        self.tls_ca_cert = tls_ca_cert
        self.tls_client_cert = tls_client_cert
        self.tls_client_key = tls_client_key
        self.tls_hostname = tls_hostname
        self.tls_ssl_version = tls_ssl_version
        self.tmp_dir = tmp_dir
        self.user = user
        self.volumes = volumes or []
        self.working_dir = working_dir
        self.xcom_push_flag = xcom_push
        self.xcom_all = xcom_all
        self.docker_conn_id = docker_conn_id
        self.shm_size = shm_size
        self.tty = tty

        self.cli = None
        self.container = None

    def get_hook(self):
        """Build a DockerHook from the configured Airflow connection."""
        return DockerHook(
            docker_conn_id=self.docker_conn_id,
            base_url=self.docker_url,
            version=self.api_version,
            tls=self.__get_tls_config()
        )

    def _run_image(self):
        """
        Run a Docker container with the provided image
        """
        self.log.info('Starting docker container from image %s', self.image)
        with TemporaryDirectory(prefix='airflowtmp', dir=self.host_tmp_dir) as host_tmp_dir:
            # Build the bind list locally instead of appending to
            # self.volumes: mutating the attribute made every retry of the
            # task accumulate a duplicate tmp-dir bind.
            binds = self.volumes + ['{0}:{1}'.format(host_tmp_dir, self.tmp_dir)]
            self.container = self.cli.create_container(
                command=self.get_command(),
                name=self.container_name,
                environment=self.environment,
                host_config=self.cli.create_host_config(
                    auto_remove=self.auto_remove,
                    binds=binds,
                    network_mode=self.network_mode,
                    shm_size=self.shm_size,
                    dns=self.dns,
                    dns_search=self.dns_search,
                    cpu_shares=int(round(self.cpus * 1024)),
                    mem_limit=self.mem_limit),
                image=self.image,
                user=self.user,
                working_dir=self.working_dir,
                tty=self.tty,
            )
            self.cli.start(self.container['Id'])

            # 'line' keeps the last log line for the (non-xcom_all) XCom push
            line = ''
            for line in self.cli.attach(container=self.container['Id'],
                                        stdout=True,
                                        stderr=True,
                                        stream=True):
                line = line.strip()
                if hasattr(line, 'decode'):
                    line = line.decode('utf-8')
                self.log.info(line)

            result = self.cli.wait(self.container['Id'])
            if result['StatusCode'] != 0:
                raise AirflowException('docker container failed: ' + repr(result))

            if self.xcom_push_flag:
                return self.cli.logs(container=self.container['Id']) \
                    if self.xcom_all else str(line)

    def execute(self, context):
        """Pull the image if needed, then run the container.

        :raises AirflowException: if the container exits with a non-zero
            status code (raised from ``_run_image``).
        """
        tls_config = self.__get_tls_config()

        if self.docker_conn_id:
            self.cli = self.get_hook().get_conn()
        else:
            self.cli = APIClient(
                base_url=self.docker_url,
                version=self.api_version,
                tls=tls_config
            )

        # Pull the docker image if `force_pull` is set or image does not exist locally
        if self.force_pull or len(self.cli.images(name=self.image)) == 0:
            self.log.info('Pulling docker image %s', self.image)
            # BUGFIX: with decode=True the docker SDK yields already-decoded
            # dicts, so calling .decode()/json.loads on them raised
            # AttributeError. Handle bytes, str and dict defensively.
            for output in self.cli.pull(self.image, stream=True, decode=True):
                if isinstance(output, bytes):
                    output = output.decode('utf-8')
                if isinstance(output, str):
                    output = json.loads(output.strip())
                if isinstance(output, dict) and 'status' in output:
                    self.log.info("%s", output['status'])

        self.environment['AIRFLOW_TMP_DIR'] = self.tmp_dir
        return self._run_image()

    def get_command(self):
        """
        Retrieve command(s). If the command string starts with '[', it is
        parsed as a Python list literal and the command list is returned.

        :return: the command (or commands)
        :rtype: str | List[str]
        """
        if isinstance(self.command, str) and self.command.strip().find('[') == 0:
            commands = ast.literal_eval(self.command)
        else:
            commands = self.command
        return commands

    def on_kill(self):
        """Stop the running container when the task is externally killed."""
        if self.cli is not None:
            self.log.info('Stopping docker container')
            self.cli.stop(self.container['Id'])

    def __get_tls_config(self):
        """Build a TLSConfig from the tls_* params, or None when any of the
        CA cert, client cert or client key is missing. When TLS is active,
        the docker URL scheme is switched from tcp:// to https://."""
        tls_config = None
        if self.tls_ca_cert and self.tls_client_cert and self.tls_client_key:
            # Ignore type error on SSL version here - it is deprecated and type annotation is wrong
            # it should be string
            # noinspection PyTypeChecker
            tls_config = tls.TLSConfig(
                ca_cert=self.tls_ca_cert,
                client_cert=(self.tls_client_cert, self.tls_client_key),
                verify=True,
                ssl_version=self.tls_ssl_version,  # type: ignore
                assert_hostname=self.tls_hostname
            )
            self.docker_url = self.docker_url.replace('tcp://', 'https://')
        return tls_config
| |
"""Storage methods and utilities"""
import getpass
import os
import logging
import re
import shutil
import time
from datetime import datetime
from multiprocessing import Pool
from statusdb.db import connections as statusdb
from taca.utils.config import CONFIG
from taca.utils import filesystem, misc
logger = logging.getLogger(__name__)
def cleanup_nas(days):
    """Will move the finished runs in NASes to nosync directory.

    :param int days: Number of days to consider a run to be old
    """
    # 1 day == 60*60*24 seconds --> 86400
    seconds_per_day = 86400
    for data_dir in CONFIG.get('storage').get('data_dirs'):
        logger.info('Moving old runs in {}'.format(data_dir))
        with filesystem.chdir(data_dir):
            finished = [r for r in os.listdir(data_dir)
                        if re.match(filesystem.RUN_RE, r)]
            for run in finished:
                rta_file = os.path.join(run, 'RTAComplete.txt')
                if not os.path.exists(rta_file):
                    # Run is not finished yet; leave it alone.
                    continue
                cutoff = time.time() - (seconds_per_day * days)
                if os.stat(rta_file).st_mtime < cutoff:
                    logger.info('Moving run {} to nosync directory'
                                .format(os.path.basename(run)))
                    shutil.move(run, 'nosync')
                else:
                    logger.info('RTAComplete.txt file exists but is not older than {} day(s), skipping run {}'.format(str(days), run))
def cleanup_processing(days):
    """Cleanup runs in processing server.

    Runs already listed in the transfer log are moved to the nosync
    directory; runs in the archiving directories that are old enough and
    already backed up in Swestore are deleted from disk.

    :param int days: Number of days to consider a run to be old
    """
    transfer_file = os.path.join(CONFIG.get('preprocessing', {}).get('status_dir'), 'transfer.tsv')
    if not days:
        days = CONFIG.get('cleanup', {}).get('processing-server', {}).get('days', 10)
    try:
        # Move finished runs to nosync
        for data_dir in CONFIG.get('storage').get('data_dirs'):
            logger.info('Moving old runs in {}'.format(data_dir))
            with filesystem.chdir(data_dir):
                for run in [r for r in os.listdir(data_dir) if re.match(filesystem.RUN_RE, r)]:
                    if filesystem.is_in_file(transfer_file, run):
                        logger.info('Moving run {} to nosync directory'
                                    .format(os.path.basename(run)))
                        shutil.move(run, 'nosync')
                    else:
                        logger.info(("Run {} has not been transferred to the analysis "
                                     "server yet, not archiving".format(run)))
        # Remove old runs from archiving dirs
        for archive_dir in CONFIG.get('storage').get('archive_dirs').values():
            logger.info('Removing old runs in {}'.format(archive_dir))
            with filesystem.chdir(archive_dir):
                for run in [r for r in os.listdir(archive_dir) if re.match(filesystem.RUN_RE, r)]:
                    rta_file = os.path.join(run, 'RTAComplete.txt')
                    if os.path.exists(rta_file):
                        # 1 day == 60*60*24 seconds --> 86400
                        if os.stat(rta_file).st_mtime < time.time() - (86400 * days) and \
                                filesystem.is_in_swestore("{}.tar.bz2".format(run)):
                            # BUG FIX: the old message said the run was being
                            # moved to nosync, but it is actually deleted here.
                            logger.info('Removing run {}'
                                        .format(os.path.basename(run)))
                            shutil.rmtree(run)
                        else:
                            logger.info('RTAComplete.txt file exists but is not older than {} day(s), skipping run {}'.format(str(days), run))
    except IOError:
        sbj = "Cannot archive old runs in processing server"
        msg = ("Could not find transfer.tsv file, so I cannot decide if I should "
               "archive any run or not.")
        cnt = CONFIG.get('contact', None)
        if not cnt:
            cnt = "{}@localhost".format(getpass.getuser())
        logger.error(msg)
        misc.send_mail(sbj, msg, cnt)
def archive_to_swestore(days, run=None, max_runs=None, force=False, compress_only=False):
    """Send runs (as archives) in NAS nosync to swestore for backup

    :param int days: number fo days to check threshold
    :param str run: specific run to send swestore
    :param int max_runs: number of runs to be processed simultaneously
    :param bool force: Force the archiving even if the run is not complete
    :param bool compress_only: Compress the run without sending it to swestore
    """
    # If the run is specified in the command line, check that exists and archive
    if run:
        # BUG FIX: the directory part must be taken *before* stripping the
        # path to its basename -- os.path.dirname(os.path.basename(x)) is
        # always '', so absolute run paths were silently ignored.
        base_dir = os.path.dirname(run)
        run = os.path.basename(run)
        if re.match(filesystem.RUN_RE, run):
            # If the parameter is not an absolute path, find the run in the archive_dirs
            if not base_dir:
                for archive_dir in CONFIG.get('storage').get('archive_dirs'):
                    if os.path.exists(os.path.join(archive_dir, run)):
                        base_dir = archive_dir
            if not os.path.exists(os.path.join(base_dir, run)):
                logger.error(("Run {} not found. Please make sure to specify "
                              "the absolute path or relative path being in "
                              "the correct directory.".format(run)))
            else:
                with filesystem.chdir(base_dir):
                    _archive_run((run, days, force, compress_only))
        else:
            logger.error("The name {} doesn't look like an Illumina run"
                         .format(os.path.basename(run)))
    # Otherwise find all runs in every data dir on the nosync partition
    else:
        logger.info("Archiving old runs to SWESTORE")
        for to_send_dir in CONFIG.get('storage').get('archive_dirs'):
            logger.info('Checking {} directory'.format(to_send_dir))
            with filesystem.chdir(to_send_dir):
                to_be_archived = [r for r in os.listdir(to_send_dir)
                                  if re.match(filesystem.RUN_RE, r)
                                  and not os.path.exists("{}.archiving".format(r.split('.')[0]))]
                if to_be_archived:
                    pool = Pool(processes=len(to_be_archived) if not max_runs else max_runs)
                    pool.map_async(_archive_run, ((run, days, force, compress_only) for run in to_be_archived))
                    pool.close()
                    pool.join()
                else:
                    logger.info('No old runs to be archived')
def cleanup_swestore(days, dry_run=False):
    """Remove archived runs from swestore

    :param int days: Threshold days to check and remove
    :param bool dry_run: Only log what would be removed without deleting
    """
    # BUG FIX: `config` was an undefined name (NameError at runtime); the
    # parsed configuration is the module-level CONFIG dictionary.
    days = check_days('swestore', days, CONFIG)
    if not days:
        return
    runs = filesystem.list_runs_in_swestore(path=CONFIG.get('cleanup').get('swestore').get('root'))
    for run in runs:
        # Flowcell date is the first underscore-separated field of a run name.
        date = run.split('_')[0]
        if misc.days_old(date) > days:
            if dry_run:
                logger.info('Will remove file {} from swestore'.format(run))
                continue
            misc.call_external_command('irm -f {}'.format(run))
            logger.info('Removed file {} from swestore'.format(run))
def cleanup_uppmax(site, days, dry_run=False):
    """Remove project/run that have been closed more than 'days'
    from the given 'site' on uppmax

    :param str site: site where the cleanup should be performed
    :param int days: number of days to check for closed projects
    :param bool dry_run: Only log what would be removed without deleting
    """
    # BUG FIX: `config` was an undefined name (NameError at runtime); use the
    # module-level CONFIG dictionary.
    days = check_days(site, days, CONFIG)
    if not days:
        return
    root_dir = CONFIG.get('cleanup').get(site).get('root')
    deleted_log = CONFIG.get('cleanup').get('deleted_log')
    assert os.path.exists(os.path.join(root_dir, deleted_log)), "Log directory {} doesn't exist in {}".format(deleted_log, root_dir)
    log_file = os.path.join(root_dir, "{fl}/{fl}.log".format(fl=deleted_log))
    # make a connection for project db #
    pcon = statusdb.ProjectSummaryConnection()
    assert pcon, "Could not connect to project database in StatusDB"
    if site != "archive":
        ## work flow for cleaning up illumina/analysis ##
        projects = [p for p in os.listdir(root_dir) if re.match(filesystem.PROJECT_RE, p)]
        list_to_delete = get_closed_projects(projects, pcon, days)
    else:
        ##work flow for cleaning archive ##
        list_to_delete = []
        archived_in_swestore = filesystem.list_runs_in_swestore(path=CONFIG.get('cleanup').get('swestore').get('root'), no_ext=True)
        runs = [r for r in os.listdir(root_dir) if re.match(filesystem.RUN_RE, r)]
        with filesystem.chdir(root_dir):
            for run in runs:
                fc_date = run.split('_')[0]
                if misc.days_old(fc_date) > days:
                    # Only delete runs that are safely backed up in swestore.
                    if run in archived_in_swestore:
                        list_to_delete.append(run)
                    else:
                        logger.warn("Run {} is older than {} days but not in "
                                    "swestore, so SKIPPING".format(run, days))
    ## delete and log
    for item in list_to_delete:
        if dry_run:
            logger.info('Will remove {} from {}'.format(item, root_dir))
            continue
        try:
            shutil.rmtree(os.path.join(root_dir, item))
            logger.info('Removed project {} from {}'.format(item, root_dir))
            with open(log_file, 'a') as to_log:
                to_log.write("{}\t{}\n".format(item, datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M')))
        except OSError:
            logger.warn("Could not remove path {} from {}"
                        .format(item, root_dir))
            continue
#############################################################
# Class helper methods, not exposed as commands/subcommands #
#############################################################
def _archive_run(args):
    """Archive a specific run to swestore

    :param tuple args: (run, days, force, compress_only) where
        run (str): Run directory
        days (int): Days to consider a run old
        force (bool): Force the archiving even if the run is not complete
        compress_only (bool): Only compress the run without sending it to swestore
    """
    # BUG FIX: tuple parameter unpacking in the signature is invalid syntax on
    # Python 3 (PEP 3113). Callers still pass a single tuple, so unpacking
    # explicitly keeps the interface unchanged.
    run, days, force, compress_only = args

    def _send_to_swestore(f, dest, remove=True):
        """Send file to swestore checking adler32 on destination and eventually
        removing the file from disk

        :param str f: File to send
        :param str dest: Destination directory in Swestore
        :param bool remove: If True, remove original file from source
        """
        if not filesystem.is_in_swestore(f):
            logger.info("Sending {} to swestore".format(f))
            misc.call_external_command('iput -K -P {file} {dest}'.format(file=f, dest=dest),
                                       with_log_files=True)
            logger.info('Run {} sent correctly and checksum was okay.'.format(f))
            if remove:
                # BUG FIX: the message had no placeholder for the filename.
                logger.info('Removing run {}'.format(f))
                os.remove(f)
        else:
            logger.warn('Run {} is already in Swestore, not sending it again nor removing from the disk'.format(f))

    # Create state file to say that the run is being archived
    open("{}.archiving".format(run.split('.')[0]), 'w').close()
    if run.endswith('bz2'):
        if os.stat(run).st_mtime < time.time() - (86400 * days):
            _send_to_swestore(run, CONFIG.get('storage').get('irods').get('irodsHome'))
        else:
            logger.info("Run {} is not {} days old yet. Not archiving".format(run, str(days)))
    else:
        rta_file = os.path.join(run, 'RTAComplete.txt')
        if not os.path.exists(rta_file) and not force:
            logger.warn(("Run {} doesn't seem to be completed and --force option was "
                         "not enabled, not archiving the run".format(run)))
        if force or (os.path.exists(rta_file) and os.stat(rta_file).st_mtime < time.time() - (86400 * days)):
            logger.info("Compressing run {}".format(run))
            # Compress with pbzip2
            misc.call_external_command('tar --use-compress-program=pbzip2 -cf {run}.tar.bz2 {run}'.format(run=run))
            logger.info('Run {} successfully compressed! Removing from disk...'.format(run))
            shutil.rmtree(run)
            if not compress_only:
                _send_to_swestore('{}.tar.bz2'.format(run), CONFIG.get('storage').get('irods').get('irodsHome'))
        else:
            logger.info("Run {} is not completed or is not {} days old yet. Not archiving".format(run, str(days)))
    os.remove("{}.archiving".format(run.split('.')[0]))
def get_closed_projects(projs, pj_con, days):
    """Takes list of project and gives project list that are closed
    more than given check 'days'

    :param list projs: list of projects to check
    :param obj pj_con: connection object to project database
    :param int days: number of days to check
    """
    closed = []
    for project in projs:
        if project not in pj_con.name_view.keys():
            logger.warn("Project {} is not in database, so SKIPPING it.."
                        .format(project))
            continue
        entry = pj_con.get_entry(project)
        try:
            close_date = entry['close_date']
        except KeyError:
            # No close date means the project is still open (or predates it).
            logger.warn("Project {} is either open or too old, so SKIPPING it..".format(project))
            continue
        if misc.days_old(close_date, date_format='%Y-%m-%d') > days:
            closed.append(project)
    return closed
def check_days(site, days, config):
    """Check if 'days' given while running command. If not take the default threshold
    from config file (which should exist). Also when 'days' given on the command line
    raise a check to make sure it was really meant to do so

    :param str site: site to be cleaned and relevent date to pick
    :param int days: number of days to check, will be None if '-d' not used
    :param dict config: config file parsed and saved as dictionary
    :returns: number of days to use, or None if the user declined the prompt
    :raises KeyError: if config holds no default threshold for `site`
    """
    # A missing default is a configuration error; let the KeyError propagate.
    # (The previous `try: ... except KeyError: raise` was a no-op.)
    default_days = config['cleanup'][site]['days']
    if not days:
        return default_days
    if days >= default_days:
        return days
    # Fewer days than the default is suspicious; ask for confirmation.
    if misc.query_yes_no("Seems like given days({}) is less than the "
                         " default({}), are you sure to proceed ?"
                         .format(days, default_days), default="no"):
        return days
    return None
| |
"""RPM dependency lines parser and helpers.
Contains class DependencyParser which parses string and generates
token tree. For common manipulation is method flat_out() useful, it
just splits dependencies into list.
For future development is useful find_end_of_macro().
"""
import re
import logging
from .rpmexception import NoMatchException
# Set to a truthy value to enable verbose parser logging.
DEBUG = None
if DEBUG:
    logging.basicConfig(level=logging.DEBUG)
# Tokenizer for parenthesised macro bodies '%(...)': each match is a single
# '(' or ')', a backslash-escaped paren, or a run of other characters.
# NOTE(review): in regex syntax r'\\(' is an escaped backslash followed by a
# *group*-opening paren (not an escaped '('); the pattern only compiles
# because the r'\\)' half balances it -- presumably r'\\\(' was intended.
# Confirm before changing.
re_parens = re.compile(
    r'(' +
    r'\(' + r'|' + r'\)' + r'|' +
    r'\\(' + r'|' + r'\\)' + r'|' +
    r'[^\()]+' +
    r')'
)
# Same tokenizer for braced macro bodies '%{...}'.
re_braces = re.compile(
    r'(' +
    r'\{' + r'|' + r'\}' + r'|' +
    r'\\{' + r'|' + r'\\}' + r'|' +
    r'[^\{}]+' +
    r')'
)
# Characters allowed in a dependency name (globs and paths included).
re_name = re.compile(r'[-A-Za-z0-9_~():.+/*]+')
# Characters allowed in a version string (no '/' or '*').
re_version = re.compile(r'[-A-Za-z0-9_~():.+]+')
# A run of whitespace.
re_spaces = re.compile(r'\s+')
# Unbraced macro such as '%name': '%' plus at least 3 alphanumerics.
re_macro_unbraced = re.compile('%[A-Za-z0-9_]{3,}')
# Version comparison operators accepted in dependency lines.
re_version_operator = re.compile('(>=|<=|=>|=<|>|<|=)')
def find_end_of_macro(string, regex, opening, closing):
    """Return the complete macro (e.g. '%{...}' or '%(...)') at the start of
    `string`, tracking nested opening/closing delimiters."""
    logger = logging.getLogger('DepParser') if DEBUG else None
    macro = string[:2]
    # eat '%{'
    remainder = string[2:]
    depth = 1
    while depth and remainder:
        if logger:
            logger.debug('opened: %d string: %s', depth, remainder)
        try:
            bite, remainder = consume_chars(regex, remainder, logger)
        except NoMatchException:
            raise Exception('unexpected parser error when looking for end of '
                            'macro')
        if bite == opening:
            depth += 1
        elif bite == closing:
            depth -= 1
        macro += bite
    if depth:
        raise Exception('Unexpectedly met end of string when looking for end '
                        'of macro')
    return macro
def consume_chars(regex, string, logger=None):
    """Split `string` into (matched_prefix, rest) using `regex` anchored at
    the start; raise NoMatchException when nothing matches."""
    if logger:
        logger.debug('consume_chars: regex: "%s"', regex.pattern)
        logger.debug('consume_chars: string:"%s"', string)
    match = regex.match(string)
    if not match:
        raise NoMatchException('Expected match failed')
    cut = match.end()
    if logger:
        logger.debug('consume_chars: split "%s", "%s"', string[:cut], string[cut:])
    return string[:cut], string[cut:]
class DependencyParser(object):
    """State-machine parser that splits an RPM dependency line into tokens.

    Parsing runs immediately in __init__; results are available through
    flat_out() or the raw `parsed` attribute (a list of token fragments).
    """
    # class-level default; replaced with a real logger when DEBUG is set
    logger = None

    def __init__(self, string):
        # unparsed remainder of the input line (trailing whitespace dropped)
        self.string = string.rstrip()
        # fragments of the token currently being assembled
        self.token = []
        # completed tokens (each a list of string fragments)
        self.parsed = []
        # state stack; the top element selects the next read_* method
        self.state = ['name']
        if DEBUG:
            self.logger = logging.getLogger('DepParser')
            self.logger.setLevel(logging.DEBUG)
        self.state_change_loop()

    def dump_token(self):
        """Flush the currently assembled token into `parsed` (if non-empty)."""
        if self.logger:
            self.logger.debug('dump_token')
            self.status()
        if not self.token:
            return
        # a leading whitespace fragment carries no information -- drop it
        if self.token[0].isspace():
            self.token = self.token[1:]
        if not self.token:
            return
        self.parsed.append(self.token)
        self.token = []

    def state_change_loop(self):
        """Main loop: dispatch on the top of the state stack until the
        input string is fully consumed, then flush the last token."""
        while self.string:
            if self.state[-1] == 'name':
                self.read_name()
            elif self.state[-1] == 'version_operator':
                self.read_version_operator()
            elif self.state[-1] == 'version':
                self.read_version()
            elif self.state[-1] == 'macro_name':
                self.read_macro_name()
            elif self.state[-1] == 'macro_shell':
                self.read_macro_shell()
            elif self.state[-1] == 'macro_unbraced':
                self.read_macro_unbraced()
            elif self.state[-1] == 'spaces':
                self.read_spaces()
        self.dump_token()

    def status(self):
        """Log the complete parser state (only active in DEBUG mode)."""
        if self.logger:
            self.logger.debug('token: %s', self.token)
            self.logger.debug('string: "%s"', self.string)
            self.logger.debug('parsed: %s', self.parsed)
            self.logger.debug('state: %s', self.state)
            self.logger.debug('--------------------------------')

    def read_spaces(self, state_change=True):
        """Consume a run of whitespace.

        :param state_change: when True, pop the 'spaces' state and, if a
            version was being read, also finish it (a space ends a version).
        """
        try:
            spaces, self.string = consume_chars(re_spaces, self.string, self.logger)
            self.token.append(spaces)
            if state_change:
                self.state.pop()  # remove 'spaces' state
                # if we were reading version, space definitely means
                # end of that
                if self.state[-1] == 'version':
                    self.state.pop()
                    self.dump_token()
            self.status()
        except NoMatchException:
            pass

    def read_unknown(self):
        '''
        Try to identify, what is to be read now.
        '''
        if self.string[0:2] in ['>=', '<=', '=>', '=<'] or \
                self.string[0:1] in ['<', '>', '=']:
            self.state.append('version')
            self.state.append('version_operator')
        elif self.string[0] == ' ':
            self.state.append('spaces')
        elif self.string[0:2] == '%{':
            self.state.append('macro_name')
        elif self.string[0:2] == '%(':
            self.state.append('macro_shell')
        elif self.string[0:2] == '%%':
            self.read_double_percent()
        elif self.string[0] == '%':
            self.state.append('macro_unbraced')
        elif self.string[0] == ',':
            # a comma separates dependencies: eat it and finish the token
            self.string = self.string[1:]
            self.dump_token()
        if self.logger:
            self.logger.debug('read_unknown: states: %s string: "%s"',
                              self.state, self.string)

    def read_name(self):
        """Consume a dependency name; a preceding whitespace fragment means
        the previous token is complete."""
        try:
            name, self.string = consume_chars(re_name, self.string, self.logger)
            if self.token and self.token[-1].isspace():
                self.dump_token()
            self.token.append(name)
            self.status()
        except NoMatchException:
            self.read_unknown()

    def read_double_percent(self):
        """Consume a literal '%%' escape."""
        self.token.append('%%')
        self.string = self.string[2:]

    def read_macro_unbraced(self):
        """Consume an unbraced macro such as '%name'."""
        try:
            # 3 or more alphanumeric characters
            macro, self.string = consume_chars(
                re_macro_unbraced, self.string, self.logger)
            self.token.append(macro)
            self.state.pop()  # remove 'macro_unbraced' state
            self.status()
        except NoMatchException:
            self.read_unknown()

    def read_version_operator(self):
        """Consume a comparison operator (>=, <=, =>, =<, >, <, =)."""
        try:
            operator, self.string = consume_chars(
                re_version_operator, self.string, self.logger)
            self.token.append(operator)
            # Note: this part is a bit tricky, I need to read possible
            # spaces or tabs now so I won't get to [ ..., 'version',
            # 'spaces' ] state before the end
            self.read_spaces(state_change=False)
            self.state.pop()  # get rid of 'version_operator'
            self.status()
        except NoMatchException:
            self.read_unknown()

    def read_version(self):
        """Consume a version string."""
        try:
            version, self.string = consume_chars(
                re_version, self.string, self.logger)
            self.token.append(version)
            self.status()
        except NoMatchException:
            self.read_unknown()

    def read_macro_name(self):
        """Consume a braced macro '%{...}' (including nested braces)."""
        macro = find_end_of_macro(self.string, re_braces, '{', '}')
        # remove macro from string
        self.string = self.string[len(macro):]
        self.token.append(macro)
        # now we expect previous state
        self.state.pop()
        self.status()

    def read_macro_shell(self):
        """Consume a shell macro '%(...)' (including nested parens)."""
        macro = find_end_of_macro(self.string, re_parens, '(', ')')
        self.string = self.string[len(macro):]
        self.token.append(macro)
        # now we expect previous state
        self.state.pop()
        self.status()

    def flat_out(self):
        """Return the parsed dependencies as a flat list of strings."""
        result = []
        for token in self.parsed:
            if isinstance(token, list):
                # drop a trailing whitespace fragment before joining
                if token and token[-1].isspace():
                    token = token[:-1]
                result.append(''.join(token))
            else:
                if not token.isspace():
                    result.append(token)
        return result
| |
# 3p
from mock import Mock
# project
from tests.checks.common import AgentCheckTest
from tests.core.test_wmi import SWbemServices, TestCommonWMI
class WMITestCase(AgentCheckTest, TestCommonWMI):
    """Tests for the `wmi_check` agent check, run against the mocked WMI
    layer provided by tests.core.test_wmi (no real WMI connection)."""
    CHECK_NAME = 'wmi_check'

    # Connection settings plus metric specs against Win32_OperatingSystem.
    WMI_CONNECTION_CONFIG = {
        'host': "myhost",
        'namespace': "some/namespace",
        'username': "conmon",
        'password': "conmon",
        'class': "Win32_OperatingSystem",
        'metrics': [["NumberOfProcesses", "system.proc.count", "gauge"],
                    ["NumberOfUsers", "system.users.count", "gauge"]]
    }

    # Basic logical-disk metrics, tagged by the 'Name' property.
    WMI_CONFIG = {
        'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
        'metrics': [["AvgDiskBytesPerWrite", "winsys.disk.avgdiskbytesperwrite", "gauge"],
                    ["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
        'tag_by': "Name",
        'constant_tags': ["foobar"],
    }

    # Config whose first metric has a non-numeric property value.
    WMI_NON_DIGIT_PROP = {
        'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
        'metrics': [["NonDigit", "winsys.nondigit", "gauge"],
                    ["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
    }

    # Config referencing counters the class does not expose.
    WMI_MISSING_PROP_CONFIG = {
        'class': "Win32_PerfRawData_PerfOS_System",
        'metrics': [["UnknownCounter", "winsys.unknowncounter", "gauge"],
                    ["MissingProperty", "this.will.not.be.reported", "gauge"]],
    }

    # Same as WMI_CONFIG but without a 'tag_by' key.
    WMI_CONFIG_NO_TAG_BY = {
        'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
        'metrics': [["AvgDiskBytesPerWrite", "winsys.disk.avgdiskbytesperwrite", "gauge"],
                    ["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
    }

    # Config restricted to the '_Total' instance via a filter.
    WMI_CONFIG_FILTERS = {
        'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
        'metrics': [["AvgDiskBytesPerWrite", "winsys.disk.avgdiskbytesperwrite", "gauge"],
                    ["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
        'filters': [{'Name': "_Total"}],
    }

    # Base config extended by _make_wmi_tag_query_config with 'tag_queries'.
    WMI_TAG_QUERY_CONFIG_TEMPLATE = {
        'class': "Win32_PerfFormattedData_PerfProc_Process",
        'metrics': [["IOReadBytesPerSec", "proc.io.bytes_read", "gauge"]],
        'filters': [{'Name': "chrome"}],
    }

    @classmethod
    def _make_wmi_tag_query_config(cls, tag_queries):
        """
        Helper to create a WMI configuration on
        `Win32_PerfFormattedData_PerfProc_Process.IOReadBytesPerSec` with the given
        `tag_queries` parameter.
        """
        wmi_tag_query_config = {}
        wmi_tag_query_config.update(cls.WMI_TAG_QUERY_CONFIG_TEMPLATE)
        # Accept either one query (list of str) or a list of queries.
        queries = tag_queries if all(isinstance(elem, list) for elem in tag_queries) \
            else [tag_queries]
        wmi_tag_query_config['tag_queries'] = queries
        return wmi_tag_query_config

    def _get_wmi_sampler(self):
        """
        Helper to easily retrieve, if exists and unique, the WMISampler created
        by the configuration.

        Fails when multiple samplers are avaiable.
        """
        self.assertTrue(
            self.check.wmi_samplers,
            u"Unable to retrieve the WMISampler: no sampler was found"
        )
        self.assertEquals(
            len(self.check.wmi_samplers), 1,
            u"Unable to retrieve the WMISampler: expected a unique, but multiple were found"
        )
        return self.check.wmi_samplers.itervalues().next()

    def test_wmi_connection(self):
        """
        Establish a WMI connection to the specified host/namespace, with the right credentials.
        """
        # Run check
        config = {
            'instances': [self.WMI_CONNECTION_CONFIG]
        }
        self.run_check(config)
        # A WMISampler is cached
        self.assertInPartial("myhost:some/namespace:Win32_OperatingSystem", self.check.wmi_samplers)
        wmi_sampler = self.getProp(self.check.wmi_samplers, "myhost:some/namespace:Win32_OperatingSystem")
        # Connection was established with the right parameters
        self.assertWMIConn(wmi_sampler, "myhost")
        self.assertWMIConn(wmi_sampler, "some/namespace")

    def test_wmi_sampler_initialization(self):
        """
        An instance creates its corresponding WMISampler.
        """
        # Run check
        config = {
            'instances': [self.WMI_CONFIG_FILTERS]
        }
        self.run_check(config)
        # Retrieve the sampler
        wmi_sampler = self._get_wmi_sampler()
        # Assert the sampler
        self.assertEquals(wmi_sampler.class_name, "Win32_PerfFormattedData_PerfDisk_LogicalDisk")
        self.assertEquals(wmi_sampler.property_names, ["AvgDiskBytesPerWrite", "FreeMegabytes"])
        self.assertEquals(wmi_sampler.filters, [{'Name': "_Total"}])

    def test_wmi_properties(self):
        """
        Compute a (metric name, metric type) by WMI property map and a property list.
        """
        # Set up the check
        config = {
            'instances': [self.WMI_CONNECTION_CONFIG]
        }
        self.run_check(config)
        # WMI props are cached
        self.assertInPartial("myhost:some/namespace:Win32_OperatingSystem", self.check.wmi_props)
        metric_name_and_type_by_property, properties = \
            self.getProp(self.check.wmi_props, "myhost:some/namespace:Win32_OperatingSystem")
        # Assess
        self.assertEquals(
            metric_name_and_type_by_property,
            {
                'numberofprocesses': ("system.proc.count", "gauge"),
                'numberofusers': ("system.users.count", "gauge")
            }
        )
        self.assertEquals(properties, ["NumberOfProcesses", "NumberOfUsers"])

    def test_metric_extraction(self):
        """
        Extract metrics from WMI query results.
        """
        # local import to avoid pulling in pywintypes ahead of time.
        from checks.wmi_check import WMIMetric  # noqa
        # Set up the check
        config = {
            'instances': [self.WMI_CONFIG]
        }
        self.run_check(config)
        # Retrieve the sampler
        wmi_sampler = self._get_wmi_sampler()
        # Extract metrics
        metrics = self.check._extract_metrics(wmi_sampler, "name", [], ["foobar"])
        # Assess
        expected_metrics = [
            WMIMetric("freemegabytes", 19742, ["foobar", "name:c:"]),
            WMIMetric("avgdiskbytesperwrite", 1536, ["foobar", "name:c:"]),
            WMIMetric("freemegabytes", 19742, ["foobar", "name:d:"]),
            WMIMetric("avgdiskbytesperwrite", 1536, ["foobar", "name:d:"]),
        ]
        self.assertEquals(metrics, expected_metrics)

    def test_missing_property(self):
        """
        Do not raise on missing properties, but print a warning.
        """
        # Set up the check
        config = {
            'instances': [self.WMI_MISSING_PROP_CONFIG]
        }
        logger = Mock()
        self.run_check(config, mocks={'log': logger})
        self.assertTrue(logger.warning.called)

    def test_warnings_on_non_digit(self):
        """
        Log a warning on non digit property values except for:
        * 'Name' property
        * 'tag_by' associated property
        """
        wmi_instance = self.WMI_NON_DIGIT_PROP.copy()
        config = {
            'instances': [wmi_instance]
        }
        logger = Mock()
        # Log a warning about 'NonDigit' property
        self.run_check(config, mocks={'log': logger})
        self.assertEquals(logger.warning.call_count, 1)
        # No warnings on `tag_by` property neither on `Name`
        del wmi_instance['metrics'][0]
        wmi_instance['tag_by'] = "NonDigit"
        self.run_check(config, mocks={'log': logger})
        self.assertEquals(logger.warning.call_count, 1)

    def test_query_timeouts(self):
        """
        Gracefully handle WMI query timeouts.
        """
        def __patched_init__(*args, **kwargs):
            """
            Force `timeout_duration` value.
            """
            kwargs['timeout_duration'] = 0.1
            return wmi_constructor(*args, **kwargs)

        # Increase WMI queries' runtime
        SWbemServices._exec_query_run_time = 0.2
        # Patch WMISampler to decrease timeout tolerance
        from checks.libs.wmi.sampler import WMISampler
        wmi_constructor = WMISampler.__init__
        WMISampler.__init__ = __patched_init__
        # Set up the check
        config = {
            'instances': [self.WMI_CONFIG]
        }
        logger = Mock()
        # No exception is raised but a WARNING is logged
        self.run_check(config, mocks={'log': logger})
        self.assertTrue(logger.warning.called)

    def test_mandatory_tag_by(self):
        """
        Exception is raised when the result returned by the WMI query contains multiple rows
        but no `tag_by` value was given.
        """
        # local import to avoid pulling in pywintypes ahead of time.
        from checks.wmi_check import MissingTagBy  # noqa
        # Valid configuration
        config = {
            'instances': [self.WMI_CONFIG]
        }
        self.run_check(config)
        # Invalid
        config = {
            'instances': [self.WMI_CONFIG_NO_TAG_BY]
        }
        self.assertRaises(MissingTagBy, self.run_check, config, force_reload=True)

    def test_query_tag_properties(self):
        """
        WMISampler's property list contains `metrics` and `tag_queries` ones.
        """
        # Set up the check
        tag_queries = ["IDProcess", "Win32_Process", "Handle", "CommandLine"]
        config = {
            'instances': [self._make_wmi_tag_query_config(tag_queries)]
        }
        self.run_check(config)
        # WMI props are cached
        self.assertInPartial(
            "localhost:root\\cimv2:Win32_PerfFormattedData_PerfProc_Process",
            self.check.wmi_props
        )
        _, properties = \
            self.getProp(self.check.wmi_props, "localhost:root\\cimv2:Win32_PerfFormattedData_PerfProc_Process")
        self.assertEquals(properties, ["IOReadBytesPerSec", "IDProcess"])

    def test_query_tags(self):
        """
        Tag extracted metrics with `tag_queries` queries.
        """
        # local import to avoid pulling in pywintypes ahead of time.
        from checks.wmi_check import WMIMetric  # noqa
        # Set up the check
        tag_queries = ["IDProcess", "Win32_Process", "Handle", "CommandLine"]
        config = {
            'instances': [self._make_wmi_tag_query_config(tag_queries)]
        }
        self.run_check(config)
        # Retrieve the sampler
        wmi_sampler = self._get_wmi_sampler()
        # Extract metrics
        metrics = self.check._extract_metrics(
            wmi_sampler, "name",
            tag_queries=[tag_queries], constant_tags=["foobar"]
        )
        # Assess
        expected_metrics = [
            WMIMetric("ioreadbytespersec", 20455, tags=['foobar', 'commandline:c:\\'
                      'programfiles(x86)\\google\\chrome\\application\\chrome.exe']),
            WMIMetric('idprocess', 4036, tags=['foobar', 'commandline:c:\\'
                      'programfiles(x86)\\google\\chrome\\application\\chrome.exe']),
        ]
        self.assertEquals(metrics, expected_metrics)

    def test_query_tags_failures(self):
        """
        Check different `tag_queries` failure scenarios.
        """
        # Mock the logger so it can be traced
        logger = Mock()
        # Raise when user `tag_queries` input has a wrong format
        tag_queries = ["IDProcess", "MakesNoSense"]
        config = {
            'instances': [self._make_wmi_tag_query_config(tag_queries)]
        }
        self.assertRaises(IndexError, self.run_check, config, mocks={'log': logger})
        self.assertEquals(logger.error.call_count, 1)
        # Raise when user `link_source_property` is not a class's property
        tag_queries = ["UnknownProperty", "Win32_Process", "Handle", "CommandLine"]
        config = {
            'instances': [self._make_wmi_tag_query_config(tag_queries)]
        }
        self.assertRaises(
            TypeError, self.run_check, config,
            force_reload=True, mocks={'log': logger}
        )
        self.assertEquals(logger.error.call_count, 2)
        # Raise when user `target property` is not a target class's property
        tag_queries = ["IDProcess", "Win32_Process", "Handle", "UnknownProperty"]
        config = {
            'instances': [self._make_wmi_tag_query_config(tag_queries)]
        }
        self.assertRaises(
            TypeError, self.run_check, config,
            force_reload=True, mocks={'log': logger}
        )
        self.assertEquals(logger.error.call_count, 3)
        # Do not raise on result returned, print a warning and continue
        tag_queries = [
            "ResultNotMatchingAnyTargetProperty", "Win32_Process", "Handle", "CommandLine"
        ]
        config = {
            'instances': [self._make_wmi_tag_query_config(tag_queries)]
        }
        self.run_check(config, force_reload=True, mocks={'log': logger})
        self.assertTrue(logger.warning.called)

    def test_check(self):
        """
        Assess check coverage.
        """
        # Run the check
        config = {
            'instances': [self.WMI_CONFIG]
        }
        self.run_check(config)
        # One sample per configured metric and per disk instance (c: and d:)
        for _, mname, _ in self.WMI_CONFIG['metrics']:
            self.assertMetric(mname, tags=["foobar", "name:c:"], count=1)
            self.assertMetric(mname, tags=["foobar", "name:d:"], count=1)
        self.coverage_report()
| |
"""
Tests for uu module.
Nick Mathewson
"""
import unittest
from test import support
import sys, os
import uu
from io import BytesIO
import io
# Sample payload used by every test below.
plaintext = b"The smooth-scaled python crept over the sleeping dog\n"
# uuencoded form of `plaintext` (body only, without begin/end framing).
encodedtext = b"""\
M5&AE('-M;V]T:\"US8V%L960@<'ET:&]N(&-R97!T(&]V97(@=&AE('-L965P
(:6YG(&1O9PH """
# Stolen from io.py
class FakeIO(io.TextIOWrapper):
    """Text I/O implementation using an in-memory buffer.

    Can be a used as a drop-in replacement for sys.stdin and sys.stdout.
    """
    # XXX This is really slow, but fully functional

    def __init__(self, initial_value="", encoding="utf-8",
                 errors="strict", newline="\n"):
        backing = io.BytesIO()
        super(FakeIO, self).__init__(backing,
                                     encoding=encoding,
                                     errors=errors,
                                     newline=newline)
        self._encoding = encoding
        self._errors = errors
        if initial_value:
            text = initial_value if isinstance(initial_value, str) \
                else str(initial_value)
            self.write(text)
            self.seek(0)

    def getvalue(self):
        self.flush()
        raw = self.buffer.getvalue()
        return raw.decode(self._encoding, self._errors)
def encodedtextwrapped(mode, filename):
    """Return `encodedtext` wrapped in uuencode begin/end framing for the
    given file mode and name."""
    header = bytes("begin %03o %s\n" % (mode, filename), "ascii")
    return header + encodedtext + b"\n \nend\n"
class UUTest(unittest.TestCase):
    """Round-trip uu encode/decode tests on in-memory byte streams."""

    def test_encode(self):
        # Default mode is 0o666 when none is given.
        source = io.BytesIO(plaintext)
        sink = io.BytesIO()
        uu.encode(source, sink, "t1")
        self.assertEqual(sink.getvalue(), encodedtextwrapped(0o666, "t1"))
        # An explicit mode must end up in the 'begin' line.
        source = io.BytesIO(plaintext)
        sink = io.BytesIO()
        uu.encode(source, sink, "t1", 0o644)
        self.assertEqual(sink.getvalue(), encodedtextwrapped(0o644, "t1"))

    def test_decode(self):
        sink = io.BytesIO()
        uu.decode(io.BytesIO(encodedtextwrapped(0o666, "t1")), sink)
        self.assertEqual(sink.getvalue(), plaintext)
        # Content before the 'begin' line must be skipped.
        prefix = (b"UUencoded files may contain many lines,\n" +
                  b"even some that have 'begin' in them.\n")
        sink = io.BytesIO()
        uu.decode(io.BytesIO(prefix + encodedtextwrapped(0o666, "t1")), sink)
        self.assertEqual(sink.getvalue(), plaintext)

    def test_truncatedinput(self):
        source = io.BytesIO(b"begin 644 t1\n" + encodedtext)
        sink = io.BytesIO()
        try:
            uu.decode(source, sink)
        except uu.Error as exc:
            self.assertEqual(str(exc), "Truncated input file")
        else:
            self.fail("No exception thrown")

    def test_missingbegin(self):
        source = io.BytesIO(b"")
        sink = io.BytesIO()
        try:
            uu.decode(source, sink)
        except uu.Error as exc:
            self.assertEqual(str(exc), "No valid begin line found in input file")
        else:
            self.fail("No exception thrown")
class UUStdIOTest(unittest.TestCase):
    """Exercise uu's '-' filename convention through patched std streams."""

    def setUp(self):
        # Remember the real streams so tearDown can restore them.
        self.stdin = sys.stdin
        self.stdout = sys.stdout

    def tearDown(self):
        sys.stdin = self.stdin
        sys.stdout = self.stdout

    def test_encode(self):
        sys.stdin = FakeIO(plaintext.decode("ascii"))
        sys.stdout = FakeIO()
        uu.encode("-", "-", "t1", 0o666)
        expected = encodedtextwrapped(0o666, "t1").decode("ascii")
        self.assertEqual(sys.stdout.getvalue(), expected)

    def test_decode(self):
        sys.stdin = FakeIO(encodedtextwrapped(0o666, "t1").decode("ascii"))
        sys.stdout = FakeIO()
        uu.decode("-", "-")
        captured = sys.stdout
        sys.stdout = self.stdout
        sys.stdin = self.stdin
        self.assertEqual(captured.getvalue(), plaintext.decode("ascii"))
class UUFileTest(unittest.TestCase):
    """Round-trip tests for uu.encode/uu.decode using real temporary files."""

    def _kill(self, f):
        # close and remove file
        if f is None:
            return
        try:
            f.close()
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            # best-effort cleanup: the file may already be closed
            pass
        try:
            os.unlink(f.name)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            # best-effort cleanup: the file may already be gone
            pass

    def setUp(self):
        # Distinct input/output file names in the test working directory.
        self.tmpin = support.TESTFN + "i"
        self.tmpout = support.TESTFN + "o"

    def tearDown(self):
        del self.tmpin
        del self.tmpout

    def test_encode(self):
        # Encode from a file, first via file objects and then via filenames.
        fin = fout = None
        try:
            support.unlink(self.tmpin)
            fin = open(self.tmpin, 'wb')
            fin.write(plaintext)
            fin.close()
            fin = open(self.tmpin, 'rb')
            fout = open(self.tmpout, 'wb')
            uu.encode(fin, fout, self.tmpin, mode=0o644)
            fin.close()
            fout.close()
            fout = open(self.tmpout, 'rb')
            s = fout.read()
            fout.close()
            self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
            # in_file and out_file as filenames
            uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
            fout = open(self.tmpout, 'rb')
            s = fout.read()
            fout.close()
            self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
        finally:
            self._kill(fin)
            self._kill(fout)

    def test_decode(self):
        # Decode via a file object; output name comes from the 'begin' line.
        f = None
        try:
            support.unlink(self.tmpin)
            f = open(self.tmpin, 'wb')
            f.write(encodedtextwrapped(0o644, self.tmpout))
            f.close()
            f = open(self.tmpin, 'rb')
            uu.decode(f)
            f.close()
            f = open(self.tmpout, 'rb')
            s = f.read()
            f.close()
            self.assertEqual(s, plaintext)
            # XXX is there an xp way to verify the mode?
        finally:
            self._kill(f)

    def test_decode_filename(self):
        # Decode via a filename instead of a file object.
        f = None
        try:
            support.unlink(self.tmpin)
            f = open(self.tmpin, 'wb')
            f.write(encodedtextwrapped(0o644, self.tmpout))
            f.close()
            uu.decode(self.tmpin)
            f = open(self.tmpout, 'rb')
            s = f.read()
            f.close()
            self.assertEqual(s, plaintext)
        finally:
            self._kill(f)

    def test_decodetwice(self):
        # Verify that decode() will refuse to overwrite an existing file
        f = None
        try:
            f = io.BytesIO(encodedtextwrapped(0o644, self.tmpout))
            # NOTE(review): the BytesIO above is immediately overwritten and
            # never used, and this test opens self.tmpin without creating it
            # (it relies on state left by an earlier test) -- confirm intent
            # before cleaning this up.
            f = open(self.tmpin, 'rb')
            uu.decode(f)
            f.close()
            f = open(self.tmpin, 'rb')
            self.assertRaises(uu.Error, uu.decode, f)
            f.close()
        finally:
            self._kill(f)
def test_main():
    """Run every uu test suite through the shared support helper."""
    suites = (UUTest, UUStdIOTest, UUFileTest)
    support.run_unittest(*suites)
# Allow the test module to be executed directly.
if __name__=="__main__":
    test_main()
| |
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""
unit testing code for calculations in rdkit.Chem.MolSurf
"""
from collections import namedtuple
import os.path
import unittest
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem import MolSurf
# When True the long-running regression tests also run (enabled from the
# command line via the -l flag handled in the __main__ block below).
doLong = False
# One record of reference data: source line number, SMILES string, parsed
# molecule, and the expected descriptor value.
TestData = namedtuple('TestData', 'lineNo,smiles,mol,expected')
class TestCase(unittest.TestCase):
    """Tests for the TPSA implementation against stored reference data."""

    # Reference data files shipped with the RDKit distribution.
    dataNCI200 = os.path.join(RDConfig.RDDataDir, 'NCI', 'first_200.tpsa.csv')
    dataNCI5000 = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'NCI_5K_TPSA.csv')
    dataTPSAregr = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'tpsa_regr.csv')

    @classmethod
    def readNCI_200(cls):
        """Yield TestData records from the 200-molecule NCI reference set."""
        for data in readPSAtestData(cls.dataNCI200):
            yield data

    @classmethod
    def readNCI_5000(cls):
        """Yield TestData records from the 5000-molecule NCI reference set."""
        for data in readPSAtestData(cls.dataNCI5000):
            yield data

    @classmethod
    def readTPSAregres(cls):
        """Yield TestData records from the TPSA regression reference set."""
        for data in readPSAtestData(cls.dataTPSAregr):
            yield data

    def testTPSAShort(self):
        for data in self.readNCI_200():
            calc = MolSurf.TPSA(data.mol)
            self.assertAlmostEqual(
                calc, data.expected, delta=1e-4,
                msg='bad TPSA for SMILES {0.smiles} ({1:.2f} != {0.expected:.2f})'.format(data, calc))

    def testTPSALong(self):
        if not doLong:
            raise unittest.SkipTest('long test')
        for data in self.readNCI_5000():
            try:
                calc = MolSurf.TPSA(data.mol)
            except Exception:
                raise AssertionError(
                    'Line {0.lineNo}: TPSA Calculation failed for SMILES {0.smiles}'.format(data))
            self.assertAlmostEqual(
                calc, data.expected, delta=1e-4,
                msg='bad TPSA for SMILES {0.smiles} ({1:.2f} != {0.expected:.2f})'.format(data, calc))

    def testTPSALongNCI(self):
        if not doLong:
            raise unittest.SkipTest('long test')
        for data in self.readTPSAregres():
            try:
                calc = MolSurf.TPSA(data.mol)
            except Exception:
                raise AssertionError(
                    'Line {0.lineNo}: TPSA Calculation failed for SMILES {0.smiles}'.format(data))
            self.assertAlmostEqual(
                calc, data.expected, delta=1e-4,
                msg='bad TPSA for SMILES {0.smiles} ({1:.2f} != {0.expected:.2f})'.format(data, calc))

    def testHsAndTPSA(self):
        """
        testing the impact of Hs in the graph on PSA calculations
        This was sf.net issue 1969745
        """
        mol = Chem.MolFromSmiles('c1c[nH]cc1')
        molH = Chem.AddHs(mol)
        psa = MolSurf.TPSA(mol)
        psaH = MolSurf.TPSA(molH)
        if psa != psaH:
            # Debug output: print the per-atom contributions side by side.
            psac = MolSurf.rdMolDescriptors._CalcTPSAContribs(mol)
            psaHc = MolSurf.rdMolDescriptors._CalcTPSAContribs(molH)
            for i, v in enumerate(psac):
                print('\t', i, '\t', v, '\t', psaHc[i])
            # Print the contributions added by the explicit Hs.  The
            # original while-loop restarted at the last index used above,
            # duplicating one entry (and crashing when psac was empty).
            for i in range(len(psac), len(psaHc)):
                print('\t\t\t', psaHc[i])
        self.assertEqual(psa, psaH)

        for data in self.readNCI_200():
            mol = Chem.AddHs(data.mol)
            calc = MolSurf.TPSA(mol)
            self.assertAlmostEqual(
                calc, data.expected, delta=1e-4,
                msg='bad TPSA for SMILES {0.smiles} ({1:.2f} != {0.expected:.2f})'.format(data, calc))
        if doLong:
            for data in self.readNCI_5000():
                mol = Chem.AddHs(data.mol)
                calc = MolSurf.TPSA(mol)
                self.assertAlmostEqual(
                    calc, data.expected, delta=1e-4,
                    msg='bad TPSA for SMILES {0.smiles} ({1:.2f} != {0.expected:.2f})'.format(data, calc))
class TestCase_descriptorRegression(unittest.TestCase):
    """Regression tests for surface descriptors against stored values."""

    def __testDesc(self, fileN, col, func):
        # Entries whose expected value equals the 666.0 sentinel still need
        # curation; treat them as errors so they are not silently skipped.
        for data in readRegressionData(fileN, col):
            if abs(data.expected - 666.0) < 1e-4:
                print(data)
                raise AssertionError('check this data entry')
            try:
                val = func(data.mol)
            except Exception:
                val = 666
            self.assertAlmostEqual(
                val, data.expected, delta=1e-4,
                msg='line {0.lineNo}, mol {0.smiles} (calc = {1}) should have val = {0.expected}'.format(
                    data, val))

    def testLabuteASALong(self):
        if not doLong:
            raise unittest.SkipTest('long test')
        asaWithHs = lambda x: MolSurf.LabuteASA(x, includeHs=1)
        for fileName in ('PP_descrs_regress.csv', 'PP_descrs_regress.2.csv'):
            self.__testDesc(fileName, 6, asaWithHs)

    def testTPSALong(self):
        self.__testDesc('PP_descrs_regress.csv', 28, MolSurf.TPSA)
        if doLong:
            self.__testDesc('PP_descrs_regress.2.csv', 28, MolSurf.TPSA)

    def testMOELong(self):
        if not doLong:
            raise unittest.SkipTest('long test')
        # column -> descriptor mapping for the VSA regression files
        allColumns = [(1, MolSurf.SMR_VSA1), (2, MolSurf.SMR_VSA10),
                      (3, MolSurf.SMR_VSA2), (4, MolSurf.SMR_VSA3),
                      (5, MolSurf.SMR_VSA4), (6, MolSurf.SMR_VSA5),
                      (7, MolSurf.SMR_VSA6), (8, MolSurf.SMR_VSA7),
                      (9, MolSurf.SMR_VSA8), (10, MolSurf.SMR_VSA9),
                      (11, MolSurf.SlogP_VSA1), (12, MolSurf.SlogP_VSA10),
                      (13, MolSurf.SlogP_VSA11), (14, MolSurf.SlogP_VSA12)]
        for col, func in allColumns:
            self.__testDesc('PP_descrs_regress.VSA.csv', col, func)
        subsetColumns = [(1, MolSurf.SMR_VSA1), (2, MolSurf.SMR_VSA10),
                         (11, MolSurf.SlogP_VSA1), (12, MolSurf.SlogP_VSA10),
                         (13, MolSurf.SlogP_VSA11), (14, MolSurf.SlogP_VSA12)]
        for col, func in subsetColumns:
            self.__testDesc('PP_descrs_regress.VSA.2.csv', col, func)
class TestCase_python(unittest.TestCase):
    """Compare the C++ descriptor implementations with their Python ports."""

    def test_pyTPSA(self):
        for record in TestCase.readNCI_200():
            molPy = Chem.MolFromSmiles(record.smiles)
            self.assertAlmostEqual(MolSurf.TPSA(record.mol), MolSurf._pyTPSA(molPy))

    def test_pyLabuteASA(self):
        for record in TestCase.readNCI_200():
            molPy = Chem.MolFromSmiles(record.smiles)
            self.assertAlmostEqual(MolSurf.LabuteASA(record.mol), MolSurf.pyLabuteASA(molPy))

    def test_pyPEOE_VSA_(self):
        for record in TestCase.readNCI_200():
            molPy = Chem.MolFromSmiles(record.smiles)
            cVals = MolSurf.PEOE_VSA_(record.mol)
            pyVals = MolSurf.pyPEOE_VSA_(molPy, force=False)
            for cVal, pyVal in zip(cVals, pyVals):
                self.assertAlmostEqual(cVal, pyVal)

    def test_pySlogP_VSA_(self):
        for record in TestCase.readNCI_200():
            molPy = Chem.MolFromSmiles(record.smiles)
            cVals = MolSurf.SlogP_VSA_(record.mol)
            pyVals = MolSurf.pySlogP_VSA_(molPy, force=False)
            for cVal, pyVal in zip(cVals, pyVals):
                self.assertAlmostEqual(cVal, pyVal)

    def test_pySMR_VSA_(self):
        for record in TestCase.readNCI_200():
            molPy = Chem.MolFromSmiles(record.smiles)
            cVals = MolSurf.SMR_VSA_(record.mol)
            pyVals = MolSurf.pySMR_VSA_(molPy, force=False)
            for cVal, pyVal in zip(cVals, pyVals):
                self.assertAlmostEqual(cVal, pyVal)

    def test_pyLabuteHelper(self):
        for record in TestCase.readNCI_200():
            molPy = Chem.MolFromSmiles(record.smiles)
            cVals = MolSurf._LabuteHelper(record.mol)
            pyVals = MolSurf._pyLabuteHelper(molPy)
            for cVal, pyVal in zip(cVals, pyVals):
                self.assertAlmostEqual(cVal, pyVal)
def readPSAtestData(filename):
    """Yield TestData records for the PSA tests from a CSV file.

    Each non-comment line is "smiles,expected".  Raises AssertionError when
    a SMILES string fails to parse.
    """
    with open(filename, 'r') as f:
        for lineNo, line in enumerate(f, 1):
            line = line.strip()
            # Skip comments and blank lines.  The old bare line[0] == '#'
            # check made a blank line crash the two-value unpack below.
            if not line or line.startswith('#'):
                continue
            smiles, expected = line.split(',')
            mol = Chem.MolFromSmiles(smiles)
            if not mol:
                raise AssertionError('molecule construction failed on line %d' % lineNo)
            yield TestData(lineNo, smiles, mol, float(expected))
def readRegressionData(filename, col):
    """ Return entries form regression dataset.
    Returns the line number, smiles, molecule, and the value found in column col
    """
    path = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', filename)
    with open(path, 'r') as inF:
        for lineNum, line in enumerate(inF, 1):
            line = line.strip()
            # Skip comments and blank lines; a blank line previously made
            # the split/indexing below fail.
            if not line or line.startswith('#'):
                continue
            splitL = line.split(',')
            smi = splitL[0]
            mol = Chem.MolFromSmiles(smi)
            if mol is None:
                raise AssertionError('line %d, smiles: %s' % (lineNum, smi))
            expected = float(splitL[col])
            yield TestData(lineNum, smi, mol, expected)
if __name__ == '__main__':  # pragma: nocover
    import argparse
    import sys
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', default=False, action='store_true', dest='doLong')
    args = parser.parse_args()
    doLong = args.doLong

    # Remove the -l flag if present so that it isn't interpreted by
    # unittest.main().  The original tested for 'l', which never matches the
    # actual '-l' element, so the flag leaked through and broke unittest.
    if '-l' in sys.argv:
        sys.argv.remove('-l')

    unittest.main()
| |
"""
Installs and configures nova
"""
import os
import uuid
import logging
from packstack.installer import validators
from packstack.installer import utils
from packstack.installer.exceptions import ScriptRuntimeError
from packstack.modules.ospluginutils import NovaConfig, getManifestTemplate, appendManifestFile, manifestfiles
# Controller object will be initialized from main flow
controller = None

# Name under which this plugin registers itself with the installer.
PLUGIN_NAME = "OS-NOVA"

logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
    """Register the Nova parameter groups with the installer controller.

    Stores the controller in the module-global `controller`, then registers
    the "NOVA" and "NOVA_NETWORK" option groups (prompt text, validators and
    defaults for every CONFIG_NOVA_* key).
    """
    global controller
    controller = controllerObject

    # Declarative option table, keyed by group name.  Each entry describes
    # one command-line option / interactive prompt and the CONF_NAME under
    # which the answer is stored.
    nova_params = {
        "NOVA": [
            {"CMD_OPTION": "novaapi-host",
             "USAGE": "The IP address of the server on which to install the Nova API service",
             "PROMPT": "Enter the IP address of the Nova API service",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ip, validators.validate_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_API_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novacert-host",
             "USAGE": "The IP address of the server on which to install the Nova Cert service",
             "PROMPT": "Enter the IP address of the Nova Cert service",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_CERT_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novavncproxy-hosts",
             "USAGE": "The IP address of the server on which to install the Nova VNC proxy",
             "PROMPT": "Enter the IP address of the Nova VNC proxy",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_VNCPROXY_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novacompute-hosts",
             "USAGE": "A comma separated list of IP addresses on which to install the Nova Compute services",
             "PROMPT": "Enter a comma separated list of IP addresses on which to install the Nova Compute services",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty, validators.validate_multi_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_COMPUTE_HOSTS",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novaconductor-host",
             "USAGE": "The IP address of the server on which to install the Nova Conductor service",
             "PROMPT": "Enter the IP address of the Nova Conductor service",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ip, validators.validate_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_CONDUCTOR_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "nova-db-passwd",
             "USAGE": "The password to use for the Nova to access DB",
             "PROMPT": "Enter the password for the Nova DB access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             # random 16-hex-digit default password
             "DEFAULT_VALUE": uuid.uuid4().hex[:16],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_NOVA_DB_PW",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": True,
             "CONDITION": False},
            {"CMD_OPTION": "nova-ks-passwd",
             "USAGE": "The password to use for the Nova to authenticate with Keystone",
             "PROMPT": "Enter the password for the Nova Keystone access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             # random 16-hex-digit default password
             "DEFAULT_VALUE": uuid.uuid4().hex[:16],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_NOVA_KS_PW",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": True,
             "CONDITION": False},
            {"CMD_OPTION": "novasched-host",
             "USAGE": "The IP address of the server on which to install the Nova Scheduler service",
             "PROMPT": "Enter the IP address of the Nova Scheduler service",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_SCHED_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novasched-cpu-allocation-ratio",
             "USAGE": "The overcommitment ratio for virtual to physical CPUs. "
                      "Set to 1.0 to disable CPU overcommitment",
             "PROMPT": "Enter the CPU overcommitment ratio. "
                       "Set to 1.0 to disable CPU overcommitment",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_float],
             "DEFAULT_VALUE": 16.0,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novasched-ram-allocation-ratio",
             "USAGE": "The overcommitment ratio for virtual to physical RAM. "
                      "Set to 1.0 to disable RAM overcommitment",
             "PROMPT": "Enter the RAM overcommitment ratio. "
                       "Set to 1.0 to disable RAM overcommitment",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_float],
             "DEFAULT_VALUE": 1.5,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],
        # Options below only apply when nova-network (not Quantum) is used;
        # see use_nova_network() for the gating condition.
        "NOVA_NETWORK": [
            {"CMD_OPTION": "novacompute-privif",
             "USAGE": "Private interface for Flat DHCP on the Nova compute servers",
             "PROMPT": "Enter the Private interface for Flat DHCP on the Nova compute servers",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "eth1",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_COMPUTE_PRIVIF",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novanetwork-host",
             "USAGE": "The IP address of the server on which to install the Nova Network service",
             "PROMPT": "Enter the IP address of the Nova Network service",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ip, validators.validate_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novanetwork-pubif",
             "USAGE": "Public interface on the Nova network server",
             "PROMPT": "Enter the Public interface on the Nova network server",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "eth0",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_PUBIF",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novanetwork-privif",
             "USAGE": "Private interface for Flat DHCP on the Nova network server",
             "PROMPT": "Enter the Private interface for Flat DHCP on the Nova network server",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "eth1",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_PRIVIF",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novanetwork-fixed-range",
             "USAGE": "IP Range for Flat DHCP",
             "PROMPT": "Enter the IP Range for Flat DHCP",
             # CIDR notation, e.g. 192.168.32.0/22
             "OPTION_LIST": ["^([\d]{1,3}\.){3}[\d]{1,3}/\d\d?$"],
             "VALIDATORS": [validators.validate_regexp],
             "DEFAULT_VALUE": "192.168.32.0/22",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_FIXEDRANGE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novanetwork-floating-range",
             "USAGE": "IP Range for Floating IP's",
             "PROMPT": "Enter the IP Range for Floating IP's",
             "OPTION_LIST": ["^([\d]{1,3}\.){3}[\d]{1,3}/\d\d?$"],
             "VALIDATORS": [validators.validate_regexp],
             "DEFAULT_VALUE": "10.3.4.0/22",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_FLOATRANGE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novanetwork-default-floating-pool",
             "USAGE": "Name of the default floating pool to which the specified floating ranges are added to",
             "PROMPT": "What should the default floating pool be called?",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "nova",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_DEFAULTFLOATINGPOOL",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "novanetwork-auto-assign-floating-ip",
             "USAGE": "Automatically assign a floating IP to new instances",
             "PROMPT": "Should new instances automatically have a floating IP assigned?",
             "OPTION_LIST": ["y", "n"],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "n",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],
    }

    def use_nova_network(config):
        # NOVA_NETWORK questions apply only when Nova is installed and
        # Quantum networking is not.
        return config['CONFIG_NOVA_INSTALL'] == 'y' and \
            config['CONFIG_QUANTUM_INSTALL'] != 'y'

    nova_groups = [
        {"GROUP_NAME": "NOVA",
         "DESCRIPTION": "Nova Options",
         "PRE_CONDITION": "CONFIG_NOVA_INSTALL",
         "PRE_CONDITION_MATCH": "y",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
        {"GROUP_NAME": "NOVA_NETWORK",
         "DESCRIPTION": "Nova Network Options",
         "PRE_CONDITION": use_nova_network,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
    ]
    for group in nova_groups:
        paramList = nova_params[group["GROUP_NAME"]]
        controller.addGroup(group, paramList)
def initSequences(controller):
    """Register the Nova installation step sequence when Nova is enabled."""
    if controller.CONF['CONFIG_NOVA_INSTALL'] != 'y':
        return

    steps = [
        ('Adding Nova API manifest entries', createapimanifest),
        ('Adding Nova Keystone manifest entries', createkeystonemanifest),
        ('Adding Nova Cert manifest entries', createcertmanifest),
        ('Adding Nova Conductor manifest entries', createconductormanifest),
        ('Adding Nova Compute manifest entries', createcomputemanifest),
        ('Adding Nova Scheduler manifest entries', createschedmanifest),
        ('Adding Nova VNC Proxy manifest entries', createvncproxymanifest),
        ('Adding Nova Common manifest entries', createcommonmanifest),
    ]
    # Networking entries depend on whether Quantum or nova-network is used.
    if controller.CONF['CONFIG_QUANTUM_INSTALL'] == 'y':
        steps.append(('Adding Openstack Network-related Nova manifest entries',
                      createquantummanifest))
    else:
        steps.append(('Adding Nova Network manifest entries',
                      createnetworkmanifest))

    novaapisteps = [{'title': title, 'functions': [func]}
                    for title, func in steps]
    controller.addSequence("Installing OpenStack Nova API", [], [], novaapisteps)
def createapimanifest(config):
    """Add the Nova API manifest entries for the configured API host."""
    # The nova puppet plugin enables quantum metadata proxy support only
    # when a metadata password exists, so nova_api.pp needs the bare token
    # 'undef' (unquoted) to disable it when quantum is not being installed.
    if controller.CONF['CONFIG_QUANTUM_INSTALL'] == 'y':
        metadata_pw = "'%s'" % controller.CONF['CONFIG_QUANTUM_METADATA_PW']
    else:
        metadata_pw = 'undef'
    controller.CONF['CONFIG_QUANTUM_METADATA_PW_UNQUOTED'] = metadata_pw

    manifestfile = "%s_api_nova.pp" % controller.CONF['CONFIG_NOVA_API_HOST']
    manifestdata = getManifestTemplate("nova_api.pp")
    appendManifestFile(manifestfile, manifestdata, 'novaapi')
def createkeystonemanifest(config):
    """Append the Nova Keystone entries to the Keystone host's manifest."""
    keystone_host = controller.CONF['CONFIG_KEYSTONE_HOST']
    appendManifestFile("%s_keystone.pp" % keystone_host,
                       getManifestTemplate("keystone_nova.pp"))
def createcertmanifest(config):
    """Append the Nova Cert entries to the cert host's nova manifest."""
    cert_host = controller.CONF['CONFIG_NOVA_CERT_HOST']
    appendManifestFile("%s_nova.pp" % cert_host,
                       getManifestTemplate("nova_cert.pp"))
def createconductormanifest(config):
    """Append the Nova Conductor entries to the conductor host's manifest."""
    conductor_host = controller.CONF['CONFIG_NOVA_CONDUCTOR_HOST']
    appendManifestFile("%s_nova.pp" % conductor_host,
                       getManifestTemplate("nova_conductor.pp"))
def check_ifcfg(host, device):
    """Raise ScriptRuntimeError when *device* does not exist on *host*."""
    runner = utils.ScriptRunner(host)
    cmd = "ip addr show dev %s || ( echo Device %s does not exist && exit 1 )"
    runner.append(cmd % (device, device))
    # execute() raises ScriptRuntimeError on a non-zero exit status
    runner.execute()
def bring_up_ifcfg(host, device):
    """Ensure *device* on *host* is up; raise ScriptRuntimeError on failure."""
    runner = utils.ScriptRunner(host)
    runner.append('ip link show up | grep "%s"' % device)
    try:
        runner.execute()
    except ScriptRuntimeError:
        # the interface is down -- try to bring it up ourselves
        runner.clear()
        runner.append('ip link set dev %s up' % device)
        try:
            runner.execute()
        except ScriptRuntimeError:
            msg = ('Failed to bring up network interface %s on host %s.'
                   ' Interface should be up so Openstack can work'
                   ' properly.' % (device, host))
            raise ScriptRuntimeError(msg)
def createcomputemanifest(config):
    """Generate a Nova compute manifest for every configured compute host.

    When nova-network (not Quantum) is used, also verifies the private
    interface exists on each host and tries to bring it up.
    """
    dirty = controller.CONF["CONFIG_NOVA_COMPUTE_HOSTS"].split(",")
    hostlist = [i.strip() for i in dirty if i.strip()]
    for host in hostlist:
        controller.CONF["CONFIG_NOVA_COMPUTE_HOST"] = host
        manifestdata = getManifestTemplate("nova_compute.pp")
        manifestfile = "%s_nova.pp" % host

        nova_config_options = NovaConfig()
        if controller.CONF['CONFIG_QUANTUM_INSTALL'] != 'y':
            # flat_interface is only needed on hosts other than the one
            # running nova-network itself
            if host != controller.CONF["CONFIG_NOVA_NETWORK_HOST"]:
                nova_config_options.addOption("DEFAULT/flat_interface", controller.CONF['CONFIG_NOVA_COMPUTE_PRIVIF'])
            check_ifcfg(host, controller.CONF['CONFIG_NOVA_COMPUTE_PRIVIF'])
            try:
                bring_up_ifcfg(host, controller.CONF['CONFIG_NOVA_COMPUTE_PRIVIF'])
            except ScriptRuntimeError as ex:
                # just warn user to do it by himself
                # (fixed: str(ScriptRuntimeError) logged the exception class
                # instead of the actual error message)
                controller.MESSAGES.append(str(ex))

        appendManifestFile(manifestfile, manifestdata + "\n" + nova_config_options.getManifestEntry())
def createnetworkmanifest(config):
    """Add the Nova Network manifest entries for the network host.

    Checks that the configured public and private interfaces exist on the
    host (bringing them up when possible), normalizes the auto-assign
    floating IP option to a boolean, and derives the fixed network size
    from the CIDR prefix before rendering nova_network.pp.
    """
    if controller.CONF['CONFIG_QUANTUM_INSTALL'] == "y":
        return

    host = controller.CONF['CONFIG_NOVA_NETWORK_HOST']
    for i in ('CONFIG_NOVA_NETWORK_PRIVIF', 'CONFIG_NOVA_NETWORK_PUBIF'):
        check_ifcfg(host, controller.CONF[i])
        try:
            bring_up_ifcfg(host, controller.CONF[i])
        except ScriptRuntimeError as ex:
            # just warn user to do it by himself
            # (fixed: str(ScriptRuntimeError) logged the exception class
            # instead of the actual error message)
            controller.MESSAGES.append(str(ex))

    if controller.CONF['CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP'] == "y":
        controller.CONF['CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP'] = True
    else:
        controller.CONF['CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP'] = False

    # We need to explicitly set the network size
    routing_prefix = controller.CONF['CONFIG_NOVA_NETWORK_FIXEDRANGE'].split('/')[1]
    net_size = 2 ** (32 - int(routing_prefix))
    controller.CONF['CONFIG_NOVA_NETWORK_FIXEDSIZE'] = str(net_size)

    manifestfile = "%s_nova.pp" % host
    manifestdata = getManifestTemplate("nova_network.pp")
    appendManifestFile(manifestfile, manifestdata)
def createschedmanifest(config):
    """Append the Nova Scheduler entries to the scheduler host's manifest."""
    sched_host = controller.CONF['CONFIG_NOVA_SCHED_HOST']
    appendManifestFile("%s_nova.pp" % sched_host,
                       getManifestTemplate("nova_sched.pp"))
def createvncproxymanifest(config):
    """Append the Nova VNC proxy entries to the proxy host's manifest."""
    proxy_host = controller.CONF['CONFIG_NOVA_VNCPROXY_HOST']
    appendManifestFile("%s_nova.pp" % proxy_host,
                       getManifestTemplate("nova_vncproxy.pp"))
def createcommonmanifest(config):
    """Append the common Nova entries to every *_nova.pp manifest.

    Hosts that only run nova-compute get a DB connection string without a
    password; hosts running any other Nova service get the full credentials.
    """
    dbhost = config['CONFIG_MYSQL_HOST']

    compute_hosts = controller.CONF["CONFIG_NOVA_COMPUTE_HOSTS"].split(",")
    nopass_nodes = [h.strip() for h in compute_hosts if h.strip()]

    service_hosts = [config.get('CONFIG_NOVA_CONDUCTOR_HOST'),
                     config.get('CONFIG_NOVA_API_HOST'),
                     config.get('CONFIG_NOVA_CERT_HOST'),
                     config.get('CONFIG_NOVA_VNCPROXY_HOST'),
                     config.get('CONFIG_NOVA_SCHED_HOST'),
                     config.get('CONFIG_NOVA_NETWORK_HOST')]
    dbpass_nodes = [h.strip() for h in service_hosts if h and h.strip()]

    for manifestfile, marker in manifestfiles.getFiles():
        if not manifestfile.endswith("_nova.pp"):
            continue
        host, _manifest = manifestfile.split('_', 1)
        host = host.strip()
        if host in nopass_nodes and host not in dbpass_nodes:
            # we should omit password in case we are installing only
            # nova-compute to the host
            perms = "nova"
        else:
            perms = "nova:%(CONFIG_NOVA_DB_PW)s" % config
        config['CONFIG_NOVA_SQL_CONN'] = "mysql://%s@%s/nova" % (perms, dbhost)
        appendManifestFile(os.path.split(manifestfile)[1],
                           getManifestTemplate("nova_common.pp"))
def createquantummanifest(config):
    """Add quantum-related Nova entries to every *_nova.pp manifest."""
    if controller.CONF['CONFIG_QUANTUM_INSTALL'] != "y":
        return

    # pick the libvirt VIF driver matching the configured L2 plugin
    if controller.CONF['CONFIG_QUANTUM_L2_PLUGIN'] == 'openvswitch':
        vif_driver = 'nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver'
    else:
        vif_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver'
    controller.CONF['CONFIG_NOVA_LIBVIRT_VIF_DRIVER'] = vif_driver

    for manifestfile, marker in manifestfiles.getFiles():
        if manifestfile.endswith("_nova.pp"):
            appendManifestFile(os.path.split(manifestfile)[1],
                               getManifestTemplate("nova_quantum.pp"))
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git-archive keyword values used to look up the version."""
    # These literals are substituted by git during 'git archive'.
    # setup.py/versioneer.py greps for the variable names, so each one must
    # remain on its own line; _version.py just calls get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Plain attribute bag holding Versioneer configuration values."""
def get_config():
    """Build and return the VersioneerConfig for this project."""
    # these values are filled in when 'setup.py versioneer' creates
    # _version.py
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "v",
        "parentdir_prefix": "metpy-",
        "versionfile_source": "metpy/_version.py",
        "verbose": False,
    }
    cfg = VersioneerConfig()
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Signals that one version-discovery strategy does not apply here."""
# Mapping of VCS name -> long _version.py template text (unused here).
LONG_VERSION_PY = {}
# Registry of VCS handler callables, populated by @register_vcs_handler.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Create a decorator that records a function as HANDLERS[vcs][method]."""
    def decorate(f):
        """Store f in the handler registry and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Try each executable in *commands* until one launches, then run it.

    Returns a (stdout, returncode) tuple; both elements are None when no
    command could be started at all.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                continue  # this candidate is missing; try the next one
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from the name of a parent directory.

    Source tarballs conventionally unpack into a 'name-version' directory;
    also walk up to two levels looking for a suitably named parent.
    """
    tried = []
    for _ in range(3):
        leaf = os.path.basename(root)
        if leaf.startswith(parentdir_prefix):
            return {"version": leaf[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # step up one level and retry
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Pulls the values assigned to git_refnames, git_full and git_date out of
    *versionfile_abs* with a regexp (so setup.py need not import
    _version.py).  Returns a possibly-empty dict; not used from _version.py
    itself.
    """
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even when a read raises;
        # the old open/close pair leaked the descriptor on error because
        # the blanket except skipped f.close().
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # missing/unreadable file -> empty dict; the caller falls back
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Compute a version dict from expanded git-archive keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # %ci yields an "ISO-8601-like" stamp (git-2.2.0's %cI would be
        # compliant, but %ci works back to git-1.5.3); make it compliant by
        # turning the first space into 'T' and dropping the one before the
        # UTC offset.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {entry.strip() for entry in refnames.strip("()").split(",")}
    # git >= 1.8.3 lists tags as "tag: foo-1.0" rather than "foo-1.0";
    # prefer entries carrying that prefix when any exist.
    TAG = "tag: "
    tags = {entry[len(TAG):] for entry in refs if entry.startswith(TAG)}
    if not tags:
        # Either git < 1.8.3 or there really are no tags.  Old git's %d
        # expansion strips refs/heads/ and refs/tags/, so branches and tags
        # are indistinguishable; keeping only names containing a digit
        # filters out "HEAD", "master", "release", "stabilization" etc.
        tags = {entry for entry in refs if re.search(r'\d', entry)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            version = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a dict with keys: "long", "short", "closest-tag", "distance",
    "dirty", "error", "date" (all set in this function).
    Raises NotThisMethod when git is unavailable or the tree is not a repo.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # Windows installs may expose git only via a .cmd shim.
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # strip the suffix so the TAG-NUM-gHEX parse below is clean
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    Fix: git_pieces_from_vcs() stores ``pieces["closest-tag"] = None`` for an
    untagged tree, so ``pieces.get("closest-tag", "")`` can return None (the
    default only applies when the key is absent) and ``"+" in None`` raised
    TypeError.  ``or ""`` normalizes both the missing and the None case.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: never tagged
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # use "." as the separator when the tag already carries a "+"
        sep = "." if "+" in tag else "+"
        version += "%s%d.g%s" % (sep, pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: never tagged
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # exactly on a tag
        return tag
    return "%s.post.dev%d" % (tag, pieces["distance"])
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: never tagged
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        # "." separator when the tag already contains a local "+" segment
        version += "." if "+" in tag else "+"
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: never tagged -- distance suffix is unconditional here
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: never tagged -- bare short hash
        described = pieces["short"]
    elif pieces["distance"]:
        described = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exactly on a tag: distance/hash suffix omitted
        described = tag
    return described + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # distance and hash always appended, even at distance 0
        described = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: never tagged -- bare short hash
        described = pieces["short"]
    return described + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    # Any upstream error short-circuits rendering and is surfaced verbatim.
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    # Empty/None style and "default" both mean pep440.
    chosen = style or "default"
    if chosen == "default":
        chosen = "pep440"

    if chosen == "pep440":
        version = render_pep440(pieces)
    elif chosen == "pep440-pre":
        version = render_pep440_pre(pieces)
    elif chosen == "pep440-post":
        version = render_pep440_post(pieces)
    elif chosen == "pep440-old":
        version = render_pep440_old(pieces)
    elif chosen == "git-describe":
        version = render_git_describe(pieces)
    elif chosen == "git-describe-long":
        version = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)

    return {"version": version, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, 'git describe' on a
    checked-out tree, then the parent-directory name.  Always returns a
    dict with "version", "full-revisionid", "dirty", "error" and "date".
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: $Format$ keywords expanded by git-archive.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # No __file__ (frozen/embedded interpreter): nothing more we can do.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Strategy 2: ask git directly (works in a checked-out source tree).
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: infer the version from the unpacked directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    # Every strategy failed.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| |
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, unicode_literals
import os
import logging
import click
import AppKit
from collections import Mapping
from Foundation import *
from AppKit import *
from avashell.utils import resource_path
from avashell.osx.window import Window
from ava import launcher
from ava.user import status
from ..base import *
from . import msgbox
from .cocoa import Delegate
_NOTIFICATIONS = True
try:
from Foundation import NSUserNotification, NSUserNotificationCenter
except ImportError:
_NOTIFICATIONS = False
logger = logging.getLogger(__name__)
def applicationSupportFolder(self):
    """Return the path of the app's "Ava" Application Support folder,
    creating the directory if it does not exist yet.

    NOTE(review): despite the ``self`` parameter this is a module-level
    function, not a method -- presumably lifted out of a class; ``self``
    is unused. Confirm against callers before changing the signature.
    """
    # Ask Cocoa for ~/Library/Application Support; fall back to a temp dir.
    paths = NSSearchPathForDirectoriesInDomains(NSApplicationSupportDirectory,NSUserDomainMask,True)
    basePath = (len(paths) > 0 and paths[0]) or NSTemporaryDirectory()
    # NSString path join; presumably usable like a str below -- confirm.
    fullPath = basePath.stringByAppendingPathComponent_("Ava")
    if not os.path.exists(fullPath):
        os.mkdir(fullPath)
    return fullPath
def notification(title, subtitle, message, data=None, sound=True):
    """Send a notification to Notification Center (Mac OS X 10.8+).

    If running on a version of Mac OS X that does not support
    notifications, a ``RuntimeError`` will be raised. Apple says,
    "The userInfo content must be of reasonable serialized size (less
    than 1k) or an exception will be thrown." So don't do that!

    :param title: text in a larger font.
    :param subtitle: text in a smaller font below the `title`.
    :param message: text representing the body of the notification below
        the `subtitle`.
    :param data: will be passed to the application's "notification center"
        (see :func:`rumps.notifications`) when this notification is clicked.
    :param sound: whether the notification should make a noise when it
        arrives.
    """
    if not _NOTIFICATIONS:
        raise RuntimeError('Mac OS X 10.8+ is required to send notifications')
    if data is not None and not isinstance(data, Mapping):
        raise TypeError('notification data must be a mapping')
    _require_string_or_none(title, subtitle, message)

    note = NSUserNotification.alloc().init()
    note.setTitle_(title)
    note.setSubtitle_(subtitle)
    note.setInformativeText_(message)
    note.setUserInfo_(data if data is not None else {})
    if sound:
        note.setSoundName_("NSUserNotificationDefaultSoundName")
    # Schedule for "now" so delivery happens immediately.
    note.setDeliveryDate_(NSDate.dateWithTimeInterval_sinceDate_(0, NSDate.date()))

    center = NSUserNotificationCenter.defaultUserNotificationCenter()
    if center is not None:
        center.scheduleNotification_(note)
def _require_string_or_none(*objs):
for obj in objs:
if not(obj is None or isinstance(obj, basestring)):
raise TypeError('a string or None is required but given {0}, a {1}'.format(obj, type(obj).__name__))
class AppDelegate(Delegate):
    """Cocoa application delegate for the Ava status-bar app.

    Builds the status-bar icon and its menus and receives window and
    NSUserNotification callbacks.

    NOTE(review): the ``shell`` and ``app`` attributes are attached
    externally (see Shell._run) before the run loop starts -- confirm.
    """
    # Default status text (the menu item that displayed it is commented out).
    status = 'Ready'

    def __init__(self):
        self.notices = []        # notices backing the "Notices" submenu
        self.status_menu = None  # built lazily in create_status_menu()
        self._console = None

    def init(self):
        # PyObjC/Cocoa designated initializer; propagate a nil result.
        s = super(AppDelegate, self).init()
        if s is None:
            return None
        return s

    def applicationDidFinishLaunching_(self, sender):
        # Build the status-bar icon and the whole menu tree once Cocoa is up.
        logger.debug("Application did finish launching.")
        logger.debug("Icon file: %s", resource_path('ava/res/eavatar.png'))
        statusbar = NSStatusBar.systemStatusBar()
        self.statusicon = statusbar.statusItemWithLength_(NSVariableStatusItemLength)
        self.icon = NSImage.alloc().initByReferencingFile_(resource_path('res/icon.png'))
        self.icon.setScalesWhenResized_(True)
        self.icon.setSize_((20, 20))
        self.statusicon.setImage_(self.icon)
        self.statusicon.setHighlightMode_(True)
        self.statusicon.setEnabled_(True)
        # make the menu
        self.menubarMenu = NSMenu.alloc().init()
        # self.statusItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(self.status, None, '')
        # self.menubarMenu.addItem_(self.statusItem)
        # self.menuItem = NSMenuItem.separatorItem()
        # self.menubarMenu.addItem_(self.menuItem)
        self.openItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(STR_OPEN_WEBFRONT, 'openWebfront:', '')
        self.menubarMenu.addItem_(self.openItem)
        self.openItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(STR_OPEN_FOLDER, 'openFolder:', '')
        self.menubarMenu.addItem_(self.openItem)
        # self.openItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(STR_OPEN_CONSOLE, 'openConsole:', '')
        # self.menubarMenu.addItem_(self.openItem)
        self.menubarMenu.addItem_(NSMenuItem.separatorItem())
        # "Status" submenu: one checkable item per user-status string.
        mi = self.menubarMenu.addItemWithTitle_action_keyEquivalent_(STR_STATUS_MENU, None, "")
        self.create_status_menu()
        self.menubarMenu.setSubmenu_forItem_(self.status_menu, mi)
        self.menubarMenu.addItem_(NSMenuItem.separatorItem())
        # "Notices" submenu: filled later through addNewNotice().
        mi = self.menubarMenu.addItemWithTitle_action_keyEquivalent_(STR_NOTICES_MENU, None, "")
        self.notices_menu = self.create_notices_menu()
        self.menubarMenu.setSubmenu_forItem_(self.notices_menu, mi)
        # self.menubarMenu.addItemWithTitle_action_keyEquivalent_('Clear All', 'clearNotices:', '')
        self.menubarMenu.addItem_(NSMenuItem.separatorItem())
        self.quit = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(STR_EXIT, 'quitApp:', '')
        self.menubarMenu.addItem_(self.quit)
        # add menu to statusitem
        self.statusicon.setMenu_(self.menubarMenu)
        self.statusicon.setToolTip_(STR_STATUS)

    def applicationWillTerminate_(self, sender):
        logger.debug("Application will terminate.")

    def windowWillClose_(self, aNotification):
        logger.debug("Console window will close.")

    def windowShouldClose_(self, sender):
        # Hide the console instead of letting Cocoa destroy the window.
        logger.debug("Console window should close.")
        self.shell.console.hide()
        return False

    def userNotificationCenter_didActivateNotification_(self, notification_center, notification):
        # The user clicked a delivered notification: remove it and log payload.
        notification_center.removeDeliveredNotification_(notification)
        data = dict(notification.userInfo())
        logger.debug("Notification: %s", data)

    def alert(self, msg, title="Important Message"):
        # Modal informational alert, forced to the foreground first.
        self.app.activateIgnoringOtherApps_(True)
        alert = NSAlert.alloc().init()
        alert.setMessageText_(title)
        alert.setInformativeText_(msg)
        alert.setAlertStyle_(NSInformationalAlertStyle)
        alert.runModal()

    def create_notices_menu(self):
        # Empty submenu; populated incrementally by addNewNotice().
        m = AppKit.NSMenu.alloc().initWithTitle_("Notices")
        return m

    def create_status_menu(self):
        # One item per status string; tick the shell's current status.
        self.status_menu = AppKit.NSMenu.alloc().initWithTitle_("Status")
        for i, s in enumerate(status.STRINGS):
            item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(s, 'updateUserStatus:', '')
            if self.shell.user_status == i:
                item.setState_(1)
            self.status_menu.addItem_(item)
        return self.status_menu

    def clearNotices_(self, sender):
        logger.debug("Clear all messages.")
        self.notices_menu.removeAllItems()

    def showNotice_(self, sender):
        # The menu index mirrors the shell's notice-list index.
        index = self.notices_menu.indexOfItem_(sender)
        logger.debug("Item index: %r" % index)
        notice = self.shell.get_notice_at(index)
        msgbox.show_notice(notice)

    def addNewNotice(self, notice, pop_last=False):
        # Newest notice goes on top; optionally drop the oldest entry.
        mi = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(notice.title, 'showNotice:', '')
        self.notices_menu.insertItem_atIndex_(mi, 0)
        if pop_last:
            self.notices_menu.removeItemAtIndex_(NUM_OF_NOTICES)

    def status_(self, sender):
        pass

    def updateUserStatus_(self, sender):
        # Move the check mark from the previous status item to the clicked one.
        old_item = self.status_menu.itemAtIndex_(self.shell.user_status)
        old_item.setState_(0)
        index = self.status_menu.indexOfItem_(sender)
        self.shell.user_status = index
        sender.setState_(1)

    def openFolder_(self, sender):
        click.launch(launcher.get_app_dir())

    def openWebfront_(self, sender):
        self.shell.open_ui()

    def openConsole_(self, sender):
        # NOTE(review): Console is neither defined nor imported in this
        # module; triggering this action would raise NameError -- confirm.
        if self.shell.console is None:
            self.shell.console = Console(self.shell)
        self.shell.console.show()

    def openHelp_(self, notification):
        self.shell.open_help()

    def quitApp_(self, notification):
        nsapplication = NSApplication.sharedApplication()
        logger.debug('closing application')
        nsapplication.terminate_(notification)
class Shell(ShellBase):
    """OS X shell front-end: owns the NSApplication, its AppDelegate and
    best-effort user-notification delivery."""

    def __init__(self):
        super(Shell, self).__init__()
        self.app = None        # NSApplication, created in _run()
        self.delegate = None   # AppDelegate, wired up in _run()
        self.mainframe = None
        self.console = None

    def doIdle_(self, timer):
        # NSTimer callback (1s period): pump queued idle tasks.
        self.process_idle_tasks()

    def on_user_notified(self, notice):
        """Add *notice* to the notices menu and optionally pop a system
        notification.  Delivery is best-effort: failures are logged only.
        """
        try:
            # When the menu is full, the oldest entry must be dropped.
            pop_last = len(self.notices) >= NUM_OF_NOTICES
            self.notices.append(notice)
            self.delegate.addNewNotice(notice, pop_last)
            if self.should_notify(notice):
                notification(title=notice.title, subtitle="", message=notice.message)
        except Exception:
            # Was a bare "except:"; keep best-effort semantics without
            # swallowing SystemExit/KeyboardInterrupt.  (A stray debug
            # print of len(self.notices) was removed here.)
            logger.error("Failed to send notice", exc_info=True)

    def _run(self):
        """Set up the Cocoa application and enter its run loop (blocks)."""
        NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(1, self, 'doIdle:', "", True)
        self.app = NSApplication.sharedApplication()
        self.app.activateIgnoringOtherApps_(True)
        self.delegate = AppDelegate.alloc().init()
        self.delegate.shell = self
        self.delegate.app = self.app
        self.app.setDelegate_(self.delegate)
        if _NOTIFICATIONS:
            nc = NSUserNotificationCenter.defaultUserNotificationCenter()
            if nc is not None:
                # Fix: the notification-center delegate must be the
                # AppDelegate (it implements
                # userNotificationCenter_didActivateNotification_), not the
                # NSApplication instance, or activation callbacks are lost.
                nc.setDelegate_(self.delegate)
            else:
                logger.warning("No notification center object found!")
        else:
            logger.debug("Platform notification is not enabled.")
        self.app.run()
| |
__author__ = 'John'
#modify csv files.
#lowercase every column.
#remove stop words
#remove states
#os.getcwd()
#os.chdir("/tmp/")
#os.getcwd()
import csv
import sys
import operator
import re
import os
def write_nice_csv(file_name):
    """Normalize one demographics CSV and write it to ``csv/nice_csv/``.

    Lowercases county and state, drops Alaska/Hawaii/Puerto Rico rows and
    strips county-type stop words (and all spaces) from the county name.

    :param file_name: bare file name located under ``csv/``.

    Fixes: files are now closed via ``with`` (they were leaked), and the
    two stable sort passes are kept but expressed without rebinding the
    reader.
    """
    # Rows whose (lowercased) state matches these are discarded.
    bad_states = {'alaska', 'hawaii', 'puerto rico'}
    with open("csv/%s" % file_name, newline='') as src:
        reader = csv.reader(src, delimiter=",")
        # omit header
        next(reader, None)
        # Two stable passes == single sort by (state, county).
        rows = sorted(reader, key=operator.itemgetter(0))
        rows.sort(key=operator.itemgetter(1))
    with open("csv/nice_csv/%s" % file_name, 'w', newline='') as dst:
        writer = csv.writer(dst)
        for row in rows:
            row[0] = row[0].lower()
            row[1] = row[1].lower()
            if row[1] not in bad_states:
                # remove stop words (the trailing "| " also removes spaces)
                row[0] = re.sub("county|city|area|municipio|municipality| ", "", row[0])
                writer.writerow(row)
def clean_loc():
    """Clean ``join/zip_codes_states.csv`` into ``join/new_zip_codes_states.csv``.

    Lowercases city (col 3) and county (col 5), drops rows whose state
    abbreviation (col 4) is a filtered territory, strips county stop
    words, and expands known state abbreviations to full names using
    ``join/states.csv`` (col 2 -> col 1).

    Fixes: all files are closed via ``with`` (they were leaked) and the
    abbreviation map is built with a dict comprehension.
    """
    file_name = "zip_codes_states.csv"
    # Abbreviation -> full name; note: no header row is skipped here,
    # matching the original behavior.
    with open("join/states.csv", newline='') as sf:
        state_dict = {row[2]: row[1] for row in csv.reader(sf, delimiter=",")}
    # Rows with these state abbreviations are discarded.
    # NOTE(review): 'AL' is Alabama; 'AK' (Alaska) was probably intended
    # here (full_fips uses 'AK') -- kept as-is, confirm before changing.
    bad_states = {'PR', 'AL', 'HI', 'VI', 'AE', 'AA', 'AP', 'AS'}
    with open("join/%s" % file_name, newline='') as src, \
         open("join/new_%s" % file_name, 'w', newline='') as dst:
        reader = csv.reader(src, delimiter=",")
        # omit header
        next(reader, None)
        writer = csv.writer(dst)
        for row in reader:
            row[3] = row[3].lower()  # city
            row[5] = row[5].lower()  # county
            if row[4] not in bad_states:
                # remove stop words (the trailing "| " also removes spaces)
                row[5] = re.sub("county|city|area|municipio|municipality| ", "", row[5])
                if row[4] in state_dict:
                    row[4] = state_dict[row[4]]
                writer.writerow(row)
#creates the dictionary to translate from abbreviated to full and translate fips dataset into this form.
def abbr_to_full():
    """Return a dict mapping state abbreviation -> full state name.

    Read from ``join/states.csv`` (col 2 -> col 1).  No header row is
    skipped, matching the original behavior.

    Fix: the file handle is now closed via ``with`` (it was leaked).
    """
    with open("join/states.csv", newline='') as f:
        return {row[2]: row[1] for row in csv.reader(f, delimiter=",")}
#Determine the full form of the states name in the fips dataset.
def full_fips(state_dict=None):
    """Rewrite the FIPS dataset (``FIPS and Population/fips.csv``) into
    ``join/fips.csv`` with full state names.

    Rows are sorted by column 3, AK/HI/PR rows are dropped, and the state
    abbreviation in column 0 is replaced by its full name when known.

    :param state_dict: optional abbreviation -> full-name mapping; defaults
        to the one loaded by :func:`abbr_to_full` (backward compatible).

    Fixes: file handles are closed via ``with`` (they were leaked).
    """
    if state_dict is None:
        state_dict = abbr_to_full()
    filename = "fips.csv"
    with open("FIPS and Population/%s" % filename, newline='') as src:
        reader = csv.reader(src, delimiter=",")
        next(reader, None)  # skip header
        # sort the fips data set by column 3
        rows = sorted(reader, key=operator.itemgetter(3))
    # Rows with these state abbreviations are discarded.
    bad_states = {'PR', 'AK', 'HI'}
    with open("join/%s" % filename, 'w', newline='') as dst:
        writer = csv.writer(dst)
        for row in rows:
            if row[0] not in bad_states:
                if row[0] in state_dict:
                    row[0] = state_dict[row[0]]
                writer.writerow(row)
def write_nice_csv2(file_name):
    """Like :func:`write_nice_csv` but keeps the original casing: sort by
    (state, county) and drop Alaska/Hawaii/Puerto Rico rows (either case).

    :param file_name: bare file name located under ``csv/``.

    Fixes: files are closed via ``with`` (they were leaked).
    """
    # Both lowercase and title-case spellings, since rows are not lowered.
    bad_states = {'alaska', 'hawaii', 'puerto rico', 'Alaska', 'Hawaii', 'Puerto Rico'}
    with open("csv/%s" % file_name, newline='') as src:
        reader = csv.reader(src, delimiter=",")
        # omit header
        next(reader, None)
        # Two stable passes == single sort by (state, county).
        rows = sorted(reader, key=operator.itemgetter(0))
        rows.sort(key=operator.itemgetter(1))
    with open("csv/nice_csv/%s" % file_name, 'w', newline='') as dst:
        writer = csv.writer(dst)
        for row in rows:
            if row[1] not in bad_states:
                writer.writerow(row)
# Top-level script: join every cleaned per-demographic CSV in csv/nice_csv/
# with the FIPS dataset into a single join/demographics.csv table.
# NOTE(review): runs at import time and assumes every nice_csv file has its
# rows in exactly the same order as join/fips.csv (rows are matched by
# position j, not by key) -- confirm before reuse.
#have a list of nice directories
directory_loc ="csv/nice_csv";
fips_filename = "fips.csv";
result_filename = "demographics.csv"
directory_list = list();  # demographic file names -> output column names
reader = list();          # fully materialized rows of each demographic file
for file in os.listdir(directory_loc):
    if file.endswith(".csv"):
        directory_list.append(file);
        reader.append(list(csv.reader(open("csv/nice_csv/%s" % file), delimiter=","))); #create a list of .csv reader objects.
#create new table.
reader_fips = csv.reader(open("join/%s" % fips_filename), delimiter=",")
fips_header = ["USPS", "GEOID", "ANSICODE", "NAME", "POP10", "HU10", "ALAND", "AWATER", "ALAND_SQMI", "AWATER_SQMI", "INTPTLAT", "INTPTLONG"];
writer = csv.writer(open("join/%s" % result_filename, 'w', newline=''))
desired_elements = [3, 0, 1, 10, 11, 4, 5, 6, 7, 8, 9] #obtains the desired elements from the list
# Turn "Percent Male.csv" into a column name like "percent_male".
for i in range(len(directory_list)):
    directory_list[i]= re.sub("\.csv|[(),]", "", directory_list[i] )
    directory_list[i]= re.sub(" ", "_", directory_list[i] )
    directory_list[i] = directory_list[i].lower();
# Header: selected FIPS columns followed by one column per demographic file.
header = list(operator.itemgetter(*desired_elements)(fips_header));
header =header + directory_list;
writer.writerow(header);
j = 0;  # positional row index into each demographic file
for row in reader_fips:
    result_row = list(operator.itemgetter(*desired_elements)(row));
    # Append column 2 of the j-th row from every demographic file.
    for i in range(0, len(reader)):
        result_row.append(reader[i][j][2]);
    j=j+1;
    #write the columns based on the files that were read.
    writer.writerow(result_row);
# #print(type(row));
# #print(type(result_row));
#clean_loc();
#prints out all the files and converts them into nice files.
# for file in os.listdir("csv"):
# if file.endswith(".csv"):
# write_nice_csv(file);
# #verifies if each list is the same
# #determine if two lists are equivalent
# file_name2 = "Percent Male.csv";
# #Determines if the counties list for each list are identical
# for file in os.listdir("csv/nice_csv"):
# if file.endswith(".csv"):
# compare_counties("Percent Asian.csv", file);
#This part edits the lines of the table
#
# #clean_loc();
# state_file = "states.csv";
# reader = csv.reader(open("join/%s" % state_file), delimiter=",")
# state_dict = {};
# #set dictionaries
# for row in reader:
# #remove states
# state_dict[row[2]] = row[1];
# #location
# #county - row[5]
# #state - row[4]
#
# #convert data into new_state.csv into nice format.
#
# #read each county from a demographic .csv file, then find the corresponding longitude and latitude for that
#
# #demo
# #county - row[0]
# #state - row[1]
# location_file = "new_zip_codes_states.csv";
#
#
# demo_file = "Median Age.csv";
# demo_reader = csv.reader(open("join/%s" % demo_file), delimiter=",")
# writer = csv.writer(open("join/updated_%s" % demo_file, 'w', newline=''))
#
# for county in demo_reader:
# loc_reader = csv.reader(open("join/%s" % location_file), delimiter=",")
# for location in loc_reader:
#
# if(location[4] == county[1] and location[5] == county[0]):
# row = county;
# row.append(location[1]);
# row.append(location[2]);
# writer.writerow(row);
# break;
| |
import py, pytest
import _pytest._code
from _pytest.config import getcfg, get_common_ancestor, determine_setup
from _pytest.main import EXIT_NOTESTSCOLLECTED
class TestParseIni:
    """Tests for ini-file discovery and parsing (setup.cfg/tox.ini/pytest.ini)."""

    def test_getcfg_and_config(self, testdir, tmpdir):
        # A [pytest] section in a parent setup.cfg is found from a subdir.
        sub = tmpdir.mkdir("sub")
        sub.chdir()
        tmpdir.join("setup.cfg").write(_pytest._code.Source("""
            [pytest]
            name = value
        """))
        rootdir, inifile, cfg = getcfg([sub], ["setup.cfg"])
        assert cfg['name'] == "value"
        config = testdir.parseconfigure(sub)
        assert config.inicfg['name'] == 'value'

    def test_getcfg_empty_path(self, tmpdir):
        # Must not crash on an empty path argument.
        getcfg([''], ['setup.cfg']) #happens on py.test ""

    def test_append_parse_args(self, testdir, tmpdir, monkeypatch):
        # PYTEST_ADDOPTS options are applied on top of ini-file addopts.
        monkeypatch.setenv('PYTEST_ADDOPTS', '--color no -rs --tb="short"')
        tmpdir.join("setup.cfg").write(_pytest._code.Source("""
            [pytest]
            addopts = --verbose
        """))
        config = testdir.parseconfig(tmpdir)
        assert config.option.color == 'no'
        assert config.option.reportchars == 's'
        assert config.option.tbstyle == 'short'
        assert config.option.verbose
        #config = testdir.Config()
        #args = [tmpdir,]
        #config._preparse(args, addopts=False)
        #assert len(args) == 1

    def test_tox_ini_wrong_version(self, testdir):
        # A too-new minversion requirement aborts with a clear message.
        testdir.makefile('.ini', tox="""
            [pytest]
            minversion=9.0
        """)
        result = testdir.runpytest()
        assert result.ret != 0
        result.stderr.fnmatch_lines([
            "*tox.ini:2*requires*9.0*actual*"
        ])

    @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
    def test_ini_names(self, testdir, name):
        # All three supported file names are honored for [pytest] sections.
        testdir.tmpdir.join(name).write(py.std.textwrap.dedent("""
            [pytest]
            minversion = 1.0
        """))
        config = testdir.parseconfig()
        assert config.getini("minversion") == "1.0"

    def test_toxini_before_lower_pytestini(self, testdir):
        # A tox.ini closer to the args wins over a pytest.ini further up.
        sub = testdir.tmpdir.mkdir("sub")
        sub.join("tox.ini").write(py.std.textwrap.dedent("""
            [pytest]
            minversion = 2.0
        """))
        testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent("""
            [pytest]
            minversion = 1.5
        """))
        config = testdir.parseconfigure(sub)
        assert config.getini("minversion") == "2.0"

    @pytest.mark.xfail(reason="probably not needed")
    def test_confcutdir(self, testdir):
        sub = testdir.mkdir("sub")
        sub.chdir()
        testdir.makeini("""
            [pytest]
            addopts = --qwe
        """)
        result = testdir.inline_run("--confcutdir=.")
        assert result.ret == 0
class TestConfigCmdlineParsing:
    """Tests for command-line driven configuration parsing."""

    def test_parsing_again_fails(self, testdir):
        # Config.parse() may only be called once per Config instance.
        config = testdir.parseconfig()
        pytest.raises(AssertionError, lambda: config.parse([]))

    def test_explicitly_specified_config_file_is_loaded(self, testdir):
        # "-c file" must win over an implicitly discovered pytest.ini.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("custom", "")
        """)
        testdir.makeini("""
            [pytest]
            custom = 0
        """)
        testdir.makefile(".cfg", custom = """
            [pytest]
            custom = 1
        """)
        config = testdir.parseconfig("-c", "custom.cfg")
        assert config.getini("custom") == "1"

    def test_absolute_win32_path(self, testdir):
        # An absolute, normalized path after "-c" must be accepted.
        temp_cfg_file = testdir.makefile(".cfg", custom="""
            [pytest]
            addopts = --version
        """)
        from os.path import normpath
        temp_cfg_file = normpath(str(temp_cfg_file))
        ret = pytest.main("-c " + temp_cfg_file)
        # NOTE(review): relies on _pytest.main being reachable as an
        # attribute of the imported _pytest package -- confirm.
        assert ret == _pytest.main.EXIT_OK
class TestConfigAPI:
    """Tests for the public Config accessor API (trace/getoption/getini/addini)."""

    def test_config_trace(self, testdir):
        # trace messages reach the writer installed on trace.root.
        config = testdir.parseconfig()
        l = []
        config.trace.root.setwriter(l.append)
        config.trace("hello")
        assert len(l) == 1
        assert l[0] == "hello [config]\n"

    def test_config_getoption(self, testdir):
        # An option is reachable via dest name, long flag and short flag.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addoption("--hello", "-X", dest="hello")
        """)
        config = testdir.parseconfig("--hello=this")
        for x in ("hello", "--hello", "-X"):
            assert config.getoption(x) == "this"
        pytest.raises(ValueError, "config.getoption('qweqwe')")

    @pytest.mark.skipif('sys.version_info[:2] not in [(2, 6), (2, 7)]')
    def test_config_getoption_unicode(self, testdir):
        # Unicode option names must work on Python 2.
        testdir.makeconftest("""
            from __future__ import unicode_literals
            def pytest_addoption(parser):
                parser.addoption('--hello', type='string')
        """)
        config = testdir.parseconfig('--hello=this')
        assert config.getoption('hello') == 'this'

    def test_config_getvalueorskip(self, testdir):
        # Missing values skip the test; present ones are returned.
        config = testdir.parseconfig()
        pytest.raises(pytest.skip.Exception,
                      "config.getvalueorskip('hello')")
        verbose = config.getvalueorskip("verbose")
        assert verbose == config.option.verbose

    def test_config_getvalueorskip_None(self, testdir):
        # A declared option whose value is None also triggers a skip.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addoption("--hello")
        """)
        config = testdir.parseconfig()
        with pytest.raises(pytest.skip.Exception):
            config.getvalueorskip('hello')

    def test_getoption(self, testdir):
        # Unknown names raise, unless a default is supplied.
        config = testdir.parseconfig()
        with pytest.raises(ValueError):
            config.getvalue('x')
        assert config.getoption("x", 1) == 1

    def test_getconftest_pathlist(self, testdir, tmpdir):
        # Paths from a conftest pathlist are resolved relative to it.
        somepath = tmpdir.join("x", "y", "z")
        p = tmpdir.join("conftest.py")
        p.write("pathlist = ['.', %r]" % str(somepath))
        config = testdir.parseconfigure(p)
        assert config._getconftest_pathlist('notexist', path=tmpdir) is None
        pl = config._getconftest_pathlist('pathlist', path=tmpdir)
        print(pl)
        assert len(pl) == 2
        assert pl[0] == tmpdir
        assert pl[1] == somepath

    def test_addini(self, testdir):
        # A plugin-declared ini value is readable; unknown names raise.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("myname", "my new ini value")
        """)
        testdir.makeini("""
            [pytest]
            myname=hello
        """)
        config = testdir.parseconfig()
        val = config.getini("myname")
        assert val == "hello"
        pytest.raises(ValueError, config.getini, 'other')

    def test_addini_pathlist(self, testdir):
        # type="pathlist" values resolve relative to the ini file.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("paths", "my new ini value", type="pathlist")
                parser.addini("abc", "abc value")
        """)
        p = testdir.makeini("""
            [pytest]
            paths=hello world/sub.py
        """)
        config = testdir.parseconfig()
        l = config.getini("paths")
        assert len(l) == 2
        assert l[0] == p.dirpath('hello')
        assert l[1] == p.dirpath('world/sub.py')
        pytest.raises(ValueError, config.getini, 'other')

    def test_addini_args(self, testdir):
        # type="args" values are shell-split; defaults apply when unset.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("args", "new args", type="args")
                parser.addini("a2", "", "args", default="1 2 3".split())
        """)
        testdir.makeini("""
            [pytest]
            args=123 "123 hello" "this"
        """)
        config = testdir.parseconfig()
        l = config.getini("args")
        assert len(l) == 3
        assert l == ["123", "123 hello", "this"]
        l = config.getini("a2")
        assert l == list("123")

    def test_addini_linelist(self, testdir):
        # type="linelist" splits on lines; unset values default to [].
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("xy", "", type="linelist")
                parser.addini("a2", "", "linelist")
        """)
        testdir.makeini("""
            [pytest]
            xy= 123 345
                second line
        """)
        config = testdir.parseconfig()
        l = config.getini("xy")
        assert len(l) == 2
        assert l == ["123 345", "second line"]
        l = config.getini("a2")
        assert l == []

    @pytest.mark.parametrize('str_val, bool_val',
                             [('True', True), ('no', False), ('no-ini', True)])
    def test_addini_bool(self, testdir, str_val, bool_val):
        # type="bool" parses ini strings; absent ini falls back to default.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("strip", "", type="bool", default=True)
        """)
        if str_val != 'no-ini':
            testdir.makeini("""
                [pytest]
                strip=%s
            """ % str_val)
        config = testdir.parseconfig()
        assert config.getini("strip") is bool_val

    def test_addinivalue_line_existing(self, testdir):
        # addinivalue_line appends to an existing linelist value.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("xy", "", type="linelist")
        """)
        testdir.makeini("""
            [pytest]
            xy= 123
        """)
        config = testdir.parseconfig()
        l = config.getini("xy")
        assert len(l) == 1
        assert l == ["123"]
        config.addinivalue_line("xy", "456")
        l = config.getini("xy")
        assert len(l) == 2
        assert l == ["123", "456"]

    def test_addinivalue_line_new(self, testdir):
        # addinivalue_line also works when no ini value was set at all.
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addini("xy", "", type="linelist")
        """)
        config = testdir.parseconfig()
        assert not config.getini("xy")
        config.addinivalue_line("xy", "456")
        l = config.getini("xy")
        assert len(l) == 1
        assert l == ["456"]
        config.addinivalue_line("xy", "123")
        l = config.getini("xy")
        assert len(l) == 2
        assert l == ["456", "123"]
class TestConfigFromdictargs:
    """Tests for Config.fromdictargs(), the dict-based construction API."""

    def test_basic_behavior(self):
        # Options come from the dict; re-parsing is refused afterwards.
        from _pytest.config import Config
        option_dict = {
            'verbose': 444,
            'foo': 'bar',
            'capture': 'no',
        }
        args = ['a', 'b']
        config = Config.fromdictargs(option_dict, args)
        with pytest.raises(AssertionError):
            config.parse(['should refuse to parse again'])
        assert config.option.verbose == 444
        assert config.option.foo == 'bar'
        assert config.option.capture == 'no'
        assert config.args == args

    def test_origargs(self):
        """Show that fromdictargs can handle args in their "orig" format"""
        from _pytest.config import Config
        option_dict = {}
        args = ['-vvvv', '-s', 'a', 'b']
        config = Config.fromdictargs(option_dict, args)
        assert config.args == ['a', 'b']
        assert config._origargs == args
        assert config.option.verbose == 4
        assert config.option.capture == 'no'

    def test_inifilename(self, tmpdir):
        # An explicit inifilename wins over a pytest.ini found in the cwd.
        tmpdir.join("foo/bar.ini").ensure().write(_pytest._code.Source("""
            [pytest]
            name = value
        """))
        from _pytest.config import Config
        inifile = '../../foo/bar.ini'
        option_dict = {
            'inifilename': inifile,
            'capture': 'no',
        }
        cwd = tmpdir.join('a/b')
        cwd.join('pytest.ini').ensure().write(_pytest._code.Source("""
            [pytest]
            name = wrong-value
            should_not_be_set = true
        """))
        with cwd.ensure(dir=True).as_cwd():
            config = Config.fromdictargs(option_dict, ())
        assert config.args == [str(cwd)]
        assert config.option.inifilename == inifile
        assert config.option.capture == 'no'
        # this indicates this is the file used for getting configuration values
        assert config.inifile == inifile
        assert config.inicfg.get('name') == 'value'
        assert config.inicfg.get('should_not_be_set') is None
def test_options_on_small_file_do_not_blow_up(testdir):
    """A failing two-test file reports the same outcome under many option combos."""
    path = testdir.makepyfile("""
        def test_f1(): assert 0
        def test_f2(): assert 0
    """)

    def run_and_check(opts):
        # Every variant must report exactly the two failures and nothing else.
        reprec = testdir.inline_run(*opts, path)
        passed, skipped, failed = reprec.countoutcomes()
        assert (passed, skipped, failed) == (0, 0, 2)

    option_sets = ([], ['-l'], ['-s'], ['--tb=no'], ['--tb=short'],
                   ['--tb=long'], ['--fulltrace'], ['--nomagic'],
                   ['--traceconfig'], ['-v'], ['-v', '-v'])
    for opts in option_sets:
        run_and_check(opts)
def test_preparse_ordering_with_setuptools(testdir, monkeypatch):
    # A plugin advertised both via a setuptools entry point and via the
    # PYTEST_PLUGINS env var / conftest pytest_plugins must still end up
    # registered and retrievable under its entry-point name.
    pkg_resources = pytest.importorskip("pkg_resources")

    def my_iter(name):
        # Fake pkg_resources.iter_entry_points: advertise one "mytestplugin".
        assert name == "pytest11"

        class EntryPoint:
            name = "mytestplugin"

            class dist:
                pass

            def load(self):
                class PseudoPlugin:
                    x = 42
                return PseudoPlugin()

        return iter([EntryPoint()])

    monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
    testdir.makeconftest("""
        pytest_plugins = "mytestplugin",
    """)
    monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin")
    config = testdir.parseconfig()
    plugin = config.pluginmanager.getplugin("mytestplugin")
    # The registered plugin must be the instance produced by EntryPoint.load().
    assert plugin.x == 42
def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch):
    # "-p no:NAME" on the command line must block the setuptools entry point
    # from even being loaded (load() would assert if called).
    pkg_resources = pytest.importorskip("pkg_resources")

    def my_iter(name):
        assert name == "pytest11"

        class EntryPoint:
            name = "mytestplugin"

            def load(self):
                assert 0, "should not arrive here"

        return iter([EntryPoint()])

    monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
    config = testdir.parseconfig("-p", "no:mytestplugin")
    plugin = config.pluginmanager.getplugin("mytestplugin")
    assert plugin is None
def test_cmdline_processargs_simple(testdir):
    """A conftest pytest_cmdline_preparse hook can inject extra CLI arguments."""
    testdir.makeconftest("""
        def pytest_cmdline_preparse(args):
            args.append("-h")
    """)
    result = testdir.runpytest()
    # The injected -h turns the run into a help display.
    result.stdout.fnmatch_lines(["*pytest*", "*-h*"])
def test_invalid_options_show_extra_information(testdir):
    """display extra information when pytest exits due to unrecognized
    options in the command-line"""
    testdir.makeini("""
        [pytest]
        addopts = --invalid-option
    """)
    result = testdir.runpytest()
    # Besides the argparse error itself, inifile and rootdir must be reported
    # so the user can locate where the bad option came from.
    result.stderr.fnmatch_lines([
        "*error: unrecognized arguments: --invalid-option*",
        "* inifile: %s*" % testdir.tmpdir.join('tox.ini'),
        "* rootdir: %s*" % testdir.tmpdir,
    ])
@pytest.mark.parametrize('args', [
    ['dir1', 'dir2', '-v'],
    ['dir1', '-v', 'dir2'],
    ['dir2', '-v', 'dir1'],
    ['-v', 'dir2', 'dir1'],
])
def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args):
    """
    Consider all arguments in the command-line for rootdir and inifile
    discovery, even if they happen to occur after an option. #949
    """
    # replace "dir1" and "dir2" from "args" into their real directory
    root = testdir.tmpdir.mkdir('myroot')
    real_dirs = {'dir1': root.mkdir('dir1'), 'dir2': root.mkdir('dir2')}
    args = [real_dirs.get(arg, arg) for arg in args]

    result = testdir.runpytest(*args)
    result.stdout.fnmatch_lines(['*rootdir: *myroot, inifile: '])
@pytest.mark.skipif("sys.platform == 'win32'")
def test_toolongargs_issue224(testdir):
    # A very long -m expression must not crash pytest (issue #224);
    # collecting nothing is acceptable, aborting is not.
    result = testdir.runpytest("-m", "hello" * 500)
    assert result.ret == EXIT_NOTESTSCOLLECTED
def test_notify_exception(testdir, capfd):
    """config.notify_exception() reports to stderr unless a plugin's
    pytest_internalerror hook claims the error by returning True."""
    config = testdir.parseconfig()
    # Use the context-manager form of pytest.raises; the string-eval form
    # (pytest.raises(ValueError, "raise ValueError(1)")) is deprecated and
    # was removed in later pytest versions.
    with pytest.raises(ValueError) as excinfo:
        raise ValueError(1)
    config.notify_exception(excinfo)
    out, err = capfd.readouterr()
    assert "ValueError" in err

    class A:
        def pytest_internalerror(self, excrepr):
            return True

    config.pluginmanager.register(A())
    config.notify_exception(excinfo)
    out, err = capfd.readouterr()
    # A truthy pytest_internalerror suppresses the default stderr report.
    assert not err
def test_load_initial_conftest_last_ordering(testdir):
    """_pytest.capture's pytest_load_initial_conftests implementation must run
    last, after _pytest.config's and any plugin registered in between."""
    from _pytest.config import get_config
    pm = get_config().pluginmanager

    class My:
        def pytest_load_initial_conftests(self):
            pass

    m = My()
    pm.register(m)
    hook = pm.hook.pytest_load_initial_conftests
    hookimpls = hook._nonwrappers + hook._wrappers
    # Expected call order of the last three registered implementations.
    last_three = hookimpls[-3:]
    assert last_three[0].function.__module__ == "_pytest.config"
    assert last_three[1].function == m.pytest_load_initial_conftests
    assert last_three[2].function.__module__ == "_pytest.capture"
class TestWarning:
    # Tests for the legacy config.warn() / node.warn() warning system.

    def test_warn_config(self, testdir):
        # config.warn() must be forwarded to the pytest_logwarning hook with
        # the same code/message pair; the conftest records the match in `l`.
        testdir.makeconftest("""
            l = []
            def pytest_configure(config):
                config.warn("C1", "hello")
            def pytest_logwarning(code, message):
                if message == "hello" and code == "C1":
                    l.append(1)
        """)
        testdir.makepyfile("""
            def test_proper(pytestconfig):
                import conftest
                assert conftest.l == [1]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_warn_on_test_item_from_request(self, testdir):
        # Warnings attached to a test item are counted but hidden by default,
        # and shown in the warning summary only when -rw is given.
        testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def fix(request):
                request.node.warn("T1", "hello")
            def test_hello(fix):
                pass
        """)
        result = testdir.runpytest()
        assert result.parseoutcomes()["pytest-warnings"] > 0
        assert "hello" not in result.stdout.str()

        result = testdir.runpytest("-rw")
        result.stdout.fnmatch_lines("""
            ===*pytest-warning summary*===
            *WT1*test_warn_on_test_item*:5*hello*
        """)
class TestRootdir:
def test_simple_noini(self, tmpdir):
    """Without an ini file, rootdir is the common ancestor of the arguments."""
    assert get_common_ancestor([tmpdir, tmpdir.join("a")]) == tmpdir
    assert get_common_ancestor([tmpdir.mkdir("a"), tmpdir]) == tmpdir
    assert get_common_ancestor([tmpdir]) == tmpdir
    # With no args at all, the ancestor falls back to the current directory.
    with tmpdir.as_cwd():
        assert get_common_ancestor([]) == tmpdir
@pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
def test_with_ini(self, tmpdir, name):
    """determine_setup() finds the ini file and roots at its directory,
    regardless of which (sub)directories are passed and in what order."""
    inifile = tmpdir.join(name)
    inifile.write("[pytest]\n")
    a = tmpdir.mkdir("a")
    b = a.mkdir("b")
    for args in ([tmpdir], [a], [b]):
        # Bug fix: the unpacked name used to shadow `inifile`, turning the
        # second assert into the tautology `inifile == inifile`.
        rootdir, parsed_inifile, inicfg = determine_setup(None, args)
        assert rootdir == tmpdir
        assert parsed_inifile == inifile
    rootdir, parsed_inifile, inicfg = determine_setup(None, [b, a])
    assert rootdir == tmpdir
    assert parsed_inifile == inifile
@pytest.mark.parametrize("name", "setup.cfg tox.ini".split())
def test_pytestini_overides_empty_other(self, tmpdir, name):
    """A pytest.ini at the root wins over an empty setup.cfg/tox.ini below it."""
    inifile = tmpdir.ensure("pytest.ini")
    a = tmpdir.mkdir("a")
    a.ensure(name)
    # Bug fix: unpack into a distinct name so the final assert actually
    # compares the discovered ini file against the pytest.ini path instead
    # of comparing a variable with itself.
    rootdir, parsed_inifile, inicfg = determine_setup(None, [a])
    assert rootdir == tmpdir
    assert parsed_inifile == inifile
def test_setuppy_fallback(self, tmpdir):
    """A setup.py marks the rootdir when no usable ini file content exists."""
    subdir = tmpdir.mkdir("a")
    subdir.ensure("setup.cfg")
    tmpdir.ensure("setup.py")
    rootdir, inifile, inicfg = determine_setup(None, [subdir])
    # The empty setup.cfg contributes nothing; setup.py decides the root.
    assert inicfg == {}
    assert inifile is None
    assert rootdir == tmpdir
def test_nothing(self, tmpdir):
    """With no ini file and no setup.py, rootdir degrades to the argument."""
    rootdir, inifile, inicfg = determine_setup(None, [tmpdir])
    assert (rootdir, inifile, inicfg) == (tmpdir, None, {})
def test_with_specific_inifile(self, tmpdir):
inifile = tmpdir.ensure("pytest.ini")
rootdir, inifile, inicfg = determine_setup(inifile, [tmpdir])
assert rootdir == tmpdir
| |
import pytest
from baby_steps import given, then, when
from pytest import raises
from district42 import schema
from district42.errors import DeclarationError
@pytest.mark.parametrize("length", [3, 4])
def test_list_contains_elements_len_declaration(length: int):
    """.len(n) is accepted when n >= the number of concrete elements."""
    with given:
        items = [schema.int(1), schema.int(2), schema.int(3)]

    with when:
        sch = schema.list([..., *items, ...]).len(length)

    with then:
        assert sch.props.len == length
        assert sch.props.elements == [..., *items, ...]
def test_list_contains_elements_len_declaration_error():
    """.len(n) with n < the concrete element count raises DeclarationError."""
    with given:
        sch = schema.list([..., schema.int(1), schema.int(2), ...])

    with when, raises(Exception) as exc_info:
        sch.len(1)

    with then:
        assert exc_info.type is DeclarationError
        expected = f"`{sch!r}` min len must be less than or equal to 2, 1 given"
        assert str(exc_info.value) == expected
@pytest.mark.parametrize("min_length", [3, 2])
def test_list_contains_elements_min_len_declaration(min_length: int):
    """.len(n, ...) accepts a minimum no larger than the concrete element count."""
    with given:
        items = [schema.int(1), schema.int(2), schema.int(3)]

    with when:
        sch = schema.list([..., *items, ...]).len(min_length, ...)

    with then:
        assert sch.props.min_len == min_length
        assert sch.props.elements == [..., *items, ...]
def test_list_contains_elements_min_len_declaration_error():
    """.len(n, ...) with n > the concrete element count raises DeclarationError."""
    with given:
        sch = schema.list([..., schema.int(1), schema.int(2), ...])

    with when, raises(Exception) as exc_info:
        sch.len(3, ...)

    with then:
        assert exc_info.type is DeclarationError
        expected = f"`{sch!r}` min len must be less than or equal to 2, 3 given"
        assert str(exc_info.value) == expected
@pytest.mark.parametrize("max_length", [3, 4])
def test_list_contains_elements_max_len_declaration(max_length: int):
    """.len(..., n) accepts a maximum no smaller than the concrete element count."""
    with given:
        items = [schema.int(1), schema.int(2), schema.int(3)]

    with when:
        sch = schema.list([..., *items, ...]).len(..., max_length)

    with then:
        assert sch.props.max_len == max_length
        assert sch.props.elements == [..., *items, ...]
def test_list_contains_elements_max_len_declaration_error():
    """.len(..., n) with n < the concrete element count raises DeclarationError."""
    with given:
        sch = schema.list([..., schema.int(1), schema.int(2), ...])

    with when, raises(Exception) as exc_info:
        sch.len(..., 1)

    with then:
        assert exc_info.type is DeclarationError
        expected = f"`{sch!r}` max len must be greater than or equal to 2, 1 given"
        assert str(exc_info.value) == expected
def test_list_contains_elements_min_max_len_declaration():
    """.len(min, max) stores both bounds when they bracket the element count."""
    with given:
        items = [schema.int(1), schema.int(2), schema.int(3)]
        lower, upper = 3, 10

    with when:
        sch = schema.list([..., *items, ...]).len(lower, upper)

    with then:
        assert sch.props.min_len == lower
        assert sch.props.max_len == upper
        assert sch.props.elements == [..., *items, ...]
@pytest.mark.parametrize("length", [2, 3])
def test_list_contains_head_elements_len_declaration(length: int):
    """.len(n) on a head-anchored list accepts n >= the element count."""
    with given:
        items = [schema.int(1), schema.int(2)]

    with when:
        sch = schema.list([*items, ...]).len(length)

    with then:
        assert sch.props.len == length
        assert sch.props.elements == [*items, ...]
def test_list_contains_head_elements_len_declaration_error():
    """.len(n) below the head element count raises DeclarationError."""
    with given:
        sch = schema.list([schema.int(1), schema.int(2), ...])

    with when, raises(Exception) as exc_info:
        sch.len(1)

    with then:
        assert exc_info.type is DeclarationError
        expected = f"`{sch!r}` min len must be less than or equal to 2, 1 given"
        assert str(exc_info.value) == expected
@pytest.mark.parametrize("min_length", [2, 1])
def test_list_contains_head_elements_min_len_declaration(min_length: int):
    """.len(n, ...) on a head-anchored list accepts n <= the element count."""
    with given:
        items = [schema.int(1), schema.int(2)]

    with when:
        sch = schema.list([*items, ...]).len(min_length, ...)

    with then:
        assert sch.props.min_len == min_length
        assert sch.props.elements == [*items, ...]
def test_list_contains_head_elements_min_len_declaration_error():
    """.len(n, ...) above the head element count raises DeclarationError."""
    with given:
        sch = schema.list([schema.int(1), schema.int(2), ...])

    with when, raises(Exception) as exc_info:
        sch.len(3, ...)

    with then:
        assert exc_info.type is DeclarationError
        expected = f"`{sch!r}` min len must be less than or equal to 2, 3 given"
        assert str(exc_info.value) == expected
@pytest.mark.parametrize("max_length", [2, 3])
def test_list_contains_head_elements_max_len_declaration(max_length: int):
    """.len(..., n) on a head-anchored list accepts n >= the element count."""
    with given:
        items = [schema.int(1), schema.int(2)]

    with when:
        sch = schema.list([*items, ...]).len(..., max_length)

    with then:
        assert sch.props.max_len == max_length
        assert sch.props.elements == [*items, ...]
def test_list_contains_head_elements_max_len_declaration_error():
    """.len(..., n) below the head element count raises DeclarationError."""
    with given:
        sch = schema.list([schema.int(1), schema.int(2), ...])

    with when, raises(Exception) as exc_info:
        sch.len(..., 1)

    with then:
        assert exc_info.type is DeclarationError
        expected = f"`{sch!r}` max len must be greater than or equal to 2, 1 given"
        assert str(exc_info.value) == expected
def test_list_contains_head_elements_min_max_len_declaration():
    """.len(min, max) on a head-anchored list stores both bounds."""
    with given:
        items = [schema.int(1), schema.int(2)]
        lower, upper = 2, 10

    with when:
        sch = schema.list([*items, ...]).len(lower, upper)

    with then:
        assert sch.props.min_len == lower
        assert sch.props.max_len == upper
        assert sch.props.elements == [*items, ...]
@pytest.mark.parametrize("length", [2, 3])
def test_list_contains_tail_elements_len_declaration(length: int):
    """.len(n) on a tail-anchored list accepts n >= the element count.

    Bug fix: a stray ``length = 2`` inside ``given`` overwrote the
    parametrized value, so the ``length == 3`` case was never exercised
    (compare the elements/head variants, which have no such override).
    """
    with given:
        elements = [schema.int(2), schema.int(3)]

    with when:
        sch = schema.list([..., *elements]).len(length)

    with then:
        assert sch.props.elements == [..., *elements]
        assert sch.props.len == length
def test_list_contains_tail_elements_len_declaration_error():
    """.len(n) below the tail element count raises DeclarationError."""
    with given:
        sch = schema.list([..., schema.int(1), schema.int(2)])

    with when, raises(Exception) as exc_info:
        sch.len(1)

    with then:
        assert exc_info.type is DeclarationError
        expected = f"`{sch!r}` min len must be less than or equal to 2, 1 given"
        assert str(exc_info.value) == expected
@pytest.mark.parametrize("min_length", [2, 1])
def test_list_contains_tail_elements_min_len_declaration(min_length: int):
    """.len(n, ...) on a tail-anchored list accepts n <= the element count."""
    with given:
        items = [schema.int(2), schema.int(3)]

    with when:
        sch = schema.list([..., *items]).len(min_length, ...)

    with then:
        assert sch.props.min_len == min_length
        assert sch.props.elements == [..., *items]
def test_list_contains_tail_elements_min_len_declaration_error():
    """.len(n, ...) above the tail element count raises DeclarationError."""
    with given:
        sch = schema.list([..., schema.int(1), schema.int(2)])

    with when, raises(Exception) as exc_info:
        sch.len(3, ...)

    with then:
        assert exc_info.type is DeclarationError
        expected = f"`{sch!r}` min len must be less than or equal to 2, 3 given"
        assert str(exc_info.value) == expected
@pytest.mark.parametrize("max_length", [2, 3])
def test_list_contains_tail_elements_max_len_declaration(max_length: int):
    """.len(..., n) on a tail-anchored list accepts n >= the element count."""
    with given:
        items = [schema.int(2), schema.int(3)]

    with when:
        sch = schema.list([..., *items]).len(..., max_length)

    with then:
        assert sch.props.max_len == max_length
        assert sch.props.elements == [..., *items]
def test_list_contains_tail_elements_max_len_declaration_error():
    """.len(..., n) below the tail element count raises DeclarationError."""
    with given:
        sch = schema.list([..., schema.int(1), schema.int(2)])

    with when, raises(Exception) as exc_info:
        sch.len(..., 1)

    with then:
        assert exc_info.type is DeclarationError
        expected = f"`{sch!r}` max len must be greater than or equal to 2, 1 given"
        assert str(exc_info.value) == expected
def test_list_contains_tail_elements_min_max_len_declaration():
with given:
elements = [schema.int(2), schema.int(3)]
min_length, max_length = 2, 10
with when:
sch = schema.list([..., *elements]).len(min_length, max_length)
with then:
assert sch.props.elements == [..., *elements]
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
    """Return the first UTXO dict in *listunspent* whose 'amount' equals *amount*.

    Raises AssertionError when no matching unspent output exists.
    """
    match = next((utx for utx in listunspent if utx['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
    # Run four nodes, starting from a clean (empty) chain so balances and
    # UTXOs are fully controlled by the test itself.
    self.num_nodes = 4
    self.setup_clean_chain = True
def skip_test_if_missing_module(self):
    # fundrawtransaction exercises wallet RPCs throughout, so the whole
    # test is meaningless without wallet support compiled in.
    self.skip_if_no_wallet()
def setup_network(self):
    """Start the nodes and connect them pairwise: 0-1, 1-2, 0-2, 0-3."""
    self.setup_nodes()
    for a, b in ((0, 1), (1, 2), (0, 2), (0, 3)):
        connect_nodes_bi(self.nodes, a, b)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
watchonly_vout = find_vout_for_address(self.nodes[0], watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid chaincoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.stop_nodes()
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
# Again lock the watchonly UTXO or nodes[0] may spend it, because
# lockunspent is memory-only and thus lost on restart
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
# Script entry point: run the fundrawtransaction functional test suite.
if __name__ == '__main__':
    RawTransactionsTest().main()
| |
# import os
# import sys
import shelve
import random
import textwrap
from time import sleep, time
from collections import namedtuple
from bearlibterminal import terminal as term
# sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/../')
import spaceship.strings as strings
from .screen_functions import *
from .scene import Scene
from .option import Option
class Options(Scene):
    """Options menu scene.

    Lets the user change screen size, cell size, font and coloring of the
    bearlibterminal window.  Navigation is arrow keys + ENTER; ESC/Q return
    to the main menu.
    """

    def __init__(self, sid='options_menu'):
        # sid: scene identifier used by the Scene manager for transitions.
        super().__init__(sid)

    def setup(self):
        """Build the option tree and snapshot the current terminal state."""
        self.option = Option("Options Screen")
        # 80x25 -> 8x16 | 80x50 -> 8x8 | 160x50 -> 16x16 | FullScreen -> 16x16
        self.option.add_opt("Screen Size",
            ["80x25", "80x50", "160x50", "160x100"])
            # "Full Screen: {}x{}".format(sysize(0), sysize(1))])
        self.option.add_opt("Cell Size", ["Auto", "8x16", "8x8", "16x16"])
        self.option.add_opt("Font Choice",
            ["Default", "Source", "Fira", "Fira-Bold", "IBM_CGA", "Andale",
             "Courier", "Unscii-8", "Unscii-8-thin", "VeraMono"])
        self.option.add_opt("Coloring",
            ["Dynamic", "Dark", "Light", "Colorblind"])
        # Current terminal properties: gx/gy = grid size in cells,
        # cx/cy = cell size in pixels.  Insertion order (gx, gy, cx, cy)
        # matters -- reset_screen() unpacks the values positionally.
        self.prop = {
            'gx': term.state(term.TK_WIDTH),
            'gy': term.state(term.TK_HEIGHT),
            'cx': term.state(term.TK_CELL_WIDTH),
            'cy': term.state(term.TK_CELL_HEIGHT),
        }

    def parse_screensize(self, screensize):
        """Update gx/gy from a "WxH" choice string.

        Returns True if the stored grid size actually changed (caller then
        re-applies the window settings), False otherwise.
        """
        if "Full Screen" in screensize:
            # Derive the grid from the physical screen size in pixels.
            sx = sysize(0) // term.state(term.TK_CELL_WIDTH)
            sy = sysize(1) // term.state(term.TK_CELL_HEIGHT)
        else:
            sx, sy = (int(part) for part in screensize.split('x'))
        if (self.prop['gx'], self.prop['gy']) != (sx, sy):
            self.prop['gx'], self.prop['gy'] = sx, sy
            return True
        return False

    def parse_cellsize(self, cellsize):
        """Update cx/cy from "Auto" or a "WxH" choice string.

        Returns True if the stored cell size actually changed.
        """
        if cellsize == "Auto":
            # BUGFIX: the stored sentinel is lowercase "auto"; the original
            # compared against "Auto", so selecting Auto twice still
            # reported a change and needlessly reset the screen.
            if self.prop['cx'] != "auto":
                self.prop['cx'], self.prop['cy'] = "auto", None
                return True
        else:
            cx, cy = (int(part) for part in cellsize.split('x'))
            if (self.prop['cx'], self.prop['cy']) != (cx, cy):
                self.prop['cx'], self.prop['cy'] = cx, cy
                return True
        return False

    def parse_fonts(self, font):
        """Apply the chosen font (by display name) via term.set()."""
        # BUGFIX: compare the selected font name, not the Option object
        # (the original `self.option == "Default"` could never be true).
        if font == "Default":
            term.set('font: default, size={}{}'.format(
                self.prop['cx'],
                'x' + str(self.prop['cy'])
                if self.prop['cy'] != self.prop['cx'] else ''))
        else:
            if self.prop['cx'] == "auto":
                # BUGFIX: the membership test used one comma-joined string
                # ("Andale, Courier, VeraMono"), i.e. a substring check that
                # never matched a full font name; use a real tuple.
                cy = 8 if font not in ("Andale", "Courier", "VeraMono") else 16
                # BUGFIX: the size spec was missing the 'x' separator and
                # produced e.g. "size=816" instead of "size=8x16".
                term.set("font: ./fonts/{}.ttf, size=8x{}".format(font, cy))
            else:
                term.set("font: ./fonts/{}.ttf, size={}{}".format(
                    font,
                    self.prop['cx'],
                    'x' + str(self.prop['cy'])
                    if self.prop['cy'] != self.prop['cx'] else ''))

    def reset_screen(self):
        """Re-apply window size/cellsize from self.prop and refresh."""
        if self.prop['cx'] == "auto":
            # Let bearlibterminal pick the cell size itself.
            term.set("window: size={}x{}, cellsize={}".format(
                self.prop['gx'],
                self.prop['gy'],
                self.prop['cx'],
            ))
        else:
            # Relies on self.prop preserving insertion order gx, gy, cx, cy.
            term.set("window: size={}x{}, cellsize={}x{}".format(
                *self.prop.values()))
        term.refresh()

    def draw(self):
        """Render the options menu, then handle one keypress (blocking)."""
        term.clear()
        # Options title, centered on the top row.
        term.puts(
            x=center(self.option.title, term.state(term.TK_WIDTH)),
            y=1,
            s=self.option.title)
        # Top-level options; expanded entries list their sub-options below.
        height = 3
        for index, opt in enumerate(self.option.opts):
            selected = index == self.option.optindex
            expanded = index in self.option.expand
            prefix = "[[-]] " if expanded else "[[+]] "
            if selected:
                opt = prefix + "[c=#00ffff]{}[/c]".format(opt)
            else:
                opt = prefix + opt
            term.puts(term.state(term.TK_WIDTH) // 5, height, opt)
            height += term.state(term.TK_HEIGHT) // 25
            if expanded:
                # BUGFIX(readability): the inner loop used to shadow the
                # outer `index`; use a distinct name for the sub-option index.
                for sub_index, subopt in enumerate(self.option.subopts[index]):
                    if selected and sub_index == \
                            self.option.suboptindex[self.option.optindex]:
                        subopt = "[c=#00ffff]{}[/c]".format(subopt)
                    term.puts(
                        x=term.state(term.TK_WIDTH) // 4 + 3,
                        y=height,
                        s=subopt)
                    height += term.state(term.TK_HEIGHT) // 25
        height += term.state(term.TK_HEIGHT) // 25
        # Debug: shows terminal properties -- can remove later.
        term.puts(
            x=term.state(term.TK_WIDTH) // 5,
            y=height + 1,
            s="{}".format(term.state(term.TK_WIDTH)))
        term.puts(
            x=term.state(term.TK_WIDTH) // 5,
            y=height + 2,
            s="{}".format(term.state(term.TK_HEIGHT)))
        term.puts(
            x=term.state(term.TK_WIDTH) // 5,
            y=height + 3,
            s="{}".format(term.state(term.TK_CELL_WIDTH)))
        term.puts(
            x=term.state(term.TK_WIDTH) // 5,
            y=height + 4,
            s="{}".format(term.state(term.TK_CELL_HEIGHT)))
        term.refresh()
        # User input during options screen.
        key = term.read()
        if key in (term.TK_CLOSE, term.TK_Q, term.TK_ESCAPE):
            self.option.reset_all()
            self.ret['scene'] = 'main_menu'
            self.proceed = False
        elif key == term.TK_ENTER:
            if self.option.optindex in self.option.expand:
                # A sub-option is highlighted: apply the selection.
                if self.option.suboptindex[self.option.optindex] != -1:
                    if self.proceed:
                        print('SELECTED: {}|{}'.format(
                            self.option.opts[self.option.optindex],
                            self.option.subopts[self.option.optindex][
                                self.option.suboptindex[self.option.optindex]]))
                    if self.option.option() == "Screen Size":
                        if self.parse_screensize(self.option.suboption()):
                            self.reset_screen()
                    elif self.option.option() == "Cell Size":
                        if self.parse_cellsize(self.option.suboption()):
                            self.reset_screen()
                    elif self.option.option() == "Font Choice":
                        # BUGFIX: parse_fonts takes only the font name; the
                        # original also passed self.prop, which raised
                        # TypeError (too many arguments) at runtime.
                        self.parse_fonts(self.option.suboption())
                        self.reset_screen()
                else:
                    self.option.collapse(self.option.optindex)
            else:
                self.option.expansion(self.option.optindex)
                # option.move_subpointer(1)
        # Arrow keys (UP | DOWN)
        elif key == term.TK_DOWN:
            if len(self.option.expand):
                self.option.move_subpointer(1)
                self.option.correct_subpointer()
            else:
                self.option.move_pointer(1)
                self.option.correct_pointer()
        elif key == term.TK_UP:
            if len(self.option.expand):
                self.option.move_subpointer(-1)
                self.option.correct_subpointer()
            else:
                self.option.move_pointer(-1)
                self.option.correct_pointer()
# Standalone entry point: open a terminal window and run this scene alone
# (useful for testing the options menu outside the full game loop).
if __name__ == "__main__":
    term.open()
    o = Options()
    o.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.