| sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (string, 1 class: entailment) |
|---|---|---|
def accept_operator(self, precedence):
"""Accept the next binary operator only if it's of higher precedence."""
match = grammar.infix(self.tokens)
if not match:
return
if match.operator.precedence < precedence:
return
# The next thing is an operator that we want. Now match it for real.
return self.tokens.accept(grammar.infix)
|
Accept the next binary operator only if it's of higher precedence.
|
entailment
|
def operator(self, lhs, min_precedence):
"""Climb operator precedence as long as there are operators.
This function implements a basic precedence climbing parser to deal
with binary operators in a sane fashion. The outer loop will keep
spinning as long as the next token is an operator with a precedence
of at least 'min_precedence', parsing operands as atoms (which,
in turn, recurse into 'expression' which recurses back into 'operator').
This supports both left- and right-associativity. The only part of the
code that's not a regular precedence-climber deals with mixfix
operators. A mixfix operator in DottySQL consists of an infix part
and a suffix (they are still binary, they just have a terminator).
"""
# Spin as long as the next token is an operator of higher
# precedence. (This may not do anything, which is fine.)
while self.accept_operator(precedence=min_precedence):
operator = self.tokens.matched.operator
# If we're parsing a mixfix operator we can keep going until
# the suffix.
if operator.suffix:
rhs = self.expression()
self.tokens.expect(common_grammar.match_tokens(operator.suffix))
rhs.end = self.tokens.matched.end
elif operator.name == ".":
# The dot operator changes the meaning of RHS.
rhs = self.dot_rhs()
else:
# The right hand side is an atom, which might turn out to be
# an expression. Isn't recursion exciting?
rhs = self.atom()
# Keep going as long as the next token is an infix operator of
# higher precedence.
next_min_precedence = operator.precedence
if operator.assoc == "left":
next_min_precedence += 1
while self.tokens.match(grammar.infix):
if (self.tokens.matched.operator.precedence
< next_min_precedence):
break
rhs = self.operator(rhs,
self.tokens.matched.operator.precedence)
lhs = operator.handler(lhs, rhs, start=lhs.start, end=rhs.end,
source=self.original)
return lhs
|
Climb operator precedence as long as there are operators.
This function implements a basic precedence climbing parser to deal
with binary operators in a sane fashion. The outer loop will keep
spinning as long as the next token is an operator with a precedence
of at least 'min_precedence', parsing operands as atoms (which,
in turn, recurse into 'expression' which recurses back into 'operator').
This supports both left- and right-associativity. The only part of the
code that's not a regular precedence-climber deals with mixfix
operators. A mixfix operator in DottySQL consists of an infix part
and a suffix (they are still binary, they just have a terminator).
|
entailment
|
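The precedence-climbing loop above leans on the surrounding parser machinery (grammar.infix, self.tokens, the AST handlers). As a point of comparison, here is a minimal, self-contained sketch of the same technique over arithmetic tokens, using a made-up operator table rather than DottySQL's grammar:

```python
# Minimal precedence-climbing sketch over arithmetic tokens.
# The operator table and the tokenization are illustrative only.
OPS = {
    "+": (1, "left"), "-": (1, "left"),
    "*": (2, "left"), "^": (3, "right"),
}

def parse_expression(tokens, pos=0, min_prec=1):
    """Parse tokens[pos:] and return (value, next_pos)."""
    lhs, pos = float(tokens[pos]), pos + 1
    while pos < len(tokens) and tokens[pos] in OPS:
        op = tokens[pos]
        prec, assoc = OPS[op]
        if prec < min_prec:
            break
        # Left-associative operators require strictly higher precedence on
        # their right-hand side; right-associative operators allow equal.
        next_min = prec + 1 if assoc == "left" else prec
        rhs, pos = parse_expression(tokens, pos + 1, next_min)
        if op == "+":
            lhs += rhs
        elif op == "-":
            lhs -= rhs
        elif op == "*":
            lhs *= rhs
        else:  # "^"
            lhs **= rhs
    return lhs, pos

print(parse_expression("2 + 3 * 4 ^ 2".split()))  # (50.0, 7)
```

Bumping the minimum precedence by one for left-associative operators is exactly what the `next_min_precedence` adjustment in the method above does.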
def dot_rhs(self):
"""Match the right-hand side of a dot (.) operator.
The RHS must be a symbol token, but it is interpreted as a literal
string (because that's what goes in the AST of Resolve).
"""
self.tokens.expect(common_grammar.symbol)
return ast.Literal(self.tokens.matched.value,
start=self.tokens.matched.start,
end=self.tokens.matched.end, source=self.original)
|
Match the right-hand side of a dot (.) operator.
The RHS must be a symbol token, but it is interpreted as a literal
string (because that's what goes in the AST of Resolve).
|
entailment
|
def select(self):
"""First part of an SQL query."""
# Try to match the asterisk, any or list of vars.
if self.tokens.accept(grammar.select_any):
return self.select_any()
if self.tokens.accept(grammar.select_all):
# The FROM after SELECT * is required.
self.tokens.expect(grammar.select_from)
return self.select_from()
return self.select_what()
|
First part of an SQL query.
|
entailment
|
def _guess_name_of(self, expr):
"""Tries to guess what variable name 'expr' ends in.
This is a heuristic that roughly emulates what most SQL databases
name columns, based on selected variable names or applied functions.
"""
if isinstance(expr, ast.Var):
return expr.value
if isinstance(expr, ast.Resolve):
# We know the RHS of resolve is a Literal because that's what
# Parser.dot_rhs does.
return expr.rhs.value
if isinstance(expr, ast.Select) and isinstance(expr.rhs, ast.Literal):
name = self._guess_name_of(expr.lhs)
if name is not None:
return "%s_%s" % (name, expr.rhs.value)
if isinstance(expr, ast.Apply) and isinstance(expr.func, ast.Var):
return expr.func.value
|
Tries to guess what variable name 'expr' ends in.
This is a heuristic that roughly emulates what most SQL databases
name columns, based on selected variable names or applied functions.
|
entailment
|
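The naming heuristic is easiest to follow on concrete AST shapes. A rough standalone sketch with stand-in node classes (not the real efilter.ast types, and omitting the Select case) might be:

```python
from collections import namedtuple

# Stand-in AST nodes; the real ones live in efilter.ast.
Var = namedtuple("Var", "value")
Literal = namedtuple("Literal", "value")
Resolve = namedtuple("Resolve", "lhs rhs")   # obj.field
Apply = namedtuple("Apply", "func args")     # func(args...)

def guess_name_of(expr):
    if isinstance(expr, Var):
        return expr.value
    if isinstance(expr, Resolve):
        return expr.rhs.value                # RHS of '.' is always a Literal
    if isinstance(expr, Apply) and isinstance(expr.func, Var):
        return expr.func.value
    return None

print(guess_name_of(Resolve(Var("proc"), Literal("name"))))  # name
print(guess_name_of(Apply(Var("count"), [Var("pid")])))      # count
```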
def select_limit(self, source_expression):
"""Match LIMIT take [OFFSET drop]."""
start = self.tokens.matched.start
# The expression right after LIMIT is the count to take.
limit_count_expression = self.expression()
# Optional OFFSET follows.
if self.tokens.accept(grammar.select_offset):
offset_start = self.tokens.matched.start
offset_end = self.tokens.matched.end
# Next thing is the count to drop.
offset_count_expression = self.expression()
# We have a new source expression, which is drop(count, original).
offset_source_expression = ast.Apply(
ast.Var("drop", start=offset_start, end=offset_end,
source=self.original),
offset_count_expression,
source_expression,
start=offset_start, end=offset_count_expression.end,
source=self.original)
# Drop before taking, because obviously.
source_expression = offset_source_expression
limit_expression = ast.Apply(
ast.Var("take", start=start, end=limit_count_expression.end,
source=self.original),
limit_count_expression,
source_expression,
start=start, end=self.tokens.matched.end, source=self.original)
return limit_expression
|
Match LIMIT take [OFFSET drop].
|
entailment
|
def builtin(self, keyword):
"""Parse the pseudo-function application subgrammar."""
# The match includes the lparen token, so the keyword is just the first
# token in the match, not the whole thing.
keyword_start = self.tokens.matched.first.start
keyword_end = self.tokens.matched.first.end
self.tokens.expect(common_grammar.lparen)
if self.tokens.matched.start != keyword_end:
return self.error(
"No whitespace allowed between function and lparen.",
start_token=self.tokens.matched.first)
expr_type = grammar.BUILTINS[keyword.lower()]
arguments = [self.expression()]
while self.tokens.accept(common_grammar.comma):
arguments.append(self.expression())
self.tokens.expect(common_grammar.rparen)
if expr_type.arity and expr_type.arity != len(arguments):
return self.error(
"%s expects %d arguments, but was passed %d." % (
keyword, expr_type.arity, len(arguments)),
start_token=self.tokens.matched.first)
return expr_type(*arguments, start=keyword_start,
end=self.tokens.matched.end, source=self.original)
|
Parse the pseudo-function application subgrammar.
|
entailment
|
def application(self, func):
"""Parse the function application subgrammar.
Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.
While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the
syntax should make it clear what the intended use of application is.
If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible.
"""
start = self.tokens.matched.start
if self.tokens.accept(common_grammar.rparen):
# That was easy.
return ast.Apply(func, start=start, end=self.tokens.matched.end,
source=self.original)
arguments = [self.expression()]
while self.tokens.accept(common_grammar.comma):
arguments.append(self.expression())
self.tokens.expect(common_grammar.rparen)
return ast.Apply(func, *arguments, start=start,
end=self.tokens.matched.end, source=self.original)
|
Parse the function application subgrammar.
Function application can, conceptually, be thought of as a mixfix
operator, similar to the way array subscripting works. However, it is
not clear at this point whether we want to allow it to work as such,
because doing so would permit queries to, at runtime, select methods
out of an arbitrary object and then call them.
While there is a function whitelist and preventing this sort of thing
in the syntax isn't a security feature, it still seems like the
syntax should make it clear what the intended use of application is.
If we later decide to extend DottySQL to allow function application
over an arbitrary LHS expression then that syntax would be a strict
superset of the current syntax and backwards compatible.
|
entailment
|
def list(self):
"""Parse a list (tuple) which can contain any combination of types."""
start = self.tokens.matched.start
if self.tokens.accept(common_grammar.rbracket):
return ast.Tuple(start=start, end=self.tokens.matched.end,
source=self.original)
elements = [self.expression()]
while self.tokens.accept(common_grammar.comma):
elements.append(self.expression())
self.tokens.expect(common_grammar.rbracket)
return ast.Tuple(*elements, start=start, end=self.tokens.matched.end,
source=self.original)
|
Parse a list (tuple) which can contain any combination of types.
|
entailment
|
def get_singleton(self):
"""If the row only has one column, return that value; otherwise raise.
Raises:
ValueError, if count of columns is not 1.
"""
only_value = None
for value in six.itervalues(self.ordered_dict):
# This loop will raise if it runs more than once.
if only_value is not None:
raise ValueError("%r is not a singleton." % self)
only_value = value
if only_value is self.__UnsetSentinel or only_value is None:
raise ValueError("%r is empty." % self)
return only_value
|
If the row only has one column, return that value; otherwise raise.
Raises:
ValueError, if count of columns is not 1.
|
entailment
|
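The same contract can be stated more directly for a plain mapping; a simplified sketch, without the unset-sentinel bookkeeping:

```python
def get_singleton(row):
    """Return the only value of a one-column mapping, else raise."""
    values = list(row.values())
    if len(values) != 1 or values[0] is None:
        raise ValueError("%r is not a non-empty singleton." % (row,))
    return values[0]

print(get_singleton({"pid": 1234}))          # 1234
# get_singleton({"pid": 1, "name": "x"})  -> ValueError
```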
def _cpu(self):
"""Record CPU usage."""
value = int(psutil.cpu_percent())
set_metric("cpu", value, category=self.category)
gauge("cpu", value)
|
Record CPU usage.
|
entailment
|
def _mem(self):
"""Record Memory usage."""
value = int(psutil.virtual_memory().percent)
set_metric("memory", value, category=self.category)
gauge("memory", value)
|
Record Memory usage.
|
entailment
|
def _disk(self):
"""Record Disk usage."""
mountpoints = [
p.mountpoint for p in psutil.disk_partitions()
if p.device.endswith(self.device)
]
if len(mountpoints) != 1:
raise CommandError("Unknown device: {0}".format(self.device))
value = int(psutil.disk_usage(mountpoints[0]).percent)
set_metric("disk-{0}".format(self.device), value, category=self.category)
gauge("disk-{0}".format(self.device), value)
|
Record Disk usage.
|
entailment
|
def _net(self):
"""Record Network usage."""
data = psutil.network_io_counters(pernic=True)
if self.device not in data:
raise CommandError("Unknown device: {0}".format(self.device))
# Network bytes sent
value = data[self.device].bytes_sent
metric("net-{0}-sent".format(self.device), value, category=self.category)
gauge("net-{0}-sent".format(self.device), value)
# Network bytes received
value = data[self.device].bytes_recv
metric("net-{0}-recv".format(self.device), value, category=self.category)
|
Record Network usage.
|
entailment
|
def implements(obj, protocol):
"""Does the object 'obj' implement the 'prococol'?"""
if isinstance(obj, type):
raise TypeError("First argument to implements must be an instance. "
"Got %r." % obj)
return isinstance(obj, protocol) or issubclass(AnyType, protocol)
|
Does the object 'obj' implement the 'protocol'?
|
entailment
|
def isa(cls, protocol):
"""Does the type 'cls' participate in the 'protocol'?"""
if not isinstance(cls, type):
raise TypeError("First argument to isa must be a type. Got %s." %
repr(cls))
if not isinstance(protocol, type):
raise TypeError(("Second argument to isa must be a type or a Protocol. "
"Got an instance of %r.") % type(protocol))
return issubclass(cls, protocol) or issubclass(AnyType, protocol)
|
Does the type 'cls' participate in the 'protocol'?
|
entailment
|
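Both checks reduce to abc machinery: a protocol is an abstract base class, and the extra `issubclass(AnyType, protocol)` clause lets a protocol opt in to accepting anything. A toy illustration of the registration-based check, using a hypothetical ISized protocol rather than an EFILTER one:

```python
import abc

class ISized(abc.ABC):
    """Hypothetical protocol: things that report a length."""

class MyBuffer(object):
    def length(self):
        return 42

# Participation is declared explicitly, exactly like cls.register(for_type).
ISized.register(MyBuffer)

print(issubclass(MyBuffer, ISized))    # True  (isa-style check on a type)
print(isinstance(MyBuffer(), ISized))  # True  (implements-style check)
print(issubclass(dict, ISized))        # False (never registered)
```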
def implemented(cls, for_type):
"""Assert that protocol 'cls' is implemented for type 'for_type'.
This will cause 'for_type' to be registered with the protocol 'cls'.
Subsequently, protocol.isa(for_type, cls) will return True, as will
isinstance, issubclass and others.
Raises:
TypeError if 'for_type' doesn't implement all required functions.
"""
for function in cls.required():
if not function.implemented_for_type(for_type):
raise TypeError(
"%r doesn't implement %r so it cannot participate in "
"the protocol %r." %
(for_type, function.func.__name__, cls))
cls.register(for_type)
|
Assert that protocol 'cls' is implemented for type 'for_type'.
This will cause 'for_type' to be registered with the protocol 'cls'.
Subsequently, protocol.isa(for_type, cls) will return True, as will
isinstance, issubclass and others.
Raises:
TypeError if 'for_type' doesn't implement all required functions.
|
entailment
|
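The verification step amounts to checking that every required name resolves to a callable on for_type before registering it. A standalone sketch under that assumption, with a made-up required-method list instead of cls.required():

```python
import abc

class IReadable(abc.ABC):
    """Hypothetical protocol with one required method name."""
    REQUIRED = ("read",)

def implemented(protocol, for_type):
    """Register for_type with protocol after checking required methods."""
    for name in protocol.REQUIRED:
        if not callable(getattr(for_type, name, None)):
            raise TypeError("%r doesn't implement %r so it cannot participate "
                            "in the protocol %r." % (for_type, name, protocol))
    protocol.register(for_type)

class StringSource(object):
    def read(self):
        return "hello"

implemented(IReadable, StringSource)
print(isinstance(StringSource(), IReadable))  # True
```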
def __get_type_args(for_type=None, for_types=None):
"""Parse the arguments and return a tuple of types to implement for.
Raises:
ValueError or TypeError as appropriate.
"""
if for_type:
if for_types:
raise ValueError("Cannot pass both for_type and for_types.")
for_types = (for_type,)
elif for_types:
if not isinstance(for_types, tuple):
raise TypeError("for_types must be passed as a tuple of "
"types (classes).")
else:
raise ValueError("Must pass either for_type or for_types.")
return for_types
|
Parse the arguments and return a tuple of types to implement for.
Raises:
ValueError or TypeError as appropriate.
|
entailment
|
def implicit_static(cls, for_type=None, for_types=None):
"""Automatically generate implementations for a type.
Implement the protocol for the 'for_type' type by dispatching each
member function of the protocol to an instance method of the same name
declared on the type 'for_type'.
Arguments:
for_type: The type to implicitly implement the protocol with.
Raises:
TypeError if not all implementations are provided by 'for_type'.
"""
for type_ in cls.__get_type_args(for_type, for_types):
implementations = {}
for function in cls.required():
method = getattr(type_, function.__name__, None)
if not callable(method):
raise TypeError(
"%s.implicit invokation on type %r is missing instance "
"method %r."
% (cls.__name__, type_, function.__name__))
implementations[function] = method
for function in cls.optional():
method = getattr(type_, function.__name__, None)
if callable(method):
implementations[function] = method
return cls.implement(for_type=type_,
implementations=implementations)
|
Automatically generate implementations for a type.
Implement the protocol for the 'for_type' type by dispatching each
member function of the protocol to an instance method of the same name
declared on the type 'for_type'.
Arguments:
for_type: The type to implicitly implement the protocol with.
Raises:
TypeError if not all implementations are provided by 'for_type'.
|
entailment
|
def _build_late_dispatcher(func_name):
"""Return a function that calls method 'func_name' on objects.
This is useful for building late-bound dynamic dispatch.
Arguments:
func_name: The name of the instance method that should be called.
Returns:
A function that takes an 'obj' parameter, followed by *args and
returns the result of calling the instance method with the same
name as the contents of 'func_name' on the 'obj' object with the
arguments from *args.
"""
def _late_dynamic_dispatcher(obj, *args):
method = getattr(obj, func_name, None)
if not callable(method):
raise NotImplementedError(
"Instance method %r is not implemented by %r." % (
func_name, obj))
return method(*args)
return _late_dynamic_dispatcher
|
Return a function that calls method 'func_name' on objects.
This is useful for building late-bound dynamic dispatch.
Arguments:
func_name: The name of the instance method that should be called.
Returns:
A function that takes an 'obj' parameter, followed by *args and
returns the result of calling the instance method with the same
name as the contents of 'func_name' on the 'obj' object with the
arguments from *args.
|
entailment
|
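Since the dispatcher is just a closure over a method name, it works on any object that happens to provide a matching method at call time. A quick self-contained usage sketch:

```python
def build_late_dispatcher(func_name):
    """Return a function that calls obj.<func_name>(*args) at call time."""
    def dispatcher(obj, *args):
        method = getattr(obj, func_name, None)
        if not callable(method):
            raise NotImplementedError(
                "Instance method %r is not implemented by %r." % (func_name, obj))
        return method(*args)
    return dispatcher

upper = build_late_dispatcher("upper")
print(upper("late binding"))                    # LATE BINDING
print(build_late_dispatcher("keys")({"a": 1}))  # dict_keys(['a'])
```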
def implicit_dynamic(cls, for_type=None, for_types=None):
"""Automatically generate late dynamic dispatchers to type.
This is similar to 'implicit_static', except instead of binding the
instance methods, it generates a dispatcher that will call whatever
instance method of the same name happens to be available at time of
dispatch.
This has the obvious advantage of supporting arbitrary subclasses, but
can do no verification at bind time.
Arguments:
for_type: The type to implicitly implement the protocol with.
"""
for type_ in cls.__get_type_args(for_type, for_types):
implementations = {}
for function in cls.functions():
implementations[function] = cls._build_late_dispatcher(
func_name=function.__name__)
cls.implement(for_type=type_, implementations=implementations)
|
Automatically generate late dynamic dispatchers to type.
This is similar to 'implicit_static', except instead of binding the
instance methods, it generates a dispatcher that will call whatever
instance method of the same name happens to be available at time of
dispatch.
This has the obvious advantage of supporting arbitrary subclasses, but
can do no verification at bind time.
Arguments:
for_type: The type to implicitly implement the protocol with.
|
entailment
|
def implement(cls, implementations, for_type=None, for_types=None):
"""Provide protocol implementation for a type.
Register all implementations of multimethod functions in this
protocol and add the type into the abstract base class of the
protocol.
Arguments:
implementations: A dict of (function, implementation), where each
function is multimethod and each implementation is a callable.
for_type: The concrete type implementations apply to.
for_types: Same as for_type, but takes a tuple of types.
You may not supply both for_type and for_types for obvious reasons.
Raises:
ValueError for arguments.
TypeError if not all implementations are provided or if there
are issues related to polymorphism (e.g. attempting to
implement a non-multimethod function).
"""
for type_ in cls.__get_type_args(for_type, for_types):
cls._implement_for_type(for_type=type_,
implementations=implementations)
|
Provide protocol implementation for a type.
Register all implementations of multimethod functions in this
protocol and add the type into the abstract base class of the
protocol.
Arguments:
implementations: A dict of (function, implementation), where each
function is multimethod and each implementation is a callable.
for_type: The concrete type implementations apply to.
for_types: Same as for_type, but takes a tuple of types.
You may not supply both for_type and for_types for obvious reasons.
Raises:
ValueError for arguments.
TypeError if not all implementations are provided or if there
are issues related to polymorphism (e.g. attempting to
implement a non-multimethod function).
|
entailment
|
def _parse_query(self, source):
"""Parse one of the rules as either objectfilter or dottysql.
Example:
_parse_query("5 + 5")
# Returns Sum(Literal(5), Literal(5))
Arguments:
source: A rule in either objectfilter or dottysql syntax.
Returns:
The AST to represent the rule.
"""
if self.OBJECTFILTER_WORDS.search(source):
syntax_ = "objectfilter"
else:
syntax_ = None # Default it is.
return query.Query(source, syntax=syntax_)
|
Parse one of the rules as either objectfilter or dottysql.
Example:
_parse_query("5 + 5")
# Returns Sum(Literal(5), Literal(5))
Arguments:
source: A rule in either objectfilter or dottysql syntax.
Returns:
The AST to represent the rule.
|
entailment
|
def _parse_tagfile(self):
"""Parse the tagfile and yield tuples of tag_name, list of rule ASTs."""
rules = None
tag = None
for line in self.original:
match = self.TAG_DECL_LINE.match(line)
if match:
if tag and rules:
yield tag, rules
rules = []
tag = match.group(1)
continue
match = self.TAG_RULE_LINE.match(line)
if match:
source = match.group(1)
rules.append(self._parse_query(source))
|
Parse the tagfile and yield tuples of tag_name, list of rule ASTs.
|
entailment
|
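The implied tagfile format is a tag declaration line followed by one rule per line. A self-contained sketch with hypothetical regexes (the real TAG_DECL_LINE and TAG_RULE_LINE patterns, and the query parsing, are not shown here), plus an explicit flush of the final tag once the input is exhausted:

```python
import re

# Hypothetical patterns: "Some Tag:" declares a tag; an indented line is a rule.
TAG_DECL_LINE = re.compile(r"^(\w[\w ]*):\s*$")
TAG_RULE_LINE = re.compile(r"^\s+(\S.*)$")

def parse_tagfile(lines):
    tag, rules = None, None
    for line in lines:
        match = TAG_DECL_LINE.match(line)
        if match:
            if tag and rules:
                yield tag, rules
            rules = []
            tag = match.group(1)
            continue
        match = TAG_RULE_LINE.match(line)
        if match and rules is not None:
            rules.append(match.group(1))
    if tag and rules:  # flush the last tag when the file ends
        yield tag, rules

lines = [
    "Browsers:",
    "  proc.name == 'chrome.exe'",
    "  proc.name == 'firefox.exe'",
]
print(list(parse_tagfile(lines)))
# [('Browsers', ["proc.name == 'chrome.exe'", "proc.name == 'firefox.exe'"])]
```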
def normalize(expr):
"""Normalize both sides, but don't eliminate the expression."""
lhs = normalize(expr.lhs)
rhs = normalize(expr.rhs)
return type(expr)(lhs, rhs, start=lhs.start, end=rhs.end)
|
Normalize both sides, but don't eliminate the expression.
|
entailment
|
def normalize(expr):
"""No elimination, but normalize arguments."""
args = [normalize(arg) for arg in expr.args]
return type(expr)(expr.func, *args, start=expr.start, end=expr.end)
|
No elimination, but normalize arguments.
|
entailment
|
def normalize(expr):
"""Pass through n-ary expressions, and eliminate empty branches.
Variadic and binary expressions recursively visit all their children.
If all children are eliminated then the parent expression is also
eliminated:
(& [removed] [removed]) => [removed]
If only one child is left, it is promoted to replace the parent node:
(& True) => True
"""
children = []
for child in expr.children:
branch = normalize(child)
if branch is None:
continue
if type(branch) is type(expr):
children.extend(branch.children)
else:
children.append(branch)
if len(children) == 0:
return None
if len(children) == 1:
return children[0]
return type(expr)(*children, start=children[0].start,
end=children[-1].end)
|
Pass through n-ary expressions, and eliminate empty branches.
Variadic and binary expressions recursively visit all their children.
If all children are eliminated then the parent expression is also
eliminated:
(& [removed] [removed]) => [removed]
If only one child is left, it is promoted to replace the parent node:
(& True) => True
|
entailment
|
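The elimination and promotion rules are easy to exercise on a tiny stand-in node type; Intersection below is a hypothetical variadic class, not the efilter one, and the start/end bookkeeping is omitted:

```python
class Intersection(object):
    """Hypothetical variadic node: (& child child ...)."""
    def __init__(self, *children):
        self.children = children
    def __repr__(self):
        return "(& %s)" % " ".join(map(repr, self.children))

def normalize(expr):
    if not isinstance(expr, Intersection):
        return expr                          # leaves pass through unchanged
    children = []
    for child in expr.children:
        branch = normalize(child)
        if branch is None:
            continue                         # eliminated branch
        if type(branch) is type(expr):
            children.extend(branch.children)  # flatten nested (& (& ...))
        else:
            children.append(branch)
    if not children:
        return None                          # everything eliminated
    if len(children) == 1:
        return children[0]                   # (& x) promotes to x
    return Intersection(*children)

print(normalize(Intersection(None, Intersection("a", None), "b")))
# (& 'a' 'b')
```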
def dedupe(items):
"""Remove duplicates from a sequence (of hashable items) while maintaining
order. NOTE: This only works if items in the list are hashable types.
Taken from the Python Cookbook, 3rd ed. Such a great book!
"""
seen = set()
for item in items:
if item not in seen:
yield item
seen.add(item)
|
Remove duplicates from a sequence (of hashable items) while maintaining
order. NOTE: This only works if items in the list are hashable types.
Taken from the Python Cookbook, 3rd ed. Such a great book!
|
entailment
|
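Because dedupe is a generator, it streams results and preserves first-seen order; a quick usage example:

```python
def dedupe(items):
    seen = set()
    for item in items:
        if item not in seen:
            yield item
            seen.add(item)

print(list(dedupe([3, 1, 3, 2, 1])))   # [3, 1, 2]
print(list(dedupe("mississippi")))     # ['m', 'i', 's', 'p']
```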
def _date_range(self, granularity, since, to=None):
"""Returns a generator that yields ``datetime.datetime`` objects from
the ``since`` date until ``to`` (default: *now*).
* ``granularity`` -- The granularity at which the generated datetime
objects should be created: seconds, minutes, hourly, daily, weekly,
monthly, or yearly
* ``since`` -- a ``datetime.datetime`` object, from which we start
generating periods of time. This can also be ``None``, and will
default to the past 7 days if that's the case.
* ``to`` -- a ``datetime.datetime`` object, from which we start
generating periods of time. This can also be ``None``, and will
default to now if that's the case.
If ``granularity`` is one of daily, weekly, monthly, or yearly, this
function gives objects at the daily level.
If ``granularity`` is one of the following, the number of datetime
objects returned is capped, otherwise this code is really slow and
probably generates more data than we want:
* hourly: returns at most 720 values (~30 days)
* minutes: returns at most 480 values (8 hours)
* second: returns at most 300 values (5 minutes)
For example, if granularity is "seconds", we'll receive datetime
objects that differ by 1 second each.
"""
if since is None:
since = datetime.utcnow() - timedelta(days=7) # Default to 7 days
if to is None:
to = datetime.utcnow()
elapsed = (to - since)
# Figure out how many units to generate for the elapsed time.
# I'm going to use `granularity` as a keyword parameter to timedelta,
# so I need to change the wording for hours and anything > days.
if granularity == "seconds":
units = elapsed.total_seconds()
units = 300 if units > 300 else units
elif granularity == "minutes":
units = elapsed.total_seconds() / 60
units = 480 if units > 480 else units
elif granularity == "hourly":
granularity = "hours"
units = elapsed.total_seconds() / 3600
units = 720 if units > 720 else units
else:
granularity = "days"
units = elapsed.days + 1
return (to - timedelta(**{granularity: u}) for u in range(int(units)))
|
Returns a generator that yields ``datetime.datetime`` objects from
the ``since`` date until ``to`` (default: *now*).
* ``granularity`` -- The granularity at which the generated datetime
objects should be created: seconds, minutes, hourly, daily, weekly,
monthly, or yearly
* ``since`` -- a ``datetime.datetime`` object, from which we start
generating periods of time. This can also be ``None``, and will
default to the past 7 days if that's the case.
* ``to`` -- a ``datetime.datetime`` object, from which we start
generating periods of time. This can also be ``None``, and will
default to now if that's the case.
If ``granularity`` is one of daily, weekly, monthly, or yearly, this
function gives objects at the daily level.
If ``granularity`` is one of the following, the number of datetime
objects returned is capped, otherwise this code is really slow and
probably generates more data than we want:
* hourly: returns at most 720 values (~30 days)
* minutes: returns at most 480 values (8 hours)
* second: returns at most 300 values (5 minutes)
For example, if granularity is "seconds", we'll receive datetime
objects that differ by 1 second each.
|
entailment
|
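A trimmed-down, self-contained version of the capping logic shows what the generator yields for the branches described above (the default branch is simplified to days, as in the original):

```python
from datetime import datetime, timedelta

def date_range(granularity, since, to=None):
    """Yield datetimes stepping back from 'to' toward 'since'."""
    to = to or datetime.utcnow()
    since = since or (to - timedelta(days=7))
    elapsed = to - since
    if granularity == "seconds":
        units = min(elapsed.total_seconds(), 300)
    elif granularity == "minutes":
        units = min(elapsed.total_seconds() / 60, 480)
    elif granularity == "hourly":
        granularity, units = "hours", min(elapsed.total_seconds() / 3600, 720)
    else:
        granularity, units = "days", elapsed.days + 1
    return (to - timedelta(**{granularity: u}) for u in range(int(units)))

start = datetime(2020, 1, 1)
days = list(date_range("daily", since=start, to=start + timedelta(days=3)))
print(days)  # four datetimes: Jan 4, Jan 3, Jan 2, Jan 1 (stepping back from 'to')
```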
def _category_slugs(self, category):
"""Returns a set of the metric slugs for the given category"""
key = self._category_key(category)
slugs = self.r.smembers(key)
return slugs
|
Returns a set of the metric slugs for the given category
|
entailment
|
def _categorize(self, slug, category):
"""Add the ``slug`` to the ``category``. We store category data as
as set, with a key of the form::
c:<category name>
The data is set of metric slugs::
"slug-a", "slug-b", ...
"""
key = self._category_key(category)
self.r.sadd(key, slug)
# Store all category names in a Redis set, for easy retrieval
self.r.sadd(self._categories_key, category)
|
Add the ``slug`` to the ``category``. We store category data as
as set, with a key of the form::
c:<category name>
The data is set of metric slugs::
"slug-a", "slug-b", ...
|
entailment
|
def _granularities(self):
"""Returns a generator of all possible granularities based on the
MIN_GRANULARITY and MAX_GRANULARITY settings.
"""
keep = False
for g in GRANULARITIES:
if g == app_settings.MIN_GRANULARITY and not keep:
keep = True
elif g == app_settings.MAX_GRANULARITY and keep:
keep = False
yield g
if keep:
yield g
|
Returns a generator of all possible granularities based on the
MIN_GRANULARITY and MAX_GRANULARITY settings.
|
entailment
|
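In effect the generator slices the ordered GRANULARITIES tuple between the configured minimum and maximum, inclusive. A standalone equivalent with the settings passed in explicitly:

```python
GRANULARITIES = ("seconds", "minutes", "hourly", "daily",
                 "weekly", "monthly", "yearly")

def granularities(min_granularity, max_granularity):
    keep = False
    for g in GRANULARITIES:
        if g == min_granularity and not keep:
            keep = True
        elif g == max_granularity and keep:
            keep = False
            yield g          # include the maximum itself, then stop keeping
        if keep:
            yield g

print(list(granularities("hourly", "monthly")))
# ['hourly', 'daily', 'weekly', 'monthly']
```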
def _build_key_patterns(self, slug, date):
"""Builds an OrderedDict of metric keys and patterns for the given slug
and date."""
# we want to keep the order, from smallest to largest granularity
patts = OrderedDict()
metric_key_patterns = self._metric_key_patterns()
for g in self._granularities():
date_string = date.strftime(metric_key_patterns[g]["date_format"])
patts[g] = metric_key_patterns[g]["key"].format(slug, date_string)
return patts
|
Builds an OrderedDict of metric keys and patterns for the given slug
and date.
|
entailment
|
def _build_keys(self, slug, date=None, granularity='all'):
"""Builds redis keys used to store metrics.
* ``slug`` -- a slug used for a metric, e.g. "user-signups"
* ``date`` -- (optional) A ``datetime.datetime`` object used to
generate the time period for the metric. If omitted, the current date
and time (in UTC) will be used.
* ``granularity`` -- Must be one of: "all" (default), "yearly",
"monthly", "weekly", "daily", "hourly", "minutes", or "seconds".
Returns a list of strings.
"""
slug = slugify(slug) # Ensure slugs have a consistent format
if date is None:
date = datetime.utcnow()
patts = self._build_key_patterns(slug, date)
if granularity == "all":
return list(patts.values())
return [patts[granularity]]
|
Builds redis keys used to store metrics.
* ``slug`` -- a slug used for a metric, e.g. "user-signups"
* ``date`` -- (optional) A ``datetime.datetime`` object used to
generate the time period for the metric. If omitted, the current date
and time (in UTC) will be used.
* ``granularity`` -- Must be one of: "all" (default), "yearly",
"monthly", "weekly", "daily", "hourly", "minutes", or "seconds".
Returns a list of strings.
|
entailment
|
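Putting the pattern table and the date together yields one key per granularity, matching the m:&lt;slug&gt;:... key forms documented for set_metric below. A hedged sketch, where the exact strftime formats are assumptions inferred from those documented key shapes:

```python
from collections import OrderedDict
from datetime import datetime

# Assumed key/date formats, modeled on the documented m:<slug>:... key shapes.
KEY_PATTERNS = OrderedDict([
    ("seconds", ("m:{0}:s:{1}", "%Y-%m-%d-%H-%M-%S")),
    ("minutes", ("m:{0}:i:{1}", "%Y-%m-%d-%H-%M")),
    ("hourly",  ("m:{0}:h:{1}", "%Y-%m-%d-%H")),
    ("daily",   ("m:{0}:{1}",   "%Y-%m-%d")),
    ("weekly",  ("m:{0}:w:{1}", "%Y-%U")),
    ("monthly", ("m:{0}:m:{1}", "%Y-%m")),
    ("yearly",  ("m:{0}:y:{1}", "%Y")),
])

def build_keys(slug, date):
    return [key.format(slug, date.strftime(fmt))
            for key, fmt in KEY_PATTERNS.values()]

print(build_keys("user-signups", datetime(2013, 1, 15, 12, 30, 45)))
# ['m:user-signups:s:2013-01-15-12-30-45', ..., 'm:user-signups:y:2013']
```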
def metric_slugs_by_category(self):
"""Return a dictionary of metrics data indexed by category:
{<category_name>: set(<slug1>, <slug2>, ...)}
"""
result = OrderedDict()
categories = sorted(self.r.smembers(self._categories_key))
for category in categories:
result[category] = self._category_slugs(category)
# We also need to see the uncategorized metric slugs, so need some way
# to check which slugs are not already stored.
categorized_metrics = set([ # Flatten the list of metrics
slug for sublist in result.values() for slug in sublist
])
f = lambda slug: slug not in categorized_metrics
uncategorized = list(set(filter(f, self.metric_slugs())))
if len(uncategorized) > 0:
result['Uncategorized'] = uncategorized
return result
|
Return a dictionary of metrics data indexed by category:
{<category_name>: set(<slug1>, <slug2>, ...)}
|
entailment
|
def delete_metric(self, slug):
"""Removes all keys for the given ``slug``."""
# To remove all keys for a slug, I need to retrieve them all from
# the set of metric keys, This uses the redis "keys" command, which is
# inefficient, but this shouldn't be used all that often.
prefix = "m:{0}:*".format(slug)
keys = self.r.keys(prefix)
self.r.delete(*keys) # Remove the metric data
# Finally, remove the slug from the set
self.r.srem(self._metric_slugs_key, slug)
|
Removes all keys for the given ``slug``.
|
entailment
|
def set_metric(self, slug, value, category=None, expire=None, date=None):
"""Assigns a specific value to the *current* metric. You can use this
to start a metric at a value greater than 0 or to reset a metric.
The given slug will be used to generate Redis keys at the following
granularities: Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``value`` -- The value of the metric.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
"""
keys = self._build_keys(slug, date=date)
# Add the slug to the set of metric slugs
self.r.sadd(self._metric_slugs_key, slug)
# Construct a dictionary of key/values for use with mset
data = {}
for k in keys:
data[k] = value
self.r.mset(data)
# Add the category if applicable.
if category:
self._categorize(slug, category)
# Expire the Metric in ``expire`` seconds if applicable.
if expire:
for k in keys:
self.r.expire(k, expire)
|
Assigns a specific value to the *current* metric. You can use this
to start a metric at a value greater than 0 or to reset a metric.
The given slug will be used to generate Redis keys at the following
granularities: Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``value`` -- The value of the metric.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
|
entailment
|
def metric(self, slug, num=1, category=None, expire=None, date=None):
"""Records a metric, creating it if it doesn't exist or incrementing it
if it does. All metrics are prefixed with 'm', and automatically
aggregate for Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``num`` -- Set or Increment the metric by this number; default is 1.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
"""
# Add the slug to the set of metric slugs
self.r.sadd(self._metric_slugs_key, slug)
if category:
self._categorize(slug, category)
# Increment keys. NOTE: current redis-py (2.7.2) doesn't include an
# incrby method; .incr accepts a second ``amount`` parameter.
keys = self._build_keys(slug, date=date)
# Use a pipeline to speed up incrementing multiple keys
pipe = self.r.pipeline()
for key in keys:
pipe.incr(key, num)
if expire:
pipe.expire(key, expire)
pipe.execute()
|
Records a metric, creating it if it doesn't exist or incrementing it
if it does. All metrics are prefixed with 'm', and automatically
aggregate for Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``num`` -- Set or Increment the metric by this number; default is 1.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
|
entailment
|
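The write path is plain redis-py: one INCRBY per granularity key, optionally followed by EXPIRE, batched in a pipeline. A minimal sketch against a locally running Redis server (the connection details and key names here are assumptions):

```python
import redis

r = redis.StrictRedis(host="localhost", port=6379, db=0)

keys = ["m:user-signups:2013-01-15", "m:user-signups:m:2013-01",
        "m:user-signups:y:2013"]

# Batch the increments (and the optional TTL) into a single round trip.
pipe = r.pipeline()
for key in keys:
    pipe.incr(key, 1)
    pipe.expire(key, 60 * 60 * 24)  # optional expiry, e.g. one day
pipe.execute()

print([int(v) for v in r.mget(keys)])  # e.g. [1, 1, 1] on the first run
```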
def get_metric(self, slug):
"""Get the current values for a metric.
Returns a dictionary with metric values accumulated for the seconds,
minutes, hours, day, week, month, and year.
"""
results = OrderedDict()
granularities = self._granularities()
keys = self._build_keys(slug)
for granularity, key in zip(granularities, keys):
results[granularity] = self.r.get(key)
return results
|
Get the current values for a metric.
Returns a dictionary with metric values accumulated for the seconds,
minutes, hours, day, week, month, and year.
|
entailment
|
def get_metrics(self, slug_list):
"""Get the metrics for multiple slugs.
Returns a list of two-tuples containing the metric slug and a
dictionary like the one returned by ``get_metric``::
(
some-metric, {
'seconds': 0, 'minutes': 0, 'hours': 0,
'day': 0, 'week': 0, 'month': 0, 'year': 0
}
)
"""
# meh. I should have been consistent here, but I'm lazy, so support these
# value names instead of granularity names, but respect the min/max
# granularity settings.
keys = ['seconds', 'minutes', 'hours', 'day', 'week', 'month', 'year']
key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}
keys = [key_mapping[gran] for gran in self._granularities()]
results = []
for slug in slug_list:
metrics = self.r.mget(*self._build_keys(slug))
if any(metrics): # Only if we have data.
results.append((slug, dict(zip(keys, metrics))))
return results
|
Get the metrics for multiple slugs.
Returns a list of two-tuples containing the metric slug and a
dictionary like the one returned by ``get_metric``::
(
some-metric, {
'seconds': 0, 'minutes': 0, 'hours': 0,
'day': 0, 'week': 0, 'month': 0, 'year': 0
}
)
|
entailment
|
def get_category_metrics(self, category):
"""Get metrics belonging to the given category"""
slug_list = self._category_slugs(category)
return self.get_metrics(slug_list)
|
Get metrics belonging to the given category
|
entailment
|
def delete_category(self, category):
"""Removes the category from Redis. This doesn't touch the metrics;
they simply become uncategorized."""
# Remove mapping of metrics-to-category
category_key = self._category_key(category)
self.r.delete(category_key)
# Remove category from Set
self.r.srem(self._categories_key, category)
|
Removes the category from Redis. This doesn't touch the metrics;
they simply become uncategorized.
|
entailment
|
def reset_category(self, category, metric_slugs):
"""Resets (or creates) a category containing a list of metrics.
* ``category`` -- A category name
* ``metric_slugs`` -- a list of all metrics that are members of the
category.
"""
key = self._category_key(category)
if len(metric_slugs) == 0:
# If there are no metrics, just remove the category
self.delete_category(category)
else:
# Save all the slugs in the category, and save the category name
self.r.sadd(key, *metric_slugs)
self.r.sadd(self._categories_key, category)
|
Resets (or creates) a category containing a list of metrics.
* ``category`` -- A category name
* ``metric_slugs`` -- a list of all metrics that are members of the
category.
|
entailment
|
def get_metric_history(self, slugs, since=None, to=None, granularity='daily'):
"""Get history for one or more metrics.
* ``slugs`` -- a slug OR a list of slugs
* ``since`` -- the date from which we start pulling metrics
* ``to`` -- the date until which we start pulling metrics
* ``granularity`` -- seconds, minutes, hourly,
daily, weekly, monthly, yearly
Returns a list of tuples containing the Redis key and the associated
metric::
r = R()
r.get_metric_history('test', granularity='weekly')
[
('m:test:w:2012-52', '15'),
]
To get history for multiple metrics, just provide a list of slugs::
metrics = ['test', 'other']
r.get_metric_history(metrics, granularity='weekly')
[
('m:test:w:2012-52', '15'),
('m:other:w:2012-52', '42'),
]
"""
if not type(slugs) == list:
slugs = [slugs]
# Build the set of Redis keys that we need to get.
keys = []
for slug in slugs:
for date in self._date_range(granularity, since, to):
keys += self._build_keys(slug, date, granularity)
keys = list(dedupe(keys))
# Fetch our data, replacing any None-values with zeros
results = [0 if v is None else v for v in self.r.mget(keys)]
results = zip(keys, results)
return sorted(results, key=lambda t: t[0])
|
Get history for one or more metrics.
* ``slugs`` -- a slug OR a list of slugs
* ``since`` -- the date from which we start pulling metrics
* ``to`` -- the date until which we start pulling metrics
* ``granularity`` -- seconds, minutes, hourly,
daily, weekly, monthly, yearly
Returns a list of tuples containing the Redis key and the associated
metric::
r = R()
r.get_metric_history('test', granularity='weekly')
[
('m:test:w:2012-52', '15'),
]
To get history for multiple metrics, just provide a list of slugs::
metrics = ['test', 'other']
r.get_metric_history(metrics, granularity='weekly')
[
('m:test:w:2012-52', '15'),
('m:other:w:2012-52', '42'),
]
|
entailment
|
def get_metric_history_as_columns(self, slugs, since=None,
granularity='daily'):
"""Provides the same data as ``get_metric_history``, but in a columnar
format. If you had the following yearly history, for example::
[
('m:bar:y:2012', '1'),
('m:bar:y:2013', '2'),
('m:foo:y:2012', '3'),
('m:foo:y:2013', '4')
]
this method would provide you with the following data structure::
[
['Period', 'bar', 'foo']
['y:2012', '1', '3'],
['y:2013', '2', '4'],
]
Note that this also includes a header column. Data in this format may
be useful for certain graphing libraries (I'm looking at you Google
Charts LineChart).
"""
history = self.get_metric_history(slugs, since, granularity=granularity)
_history = [] # new, columnar history
periods = ['Period'] # A separate, single column for the time period
for s in slugs:
column = [s] # store all the data for a single slug
for key, value in history:
# ``metric_slug`` extracts the slug from the Redis Key
if template_tags.metric_slug(key) == s:
column.append(value)
# Get time period value as first column; This value is
# duplicated in the Redis key for each value, so this is a bit
# inefficient, but... oh well.
period = template_tags.strip_metric_prefix(key)
if period not in periods:
periods.append(period)
_history.append(column) # Remember that slug's column of data
# Finally, stick the time periods in the first column.
_history.insert(0, periods)
return list(zip(*_history))
|
Provides the same data as ``get_metric_history``, but in a columnar
format. If you had the following yearly history, for example::
[
('m:bar:y:2012', '1'),
('m:bar:y:2013', '2'),
('m:foo:y:2012', '3'),
('m:foo:y:2013', '4')
]
this method would provide you with the following data structure::
[
['Period', 'bar', 'foo']
['y:2012', '1', '3'],
['y:2013', '2', '4'],
]
Note that this also includes a header column. Data in this format may
be useful for certain graphing libraries (I'm looking at you Google
Charts LineChart).
|
entailment
|
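The columnar reshaping is ultimately a transpose: one column per slug plus a leading 'Period' column, zipped together. A compact sketch over the docstring's example data, with simplified stand-ins for the template_tags helpers:

```python
history = [
    ("m:bar:y:2012", "1"), ("m:bar:y:2013", "2"),
    ("m:foo:y:2012", "3"), ("m:foo:y:2013", "4"),
]

def slug_of(key):
    return key.split(":")[1]             # "m:bar:y:2012" -> "bar"

def period_of(key):
    return ":".join(key.split(":")[2:])  # "m:bar:y:2012" -> "y:2012"

slugs = ["bar", "foo"]
periods = ["Period"] + sorted({period_of(k) for k, _ in history})
columns = [[s] + [v for k, v in history if slug_of(k) == s] for s in slugs]
print(list(zip(*([periods] + columns))))
# [('Period', 'bar', 'foo'), ('y:2012', '1', '3'), ('y:2013', '2', '4')]
```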
def get_metric_history_chart_data(self, slugs, since=None, granularity='daily'):
"""Provides the same data as ``get_metric_history``, but with metrics
data arranged in a format that's easy to plot with Chart.js. If you had
the following yearly history, for example::
[
('m:bar:y:2012', '1'),
('m:bar:y:2013', '2'),
('m:bar:y:2014', '3'),
('m:foo:y:2012', '4'),
('m:foo:y:2013', '5')
('m:foo:y:2014', '6')
]
this method would provide you with the following data structure::
'periods': ['y:2012', 'y:2013', 'y:2014']
'data': [
{
'slug': 'bar',
'values': [1, 2, 3]
},
{
'slug': 'foo',
'values': [4, 5, 6]
},
]
"""
slugs = sorted(slugs)
history = self.get_metric_history(slugs, since, granularity=granularity)
# Convert the history into an intermediate data structure organized
# by periods. Since the history is sorted by key (which includes both
# the slug and the date), the values should be ordered correctly.
periods = []
data = OrderedDict()
for k, v in history:
period = template_tags.strip_metric_prefix(k)
if period not in periods:
periods.append(period)
slug = template_tags.metric_slug(k)
if slug not in data:
data[slug] = []
data[slug].append(v)
# Now, reorganize data for our end result.
metrics = {'periods': periods, 'data': []}
for slug, values in data.items():
metrics['data'].append({
'slug': slug,
'values': values
})
return metrics
|
Provides the same data as ``get_metric_history``, but with metrics
data arranged in a format that's easy to plot with Chart.js. If you had
the following yearly history, for example::
[
('m:bar:y:2012', '1'),
('m:bar:y:2013', '2'),
('m:bar:y:2014', '3'),
('m:foo:y:2012', '4'),
('m:foo:y:2013', '5')
('m:foo:y:2014', '6')
]
this method would provide you with the following data structure::
'periods': ['y:2012', 'y:2013', 'y:2014']
'data': [
{
'slug': 'bar',
'values': [1, 2, 3]
},
{
'slug': 'foo',
'values': [4, 5, 6]
},
]
|
entailment
|
def gauge(self, slug, current_value):
"""Set the value for a Gauge.
* ``slug`` -- the unique identifier (or key) for the Gauge
* ``current_value`` -- the value that the gauge should display
"""
k = self._gauge_key(slug)
self.r.sadd(self._gauge_slugs_key, slug) # keep track of all Gauges
self.r.set(k, current_value)
|
Set the value for a Gauge.
* ``slug`` -- the unique identifier (or key) for the Gauge
* ``current_value`` -- the value that the gauge should display
|
entailment
|
def delete_gauge(self, slug):
"""Removes all gauges with the given ``slug``."""
key = self._gauge_key(slug)
self.r.delete(key) # Remove the Gauge
self.r.srem(self._gauge_slugs_key, slug)
|
Removes all gauges with the given ``slug``.
|
entailment
|
def metrics_since(slugs, years, link_type="detail", granularity=None):
"""Renders a template with a menu to view a metric (or metrics) for a
given number of years.
* ``slugs`` -- A Slug or a set/list of slugs
* ``years`` -- Number of years to show past metrics
* ``link_type`` -- What type of chart do we want ("history" or "aggregate")
* history -- use when displaying a single metric's history
* aggregate -- use when displaying aggregate metric history
* ``granularity`` -- For "history" only; show the metric's granularity;
default is "daily"
"""
now = datetime.utcnow()
# Determine if we're looking at one slug or multiple slugs
if type(slugs) in [list, set]:
slugs = "+".join(s.lower().strip() for s in slugs)
# Set the default granularity if it's omitted
granularity = granularity.lower().strip() if granularity else "daily"
# Each item is: (slug, since, text, granularity)
# Always include values for Today, 1 week, 30 days, 60 days, 90 days...
slug_values = [
(slugs, now - timedelta(days=1), "Today", granularity),
(slugs, now - timedelta(days=7), "1 Week", granularity),
(slugs, now - timedelta(days=30), "30 Days", granularity),
(slugs, now - timedelta(days=60), "60 Days", granularity),
(slugs, now - timedelta(days=90), "90 Days", granularity),
]
# Then an additional number of years
for y in range(1, years + 1):
t = now - timedelta(days=365 * y)
text = "{0} Years".format(y)
slug_values.append((slugs, t, text, granularity))
return {'slug_values': slug_values, 'link_type': link_type.lower().strip()}
|
Renders a template with a menu to view a metric (or metrics) for a
given number of years.
* ``slugs`` -- A Slug or a set/list of slugs
* ``years`` -- Number of years to show past metrics
* ``link_type`` -- What type of chart do we want ("history" or "aggregate")
* history -- use when displaying a single metric's history
* aggregate -- use when displaying aggregate metric history
* ``granularity`` -- For "history" only; show the metric's granularity;
default is "daily"
|
entailment
|
def gauge(slug, maximum=9000, size=200, coerce='float'):
"""Include a Donut Chart for the specified Gauge.
* ``slug`` -- the unique slug for the Gauge.
* ``maximum`` -- The maximum value for the gauge (default is 9000)
* ``size`` -- The size (in pixels) of the gauge (default is 200)
* ``coerce`` -- type to which gauge values should be coerced. The default
is float. Use ``{% gauge some_slug coerce='int' %}`` to coerce to integer
"""
coerce_options = {'float': float, 'int': int, 'str': str}
coerce = coerce_options.get(coerce, float)
redis = get_r()
value = coerce(redis.get_gauge(slug))
if value < maximum and coerce == float:
diff = round(maximum - value, 2)
elif value < maximum:
diff = maximum - value
else:
diff = 0
return {
'slug': slug,
'current_value': value,
'max_value': maximum,
'size': size,
'diff': diff,
}
|
Include a Donut Chart for the specified Gauge.
* ``slug`` -- the unique slug for the Gauge.
* ``maximum`` -- The maximum value for the gauge (default is 9000)
* ``size`` -- The size (in pixels) of the gauge (default is 200)
* ``coerce`` -- type to which gauge values should be coerced. The default
is float. Use ``{% gauge some_slug coerce='int' %}`` to coerce to integer
|
entailment
|
def metric_detail(slug, with_data_table=False):
"""Template Tag to display a metric's *current* detail.
* ``slug`` -- the metric's unique slug
* ``with_data_table`` -- if True, prints the raw data in a table.
"""
r = get_r()
granularities = list(r._granularities())
metrics = r.get_metric(slug)
metrics_data = []
for g in granularities:
metrics_data.append((g, metrics[g]))
return {
'granularities': [g.title() for g in granularities],
'slug': slug,
'metrics': metrics_data,
'with_data_table': with_data_table,
}
|
Template Tag to display a metric's *current* detail.
* ``slug`` -- the metric's unique slug
* ``with_data_table`` -- if True, prints the raw data in a table.
|
entailment
|
def metric_history(slug, granularity="daily", since=None, to=None,
with_data_table=False):
"""Template Tag to display a metric's history.
* ``slug`` -- the metric's unique slug
* ``granularity`` -- the granularity: daily, hourly, weekly, monthly, yearly
* ``since`` -- a datetime object or a string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``to`` -- the date until which we start pulling metrics
* ``with_data_table`` -- if True, prints the raw data in a table.
"""
r = get_r()
try:
if since and len(since) == 10: # yyyy-mm-dd
since = datetime.strptime(since, "%Y-%m-%d")
elif since and len(since) == 19: # yyyy-mm-dd HH:MM:ss
since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
if to and len(to) == 10: # yyyy-mm-dd
to = datetime.strptime(to, "%Y-%m-%d")
elif to and len(to) == 19: # yyyy-mm-dd HH:MM:ss
to = datetime.strptime(to, "%Y-%m-%d %H:%M:%S")
except (TypeError, ValueError):
# assume we got a datetime object or leave since = None
pass
metric_history = r.get_metric_history(
slugs=slug,
since=since,
to=to,
granularity=granularity
)
return {
'since': since,
'to': to,
'slug': slug,
'granularity': granularity,
'metric_history': metric_history,
'with_data_table': with_data_table,
}
|
Template Tag to display a metric's history.
* ``slug`` -- the metric's unique slug
* ``granularity`` -- the granularity: daily, hourly, weekly, monthly, yearly
* ``since`` -- a datetime object or a string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``to`` -- the date until which we start pulling metrics
* ``with_data_table`` -- if True, prints the raw data in a table.
|
entailment
|
def aggregate_detail(slug_list, with_data_table=False):
"""Template Tag to display multiple metrics.
* ``slug_list`` -- A list of slugs to display
* ``with_data_table`` -- if True, prints the raw data in a table.
"""
r = get_r()
metrics_data = []
granularities = r._granularities()
# XXX converting granularities into their key-name for metrics.
keys = ['seconds', 'minutes', 'hours', 'day', 'week', 'month', 'year']
key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}
keys = [key_mapping[gran] for gran in granularities]
# Our metrics data is of the form:
#
# (slug, {time_period: value, ... }).
#
# Let's convert this to (slug, list_of_values) so that the list of
# values is in the same order as the granularities
for slug, data in r.get_metrics(slug_list):
values = [data[t] for t in keys]
metrics_data.append((slug, values))
return {
'chart_id': "metric-aggregate-{0}".format("-".join(slug_list)),
'slugs': slug_list,
'metrics': metrics_data,
'with_data_table': with_data_table,
'granularities': [g.title() for g in keys],
}
|
Template Tag to display multiple metrics.
* ``slug_list`` -- A list of slugs to display
* ``with_data_table`` -- if True, prints the raw data in a table.
|
entailment
|
def aggregate_history(slugs, granularity="daily", since=None, with_data_table=False):
"""Template Tag to display history for multiple metrics.
* ``slug_list`` -- A list of slugs to display
* ``granularity`` -- the granularity: seconds, minutes, hourly,
daily, weekly, monthly, yearly
* ``since`` -- a datetime object or a string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``with_data_table`` -- if True, prints the raw data in a table.
"""
r = get_r()
slugs = list(slugs)
try:
if since and len(since) == 10: # yyyy-mm-dd
since = datetime.strptime(since, "%Y-%m-%d")
elif since and len(since) == 19: # yyyy-mm-dd HH:MM:ss
since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
except (TypeError, ValueError):
# assume we got a datetime object or leave since = None
pass
history = r.get_metric_history_chart_data(
slugs=slugs,
since=since,
granularity=granularity
)
return {
'chart_id': "metric-aggregate-history-{0}".format("-".join(slugs)),
'slugs': slugs,
'since': since,
'granularity': granularity,
'metric_history': history,
'with_data_table': with_data_table,
}
|
Template Tag to display history for multiple metrics.
* ``slug_list`` -- A list of slugs to display
* ``granularity`` -- the granularity: seconds, minutes, hourly,
daily, weekly, monthly, yearly
* ``since`` -- a datetime object or a string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``with_data_table`` -- if True, prints the raw data in a table.
|
entailment
|
def apply(query, replacements=None, vars=None, allow_io=False,
libs=("stdcore", "stdmath")):
"""Run 'query' on 'vars' and return the result(s).
Arguments:
query: A query object or string with the query.
replacements: Built-time parameters to the query, either as dict or
as an array (for positional interpolation).
vars: The variables to be supplied to the query solver.
allow_io: (Default: False) Include 'stdio' and allow IO functions.
libs: Iterable of library modules to include, given as strings.
Default: ('stdcore', 'stdmath')
For full list of bundled libraries, see efilter.stdlib.
Note: 'stdcore' must always be included.
WARNING: Including 'stdio' must be done in conjunction with
'allow_io'. This is to make enabling IO explicit. 'allow_io'
implies that 'stdio' should be included and so adding it to
libs is actually not required.
Notes on IO: If allow_io is set to True then 'stdio' will be included and
the EFILTER query will be allowed to read files from disk. Use this with
caution.
If the query returns a lazily-evaluated result that depends on reading
from a file (for example, filtering a CSV file) then the file
descriptor will remain open until the returned result is deallocated.
The caller is responsible for releasing the result when it's no longer
needed.
Returns:
The result of evaluating the query. The type of the output will depend
on the query, and can be predicted using 'infer' (provided reflection
callbacks are implemented). In the common case of a SELECT query the
return value will be an iterable of filtered data (actually an object
implementing IRepeated, as well as __iter__.)
A word on cardinality of the return value:
Types in EFILTER always refer to a scalar. If apply returns more than
one value, the type returned by 'infer' will refer to the type of
the value inside the returned container.
If you're unsure whether your query returns one or more values (rows),
use the 'getvalues' function.
Raises:
efilter.errors.EfilterError if there are issues with the query.
Examples:
apply("5 + 5") # -> 10
apply("SELECT * FROM people WHERE age > 10",
vars={"people":({"age": 10, "name": "Bob"},
{"age": 20, "name": "Alice"},
{"age": 30, "name": "Eve"}))
# This will replace the question mark (?) with the string "Bob" in a
# safe manner, preventing SQL injection.
apply("SELECT * FROM people WHERE name = ?", replacements=["Bob"], ...)
"""
if vars is None:
vars = {}
if allow_io:
libs = list(libs)
libs.append("stdio")
query = q.Query(query, params=replacements)
stdcore_included = False
for lib in libs:
if lib == "stdcore":
stdcore_included = True
# 'solve' always includes this automatically - we don't have a say
# in the matter.
continue
if lib == "stdio" and not allow_io:
raise ValueError("Attempting to include 'stdio' but IO not "
"enabled. Pass allow_io=True.")
module = std_core.LibraryModule.ALL_MODULES.get(lib)
if not module:
raise ValueError("There is no standard library module %r." % lib)
vars = scope.ScopeStack(module, vars)
if not stdcore_included:
raise ValueError("EFILTER cannot work without standard lib 'stdcore'.")
results = solve.solve(query, vars).value
return results
|
Run 'query' on 'vars' and return the result(s).
Arguments:
query: A query object or string with the query.
replacements: Built-time parameters to the query, either as dict or
as an array (for positional interpolation).
vars: The variables to be supplied to the query solver.
allow_io: (Default: False) Include 'stdio' and allow IO functions.
libs: Iterable of library modules to include, given as strings.
Default: ('stdcore', 'stdmath')
For full list of bundled libraries, see efilter.stdlib.
Note: 'stdcore' must always be included.
WARNING: Including 'stdio' must be done in conjunction with
'allow_io'. This is to make enabling IO explicit. 'allow_io'
implies that 'stdio' should be included and so adding it to
libs is actually not required.
Notes on IO: If allow_io is set to True then 'stdio' will be included and
the EFILTER query will be allowed to read files from disk. Use this with
caution.
If the query returns a lazily-evaluated result that depends on reading
from a file (for example, filtering a CSV file) then the file
descriptor will remain open until the returned result is deallocated.
The caller is responsible for releasing the result when it's no longer
needed.
Returns:
The result of evaluating the query. The type of the output will depend
on the query, and can be predicted using 'infer' (provided reflection
callbacks are implemented). In the common case of a SELECT query the
return value will be an iterable of filtered data (actually an object
implementing IRepeated, as well as __iter__.)
A word on cardinality of the return value:
Types in EFILTER always refer to a scalar. If apply returns more than
one value, the type returned by 'infer' will refer to the type of
the value inside the returned container.
If you're unsure whether your query returns one or more values (rows),
use the 'getvalues' function.
Raises:
efilter.errors.EfilterError if there are issues with the query.
Examples:
apply("5 + 5") # -> 10
apply("SELECT * FROM people WHERE age > 10",
vars={"people":({"age": 10, "name": "Bob"},
{"age": 20, "name": "Alice"},
{"age": 30, "name": "Eve"})})
# This will replace the question mark (?) with the string "Bob" in a
# safe manner, preventing SQL injection.
apply("SELECT * FROM people WHERE name = ?", replacements=["Bob"], ...)
|
entailment
|
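For context, a minimal end-to-end use of the apply() API documented above might look like the sketch below. It assumes the function is exposed as efilter.api.apply; the 'people' variable and the query are illustrative.

from efilter import api   # assumed import path for the apply() shown above

people = ({"age": 10, "name": "Bob"},
          {"age": 20, "name": "Alice"},
          {"age": 30, "name": "Eve"})

# SELECT returns a repeated (iterable) result; each row is a filtered entry.
for row in api.apply("SELECT name FROM people WHERE age > 15",
                     vars={"people": people}):
    print(row)   # expected to yield the rows for Alice and Eve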
def user_func(func, arg_types=None, return_type=None):
"""Create an EFILTER-callable version of function 'func'.
As a security precaution, EFILTER will not execute Python callables
unless they implement the IApplicative protocol. There is a perfectly good
implementation of this protocol in the standard library and user functions
can inherit from it.
This will declare a subclass of the standard library TypedFunction and
return an instance of it that EFILTER will happily call.
Arguments:
func: A Python callable that will serve as the implementation.
arg_types (optional): A tuple of argument types. If the function takes
keyword arguments, they must still have a defined order.
return_type (optional): The type the function returns.
Returns:
An instance of a custom subclass of efilter.stdlib.core.TypedFunction.
Examples:
def my_callback(tag):
print("I got %r" % tag)
api.apply("if True then my_callback('Hello World!')",
vars={
"my_callback": api.user_func(my_callback)
})
# This should print "I got 'Hello World!'".
"""
class UserFunction(std_core.TypedFunction):
name = func.__name__
def __call__(self, *args, **kwargs):
return func(*args, **kwargs)
@classmethod
def reflect_static_args(cls):
return arg_types
@classmethod
def reflect_static_return(cls):
return return_type
return UserFunction()
|
Create an EFILTER-callable version of function 'func'.
As a security precaution, EFILTER will not execute Python callables
unless they implement the IApplicative protocol. There is a perfectly good
implementation of this protocol in the standard library and user functions
can inherit from it.
This will declare a subclass of the standard library TypedFunction and
return an instance of it that EFILTER will happily call.
Arguments:
func: A Python callable that will serve as the implementation.
arg_types (optional): A tuple of argument types. If the function takes
keyword arguments, they must still have a defined order.
return_type (optional): The type the function returns.
Returns:
An instance of a custom subclass of efilter.stdlib.core.TypedFunction.
Examples:
def my_callback(tag):
print("I got %r" % tag)
api.apply("if True then my_callback('Hello World!')",
vars={
"my_callback": api.user_func(my_callback)
})
# This should print "I got 'Hello World!'".
|
entailment
|
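A hedged sketch of wiring a plain Python callable into a query via user_func(); the callable, the query and the efilter.api import path are assumptions for illustration.

from efilter import api   # assumed import path

def double(x):
    return x * 2

result = api.apply("double(21)",
                   vars={"double": api.user_func(double,
                                                 arg_types=(int,),
                                                 return_type=int)})
# 'result' is expected to be 42; without the user_func wrapper, EFILTER would
# refuse to call the bare Python function.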
def infer(query, replacements=None, root_type=None,
libs=("stdcore", "stdmath")):
"""Determine the type of the query's output without actually running it.
Arguments:
query: A query object or string with the query.
replacements: Built-time parameters to the query, either as dict or as
an array (for positional interpolation).
root_type: The types of variables to be supplied to the query inference.
libs: What standard libraries should be taken into account for the
inference.
Returns:
The type of the query's output, if it can be determined. If undecidable,
returns efilter.protocol.AnyType.
NOTE: The inference returns the type of a row in the results, not of the
actual Python object returned by 'apply'. For example, if a query
returns multiple rows, each one of which is an integer, the type of the
output is considered to be int, not a collection of rows.
Examples:
infer("5 + 5") # -> INumber
infer("SELECT * FROM people WHERE age > 10") # -> AnyType
# If root_type implements the IStructured reflection API:
infer("SELECT * FROM people WHERE age > 10", root_type=...) # -> dict
"""
# Always make the scope stack start with stdcore.
if root_type:
type_scope = scope.ScopeStack(std_core.MODULE, root_type)
else:
type_scope = scope.ScopeStack(std_core.MODULE)
stdcore_included = False
for lib in libs:
if lib == "stdcore":
stdcore_included = True
continue
module = std_core.LibraryModule.ALL_MODULES.get(lib)
if not module:
raise TypeError("No standard library module %r." % lib)
type_scope = scope.ScopeStack(module, type_scope)
if not stdcore_included:
raise TypeError("'stdcore' must always be included.")
query = q.Query(query, params=replacements)
return infer_type.infer_type(query, type_scope)
|
Determine the type of the query's output without actually running it.
Arguments:
query: A query object or string with the query.
replacements: Built-time parameters to the query, either as dict or as
an array (for positional interpolation).
root_type: The types of variables to be supplied to the query inference.
libs: What standard libraries should be taken into account for the
inference.
Returns:
The type of the query's output, if it can be determined. If undecidable,
returns efilter.protocol.AnyType.
NOTE: The inference returns the type of a row in the results, not of the
actual Python object returned by 'apply'. For example, if a query
returns multiple rows, each one of which is an integer, the type of the
output is considered to be int, not a collection of rows.
Examples:
infer("5 + 5") # -> INumber
infer("SELECT * FROM people WHERE age > 10") # -> AnyType
# If root_type implements the IStructured reflection API:
infer("SELECT * FROM people WHERE age > 10", root_type=...) # -> dict
|
entailment
|
def search(query, data, replacements=None):
"""Yield objects from 'data' that match the 'query'."""
query = q.Query(query, params=replacements)
for entry in data:
if solve.solve(query, entry).value:
yield entry
|
Yield objects from 'data' that match the 'query'.
|
entailment
|
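A short, hedged example of search() over in-memory dicts; the field names and the efilter.api import path are illustrative assumptions.

from efilter import api   # assumed import path

data = [{"name": "Alice", "age": 20},
        {"name": "Bob", "age": 10}]

# Each entry is used as the variable scope for the query, so 'age' resolves
# against the individual dict.
adults = list(api.search("age > 15", data))   # expected: the Alice entry only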
def peek(self, steps=1):
"""Look ahead, doesn't affect current_token and next_token."""
try:
tokens = iter(self)
for _ in six.moves.range(steps):
next(tokens)
return next(tokens)
except StopIteration:
return None
|
Look ahead, doesn't affect current_token and next_token.
|
entailment
|
def skip(self, steps=1):
"""Skip ahead by 'steps' tokens."""
for _ in six.moves.range(steps):
self.next_token()
|
Skip ahead by 'steps' tokens.
|
entailment
|
def next_token(self):
"""Returns the next logical token, advancing the tokenizer."""
if self.lookahead:
self.current_token = self.lookahead.popleft()
return self.current_token
self.current_token = self._parse_next_token()
return self.current_token
|
Returns the next logical token, advancing the tokenizer.
|
entailment
|
def _parse_next_token(self):
"""Will parse patterns until it gets to the next token or EOF."""
while self._position < self.limit:
token = self._next_pattern()
if token:
return token
return None
|
Will parse patterns until it gets to the next token or EOF.
|
entailment
|
def _next_pattern(self):
"""Parses the next pattern by matching each in turn."""
current_state = self.state_stack[-1]
position = self._position
for pattern in self.patterns:
if current_state not in pattern.states:
continue
m = pattern.regex.match(self.source, position)
if not m:
continue
position = m.end()
token = None
if pattern.next_state:
self.state_stack.append(pattern.next_state)
if pattern.action:
callback = getattr(self, pattern.action, None)
if callback is None:
raise RuntimeError(
"No method defined for pattern action %s!" %
pattern.action)
if "token" in m.groupdict():
value = m.group("token")
else:
value = m.group(0)
token = callback(string=value, match=m,
pattern=pattern)
self._position = position
return token
self._error("Don't know how to match next. Did you forget quotes?",
start=self._position, end=self._position + 1)
|
Parses the next pattern by matching each in turn.
|
entailment
|
def _error(self, message, start, end=None):
"""Raise a nice error, with the token highlighted."""
raise errors.EfilterParseError(
source=self.source, start=start, end=end, message=message)
|
Raise a nice error, with the token highlighted.
|
entailment
|
def emit(self, string, match, pattern, **_):
"""Emits a token using the current pattern match and pattern label."""
return grammar.Token(name=pattern.name, value=string,
start=match.start(), end=match.end())
|
Emits a token using the current pattern match and pattern label.
|
entailment
|
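The lexer methods above implement a state- and pattern-table driven tokenizer. The standalone sketch below illustrates the core idea (try each pattern at the current position, emit a token for the first match); the names and patterns are hypothetical and much simpler than the real grammar.

import collections
import re

Token = collections.namedtuple("Token", "name value start end")
Pattern = collections.namedtuple("Pattern", "name regex")

# Hypothetical pattern table; the real tokenizer also tracks lexer states
# and per-pattern actions.
PATTERNS = (
    Pattern("whitespace", re.compile(r"\s+")),
    Pattern("number", re.compile(r"\d+")),
    Pattern("symbol", re.compile(r"[A-Za-z_]\w*")),
)

def next_token(source, position):
    """Return (token_or_None, new_position) for the first matching pattern."""
    for pattern in PATTERNS:
        m = pattern.regex.match(source, position)
        if not m:
            continue
        if pattern.name == "whitespace":
            return None, m.end()   # skipped, no token emitted
        return Token(pattern.name, m.group(0), m.start(), m.end()), m.end()
    raise SyntaxError("Don't know how to match at position %d." % position)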
def get_pkg_version():
"""Get version string by parsing PKG-INFO."""
try:
with open("PKG-INFO", "r") as fp:
rgx = re.compile(r"Version: (\d+)")
for line in fp.readlines():
match = rgx.match(line)
if match:
return match.group(1)
except IOError:
return None
|
Get version string by parsing PKG-INFO.
|
entailment
|
def get_version(dev_version=False):
"""Generates a version string.
Arguments:
dev_version: Generate a verbose development version from git commits.
Examples:
1.1
1.1.dev43 # If 'dev_version' was passed.
"""
if dev_version:
version = git_dev_version()
if not version:
raise RuntimeError("Could not generate dev version from git.")
return version
return "1!%d.%d" % (MAJOR, MINOR)
|
Generates a version string.
Arguments:
dev_version: Generate a verbose development version from git commits.
Examples:
1.1
1.1.dev43 # If 'dev_version' was passed.
|
entailment
|
def _heartbeat(self):
"""
**Purpose**: Method to be executed in the heartbeat thread. This method sends a 'request' to the
heartbeat-req queue. It expects a 'response' message from the 'heartbeat-res' queue within 10 seconds. This
message should contain the same correlation id. If no message is received in 10 seconds, the tmgr is assumed
dead. The end_manager() is called to cleanly terminate tmgr process and the heartbeat thread is also
terminated.
**Details**: The AppManager can re-invoke both if the execution is still not complete.
"""
try:
self._prof.prof('heartbeat thread started', uid=self._uid)
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
response = True
while (response and (not self._hb_terminate.is_set())):
response = False
corr_id = str(uuid.uuid4())
# Heartbeat request signal sent to task manager via rpc-queue
mq_channel.basic_publish(exchange='',
routing_key=self._hb_request_q,
properties=pika.BasicProperties(
reply_to=self._hb_response_q,
correlation_id=corr_id),
body='request')
self._logger.info('Sent heartbeat request')
# mq_connection.close()
# Sleep for hb_interval and then check if tmgr responded
mq_connection.sleep(self._hb_interval)
# mq_connection = pika.BlockingConnection(
# pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
# mq_channel = mq_connection.channel()
method_frame, props, body = mq_channel.basic_get(queue=self._hb_response_q)
if body:
if corr_id == props.correlation_id:
self._logger.info('Received heartbeat response')
response = True
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
# Appease pika cos it thinks the connection is dead
# mq_connection.close()
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to cancel tmgr process gracefully...')
raise KeyboardInterrupt
except Exception as ex:
self._logger.exception('Heartbeat failed with error: %s' % ex)
raise
finally:
try:
mq_connection.close()
except:
self._logger.warning('mq_connection not created')
self._prof.prof('terminating heartbeat thread', uid=self._uid)
|
**Purpose**: Method to be executed in the heartbeat thread. This method sends a 'request' to the
heartbeat-req queue. It expects a 'response' message from the 'heartbeat-res' queue within 10 seconds. This
message should contain the same correlation id. If no message is received in 10 seconds, the tmgr is assumed
dead. The end_manager() is called to cleanly terminate tmgr process and the heartbeat thread is also
terminated.
**Details**: The AppManager can re-invoke both if the execution is still not complete.
|
entailment
|
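The tmgr side of the heartbeat protocol described above amounts to echoing a 'response' on the reply_to queue with the same correlation id. Below is a minimal, hedged sketch using pika's blocking API; the queue name and host are illustrative, not the real configuration.

import time
import pika

hb_request_q = "heartbeat-req"   # illustrative queue name

connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
channel = connection.channel()
channel.queue_declare(queue=hb_request_q)

while True:
    method_frame, props, body = channel.basic_get(queue=hb_request_q)
    if body:
        # Echo the correlation id back on the queue named in reply_to so the
        # requesting side can match this response to its request.
        channel.basic_publish(exchange="",
                              routing_key=props.reply_to,
                              properties=pika.BasicProperties(
                                  correlation_id=props.correlation_id),
                              body="response")
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)
    time.sleep(1)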
def _tmgr(self, uid, rmgr, logger, mq_hostname, port, pending_queue, completed_queue):
"""
**Purpose**: Method to be run by the tmgr process. This method receives a Task from the pending_queue
and submits it to the RTS. All state transitions are synced (blocking) with the AppManager
in the master process.
In addition, the tmgr also receives heartbeat 'request' messages from the heartbeat-req queue. It responds with a
'response' message to the 'heartbeat-res' queue.
**Details**: The AppManager can re-invoke the tmgr process with this function if the execution of the workflow is
still incomplete. The tmgr also populates a dictionary, placeholder_dict, which stores the path of each
task on the remote machine.
"""
raise NotImplementedError('_tmgr() method ' +
'not implemented in TaskManager for %s' % self._rts)
|
**Purpose**: Method to be run by the tmgr process. This method receives a Task from the pending_queue
and submits it to the RTS. All state transitions are synced (blocking) with the AppManager
in the master process.
In addition, the tmgr also receives heartbeat 'request' messages from the heartbeat-req queue. It responds with a
'response' message to the 'heartbeat-res' queue.
**Details**: The AppManager can re-invoke the tmgr process with this function if the execution of the workflow is
still incomplete. The tmgr also populates a dictionary, placeholder_dict, which stores the path of each
task on the remote machine.
|
entailment
|
def start_heartbeat(self):
"""
**Purpose**: Method to start the heartbeat thread. The heartbeat function
is not to be accessed directly. The function is started in a separate
thread using this method.
"""
if not self._hb_thread:
try:
self._logger.info('Starting heartbeat thread')
self._prof.prof('creating heartbeat thread', uid=self._uid)
self._hb_terminate = threading.Event()
self._hb_thread = threading.Thread(target=self._heartbeat, name='heartbeat')
self._prof.prof('starting heartbeat thread', uid=self._uid)
self._hb_thread.start()
return True
except Exception as ex:
self._logger.exception('Heartbeat not started, error: %s' % ex)
self.terminate_heartbeat()
raise
else:
self._logger.warn('Heartbeat thread already running, but attempted to restart!')
|
**Purpose**: Method to start the heartbeat thread. The heartbeat function
is not to be accessed directly. The function is started in a separate
thread using this method.
|
entailment
|
def terminate_heartbeat(self):
"""
**Purpose**: Method to terminate the heartbeat thread. This method is
blocking as it waits for the heartbeat thread to terminate (aka join).
This is the last method that is executed from the TaskManager and
hence closes the profiler.
"""
try:
if self._hb_thread:
self._hb_terminate.set()
if self.check_heartbeat():
self._hb_thread.join()
self._hb_thread = None
self._logger.info('Heartbeat thread terminated')
self._prof.prof('heartbeat thread terminated', uid=self._uid)
# We close in the heartbeat because it ends after the tmgr process
self._prof.close()
except Exception as ex:
self._logger.exception('Could not terminate heartbeat thread')
raise
finally:
if not (self.check_heartbeat() or self.check_manager()):
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
# To respond to heartbeat - get request from rpc_queue
mq_channel.queue_delete(queue=self._hb_response_q)
mq_channel.queue_delete(queue=self._hb_request_q)
mq_connection.close()
|
**Purpose**: Method to terminate the heartbeat thread. This method is
blocking as it waits for the heartbeat thread to terminate (aka join).
This is the last method that is executed from the TaskManager and
hence closes the profiler.
|
entailment
|
def terminate_manager(self):
"""
**Purpose**: Method to terminate the tmgr process. This method is
blocking as it waits for the tmgr process to terminate (aka join).
"""
try:
if self._tmgr_process:
if not self._tmgr_terminate.is_set():
self._tmgr_terminate.set()
if self.check_manager():
self._tmgr_process.join()
self._tmgr_process = None
self._logger.info('Task manager process closed')
self._prof.prof('tmgr process terminated', uid=self._uid)
except Exception as ex:
self._logger.exception('Could not terminate task manager process')
raise
|
**Purpose**: Method to terminate the tmgr process. This method is
blocking as it waits for the tmgr process to terminate (aka join).
|
entailment
|
def getvalues(self):
"""Yields all the values from 'generator_func' and type-checks.
Yields:
Whatever 'generator_func' yields.
Raises:
TypeError: if subsequent values are of a different type than first
value.
ValueError: if subsequent iteration returns a different number of
values than the first iteration over the generator. (This would
mean 'generator_func' is not stable.)
"""
idx = 0
generator = self._generator_func()
first_value = next(generator)
self._value_type = type(first_value)
yield first_value
for idx, value in enumerate(generator):
if not isinstance(value, self._value_type):
raise TypeError(
"All values of a repeated var must be of the same type."
" First argument was of type %r, but argument %r is of"
" type %r." %
(self._value_type, value, repeated.value_type(value)))
self._watermark = max(self._watermark, idx + 1)
yield value
# Iteration stopped - check if we're at the previous watermark and raise
# if not.
if idx + 1 < self._watermark:
raise ValueError(
"LazyRepetition %r was previously able to iterate its"
" generator up to idx %d, but this time iteration stopped after"
" idx %d! Generator function %r is not stable." %
(self, self._watermark, idx + 1, self._generator_func))
# Watermark is higher than previous count! Generator function returned
# more values this time than last time.
if self._count is not None and self._watermark >= self._count:
raise ValueError(
"LazyRepetition %r previously iterated only up to idx %d but"
" was now able to reach idx %d! Generator function %r is not"
" stable." %
(self, self._count - 1, idx + 1, self._generator_func))
# We've finished iteration - cache count. After this the count will be
# watermark + 1 forever.
self._count = self._watermark + 1
|
Yields all the values from 'generator_func' and type-checks.
Yields:
Whatever 'generator_func' yields.
Raises:
TypeError: if subsequent values are of a different type than first
value.
ValueError: if subsequent iteration returns a different number of
values than the first iteration over the generator. (This would
mean 'generator_func' is not stable.)
|
entailment
|
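The watermark/count bookkeeping above enforces that the wrapped generator function is stable, i.e. yields the same values on every pass. A small, self-contained illustration of what 'stable' means (pure Python, no efilter imports assumed):

def stable():
    # Safe to wrap: every call yields exactly the same values.
    for i in (1, 2, 3):
        yield i

state = {"n": 3}
def unstable():
    # Unsafe to wrap: each call yields one more value than the last, so a
    # second pass would no longer match the watermark recorded on the first
    # pass and getvalues() would raise ValueError.
    state["n"] += 1
    for i in range(state["n"]):
        yield i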
def value_eq(self, other):
"""Sorted comparison of values."""
self_sorted = ordered.ordered(self.getvalues())
other_sorted = ordered.ordered(repeated.getvalues(other))
return self_sorted == other_sorted
|
Sorted comparison of values.
|
entailment
|
def call_audit(func):
"""Print a detailed audit of all calls to this function."""
def audited_func(*args, **kwargs):
import traceback
stack = traceback.extract_stack()
r = func(*args, **kwargs)
func_name = func.__name__
print("@depth %d, trace %s -> %s(*%r, **%r) => %r" % (
len(stack),
" -> ".join("%s:%d:%s" % x[0:3] for x in stack[-5:-2]),
func_name,
args,
kwargs,
r))
return r
return audited_func
|
Print a detailed audit of all calls to this function.
|
entailment
|
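Illustrative use of the decorator above (the decorated function is hypothetical):

@call_audit
def add(x, y):
    return x + y

add(2, 3)
# Prints a line of the form:
#   @depth N, trace file:line:func -> ... -> add(*(2, 3), **{}) => 5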
def _class_dispatch(args, kwargs):
"""See 'class_multimethod'."""
_ = kwargs
if not args:
raise ValueError(
"Multimethods must be passed at least one positional arg.")
if not isinstance(args[0], type):
raise TypeError(
"class_multimethod must be called with a type, not instance.")
return args[0]
|
See 'class_multimethod'.
|
entailment
|
def prefer_type(self, prefer, over):
"""Prefer one type over another type, all else being equivalent.
With abstract base classes (Python's abc module) it is possible for
a type to appear to be a subclass of another type without the supertype
appearing in the subtype's MRO. As such, the supertype has no order
with respect to other supertypes, and this may lead to ambiguity if two
implementations are provided for unrelated abstract types.
In such cases, it is possible to disambiguate by explicitly telling the
function to prefer one type over the other.
Arguments:
prefer: Preferred type (class).
over: The type we don't like (class).
Raises:
ValueError: In case of logical conflicts.
"""
self._write_lock.acquire()
try:
if self._preferred(preferred=over, over=prefer):
raise ValueError(
"Type %r is already preferred over %r." % (over, prefer))
prefs = self._prefer_table.setdefault(prefer, set())
prefs.add(over)
finally:
self._write_lock.release()
|
Prefer one type over another type, all else being equivalent.
With abstract base classes (Python's abc module) it is possible for
a type to appear to be a subclass of another type without the supertype
appearing in the subtype's MRO. As such, the supertype has no order
with respect to other supertypes, and this may lead to ambiguity if two
implementations are provided for unrelated abstract types.
In such cases, it is possible to disambiguate by explicitly telling the
function to prefer one type over the other.
Arguments:
prefer: Preferred type (class).
over: The type we don't like (class).
Raises:
ValueError: In case of logical conflicts.
|
entailment
|
def _find_and_cache_best_function(self, dispatch_type):
"""Finds the best implementation of this function given a type.
This function caches the result, and uses locking for thread safety.
Returns:
Implementing function, in below order of preference:
1. Explicitly registered implementations (through
multimethod.implement) for types that 'dispatch_type' either is
or inherits from directly.
2. Explicitly registered implementations accepting an abstract type
(interface) in which dispatch_type participates (through
abstract_type.register() or the convenience methods).
3. Default behavior of the multimethod function. This will usually
raise a NotImplementedError, by convention.
Raises:
TypeError: If two implementing functions are registered for
different abstract types, and 'dispatch_type' participates in
both, and no order of preference was specified using
prefer_type.
"""
result = self._dispatch_table.get(dispatch_type)
if result:
return result
# The 'with' block ensures the lock is always released.
with self._write_lock:
try:
dispatch_mro = dispatch_type.mro()
except TypeError:
# Not every type has an MRO.
dispatch_mro = ()
best_match = None
result_type = None
for candidate_type, candidate_func in self.implementations:
if not issubclass(dispatch_type, candidate_type):
# Skip implementations that are obviously unrelated.
continue
try:
# The candidate implementation may be for a type that's
# actually in the MRO, or it may be for an abstract type.
match = dispatch_mro.index(candidate_type)
except ValueError:
# This means we have an implementation for an abstract
# type, which ranks below all concrete types.
match = None
if best_match is None:
if result and match is None:
# Already have a result, and no order of preference.
# This is probably because the type is a member of two
# abstract types and we have separate implementations
# for those two abstract types.
if self._preferred(candidate_type, over=result_type):
result = candidate_func
result_type = candidate_type
elif self._preferred(result_type, over=candidate_type):
# No need to update anything.
pass
else:
raise TypeError(
"Two candidate implementations found for "
"multimethod function %s (dispatch type %s) "
"and neither is preferred." %
(self.func_name, dispatch_type))
else:
result = candidate_func
result_type = candidate_type
best_match = match
if (match or 0) < (best_match or 0):
result = candidate_func
result_type = candidate_type
best_match = match
self._dispatch_table[dispatch_type] = result
return result
|
Finds the best implementation of this function given a type.
This function caches the result, and uses locking for thread safety.
Returns:
Implementing function, in below order of preference:
1. Explicitly registered implementations (through
multimethod.implement) for types that 'dispatch_type' either is
or inherits from directly.
2. Explicitly registered implementations accepting an abstract type
(interface) in which dispatch_type participates (through
abstract_type.register() or the convenience methods).
3. Default behavior of the multimethod function. This will usually
raise a NotImplementedError, by convention.
Raises:
TypeError: If two implementing functions are registered for
different abstract types, and 'dispatch_type' participates in
both, and no order of preference was specified using
prefer_type.
|
entailment
|
def __get_types(for_type=None, for_types=None):
"""Parse the arguments and return a tuple of types to implement for.
Raises:
ValueError or TypeError as appropriate.
"""
if for_type:
if for_types:
raise ValueError("Cannot pass both for_type and for_types.")
for_types = (for_type,)
elif for_types:
if not isinstance(for_types, tuple):
raise TypeError("for_types must be passed as a tuple of "
"types (classes).")
else:
raise ValueError("Must pass either for_type or for_types.")
return for_types
|
Parse the arguments and return a tuple of types to implement for.
Raises:
ValueError or TypeError as appropriate.
|
entailment
|
def implementation(self, for_type=None, for_types=None):
"""Return a decorator that will register the implementation.
Example:
@multimethod
def add(x, y):
pass
@add.implementation(for_type=int)
def add(x, y):
return x + y
@add.implementation(for_type=SomeType)
def add(x, y):
return int(x) + int(y)
"""
for_types = self.__get_types(for_type, for_types)
def _decorator(implementation):
self.implement(implementation, for_types=for_types)
return self
return _decorator
|
Return a decorator that will register the implementation.
Example:
@multimethod
def add(x, y):
pass
@add.implementation(for_type=int)
def add(x, y):
return x + y
@add.implementation(for_type=SomeType)
def add(x, y):
return int(x) + int(y)
|
entailment
|
def implement(self, implementation, for_type=None, for_types=None):
"""Registers an implementing function for for_type.
Arguments:
implementation: Callable implementation for this type.
for_type: The type this implementation applies to.
for_types: Same as for_type, but takes a tuple of types.
for_type and for_types cannot both be passed (for obvious reasons.)
Raises:
ValueError
"""
unbound_implementation = self.__get_unbound_function(implementation)
for_types = self.__get_types(for_type, for_types)
for t in for_types:
self._write_lock.acquire()
try:
self.implementations.append((t, unbound_implementation))
finally:
self._write_lock.release()
|
Registers an implementing function for for_type.
Arguments:
implementation: Callable implementation for this type.
for_type: The type this implementation applies to.
for_types: Same as for_type, but takes a tuple of types.
for_type and for_types cannot both be passed (for obvious reasons.)
Raises:
ValueError
|
entailment
|
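Putting implementation()/implement() together, a hedged usage sketch: the efilter.dispatch import path is an assumption based on where the multimethod class appears to live, and describe() is an illustrative function.

from efilter import dispatch   # assumed location of the multimethod class

@dispatch.multimethod
def describe(x):
    raise NotImplementedError()

@describe.implementation(for_type=int)
def describe(x):
    return "an int: %d" % x

@describe.implementation(for_type=str)
def describe(x):
    return "a string: %s" % x

describe(5)       # -> 'an int: 5'
describe("foo")   # -> 'a string: foo'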
def to_int_list(values):
"""Converts the given list of values into a list of integers. If the
integer conversion fails (e.g. non-numeric strings or None-values), this
filter will include a 0 instead."""
results = []
for v in values:
try:
results.append(int(v))
except (TypeError, ValueError):
results.append(0)
return results
|
Converts the given list of values into a list of integers. If the
integer conversion fails (e.g. non-numeric strings or None-values), this
filter will include a 0 instead.
|
entailment
|
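For example, non-numeric or missing entries simply become zeros:

to_int_list(["3", None, "x", 7.9])   # -> [3, 0, 0, 7]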
def _validate_resource_desc(self):
"""
**Purpose**: Validate the resource description provided to the ResourceManager
"""
self._prof.prof('validating rdesc', uid=self._uid)
self._logger.debug('Validating resource description')
expected_keys = ['resource',
'walltime',
'cpus']
for key in expected_keys:
if key not in self._resource_desc:
raise MissingError(obj='resource description', missing_attribute=key)
if not isinstance(self._resource_desc['resource'], str):
raise TypeError(expected_type=str, actual_type=type(self._resource_desc['resource']))
if not isinstance(self._resource_desc['walltime'], int):
raise TypeError(expected_type=int, actual_type=type(self._resource_desc['walltime']))
if not isinstance(self._resource_desc['cpus'], int):
raise TypeError(expected_type=int, actual_type=type(self._resource_desc['cpus']))
if 'gpus' in self._resource_desc:
if (not isinstance(self._resource_desc['gpus'], int)):
raise TypeError(expected_type=int, actual_type=type(self._resource_desc['gpus']))
if 'project' in self._resource_desc:
if (not isinstance(self._resource_desc['project'], str)) and (not self._resource_desc['project']):
raise TypeError(expected_type=str, actual_type=type(self._resource_desc['project']))
if 'access_schema' in self._resource_desc:
if not isinstance(self._resource_desc['access_schema'], str):
raise TypeError(expected_type=str, actual_type=type(self._resource_desc['access_schema']))
if 'queue' in self._resource_desc:
if not isinstance(self._resource_desc['queue'], str):
raise TypeError(expected_type=str, actual_type=type(self._resource_desc['queue']))
if not isinstance(self._rts_config, dict):
raise TypeError(expected_type=dict, actual_type=type(self._rts_config))
self._validated = True
self._logger.info('Resource description validated')
self._prof.prof('rdesc validated', uid=self._uid)
return self._validated
|
**Purpose**: Validate the resource description provided to the ResourceManager
|
entailment
|
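A minimal resource description that passes the validation above; the resource label and values are illustrative, and 'gpus', 'project', 'access_schema' and 'queue' are optional.

res_desc = {
    "resource": "local.localhost",   # illustrative resource label
    "walltime": 10,
    "cpus": 1,
    # optional keys: "gpus", "project", "access_schema", "queue"
}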
def _populate(self):
"""
**Purpose**: Populate the ResourceManager class with the validated
resource description
"""
if self._validated:
self._prof.prof('populating rmgr', uid=self._uid)
self._logger.debug('Populating resource manager object')
self._resource = self._resource_desc['resource']
self._walltime = self._resource_desc['walltime']
self._cpus = self._resource_desc['cpus']
self._gpus = self._resource_desc.get('gpus', 0)
self._project = self._resource_desc.get('project', None)
self._access_schema = self._resource_desc.get('access_schema', None)
self._queue = self._resource_desc.get('queue', None)
self._logger.debug('Resource manager population successful')
self._prof.prof('rmgr populated', uid=self._uid)
else:
raise EnTKError('Resource description not validated')
|
**Purpose**: Populate the ResourceManager class with the validated
resource description
|
entailment
|
def get_context_data(self, **kwargs):
"""Includes the Gauge slugs and data in the context."""
data = super(GaugesView, self).get_context_data(**kwargs)
data.update({'gauges': get_r().gauge_slugs()})
return data
|
Includes the Gauge slugs and data in the context.
|
entailment
|
def get_context_data(self, **kwargs):
"""Includes the metrics slugs in the context."""
data = super(MetricsListView, self).get_context_data(**kwargs)
# Metrics organized by category, like so:
# { <category_name>: [ <slug1>, <slug2>, ... ]}
data.update({'metrics': get_r().metric_slugs_by_category()})
return data
|
Includes the metrics slugs in the context.
|
entailment
|
def get_context_data(self, **kwargs):
"""Includes the metrics slugs in the context."""
data = super(MetricDetailView, self).get_context_data(**kwargs)
data['slug'] = kwargs['slug']
data['granularities'] = list(get_r()._granularities())
return data
|
Includes the metrics slugs in the context.
|
entailment
|
def get_context_data(self, **kwargs):
"""Includes the metrics slugs in the context."""
data = super(MetricHistoryView, self).get_context_data(**kwargs)
# Accept GET query params for ``since``
since = self.request.GET.get('since', None)
if since and len(since) == 10: # yyyy-mm-dd
since = datetime.strptime(since, "%Y-%m-%d")
elif since and len(since) == 19: # yyyy-mm-dd HH:MM:ss
since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
data.update({
'since': since,
'slug': kwargs['slug'],
'granularity': kwargs['granularity'],
'granularities': list(get_r()._granularities()),
})
return data
|
Includes the metrics slugs in the context.
|
entailment
|
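The same 'since' parsing appears in several of the views above; below is a hedged refactoring sketch of it as a standalone helper (the function name is hypothetical).

from datetime import datetime

def parse_since(raw):
    # Mirrors the view logic: a 10-character value is a date, a 19-character
    # value is a date plus time; anything else means 'no lower bound'.
    if raw and len(raw) == 10:          # yyyy-mm-dd
        return datetime.strptime(raw, "%Y-%m-%d")
    if raw and len(raw) == 19:          # yyyy-mm-dd HH:MM:SS
        return datetime.strptime(raw, "%Y-%m-%d %H:%M:%S")
    return None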
def get_success_url(self):
"""Reverses the ``redis_metric_aggregate_detail`` URL using
``self.metric_slugs`` as an argument."""
slugs = '+'.join(self.metric_slugs)
url = reverse('redis_metric_aggregate_detail', args=[slugs])
# Django 1.6 quotes reversed URLs, which changes + into %2B. We want
# to keep the + in the url (it's ok according to RFC 1738)
# https://docs.djangoproject.com/en/1.6/releases/1.6/#quoting-in-reverse
return url.replace("%2B", "+")
|
Reverses the ``redis_metric_aggregate_detail`` URL using
``self.metric_slugs`` as an argument.
|
entailment
|
def form_valid(self, form):
"""Pull the metrics from the submitted form, and store them as a
list of strings in ``self.metric_slugs``.
"""
self.metric_slugs = [k.strip() for k in form.cleaned_data['metrics']]
return super(AggregateFormView, self).form_valid(form)
|
Pull the metrics from the submitted form, and store them as a
list of strings in ``self.metric_slugs``.
|
entailment
|
def get_context_data(self, **kwargs):
"""Includes the metrics slugs in the context."""
r = get_r()
category = kwargs.pop('category', None)
data = super(AggregateDetailView, self).get_context_data(**kwargs)
if category:
slug_set = r._category_slugs(category)
else:
slug_set = set(kwargs['slugs'].split('+'))
data['granularities'] = list(r._granularities())
data['slugs'] = slug_set
data['category'] = category
return data
|
Includes the metrics slugs in the context.
|
entailment
|
def get_context_data(self, **kwargs):
"""Includes the metrics slugs in the context."""
r = get_r()
data = super(AggregateHistoryView, self).get_context_data(**kwargs)
slug_set = set(kwargs['slugs'].split('+'))
granularity = kwargs.get('granularity', 'daily')
# Accept GET query params for ``since``
since = self.request.GET.get('since', None)
if since and len(since) == 10: # yyyy-mm-dd
since = datetime.strptime(since, "%Y-%m-%d")
elif since and len(since) == 19: # yyyy-mm-dd HH:MM:ss
since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
data.update({
'slugs': slug_set,
'granularity': granularity,
'since': since,
'granularities': list(r._granularities())
})
return data
|
Includes the metrics slugs in the context.
|
entailment
|
def get(self, *args, **kwargs):
"""See if this view was called with a specified category."""
self.initial = {"category_name": kwargs.get('category_name', None)}
return super(CategoryFormView, self).get(*args, **kwargs)
|
See if this view was called with a specified category.
|
entailment
|
def form_valid(self, form):
"""Get the category name/metric slugs from the form, and update the
category so it contains the given metrics."""
form.categorize_metrics()
return super(CategoryFormView, self).form_valid(form)
|
Get the category name/metric slugs from the form, and update the
category so it contains the given metrics.
|
entailment
|
def rerun(self):
"""
Rerun sets the state of the Pipeline to scheduling so that the Pipeline
can be checked for new stages
"""
self._state = states.SCHEDULING
self._completed_flag = threading.Event()
print('Pipeline %s in %s state' % (self._uid, self._state))
|
Rerun sets the state of the Pipeline to scheduling so that the Pipeline
can be checked for new stages
|
entailment
|
def to_dict(self):
"""
Convert current Pipeline (i.e. its attributes) into a dictionary
:return: python dictionary
"""
pipeline_desc_as_dict = {
'uid': self._uid,
'name': self._name,
'state': self._state,
'state_history': self._state_history,
'completed': self._completed_flag.is_set()
}
return pipeline_desc_as_dict
|
Convert current Pipeline (i.e. its attributes) into a dictionary
:return: python dictionary
|
entailment
|
def from_dict(self, d):
"""
Create a Pipeline from a dictionary. The change is made in place.
:argument: python dictionary
:return: None
"""
if 'uid' in d:
if d['uid']:
self._uid = d['uid']
if 'name' in d:
if d['name']:
self._name = d['name']
if 'state' in d:
if isinstance(d['state'], str) or isinstance(d['state'], unicode):
if d['state'] in states._pipeline_state_values.keys():
self._state = d['state']
else:
raise ValueError(obj=self._uid,
attribute='state',
expected_value=states._pipeline_state_values.keys(),
actual_value=d['state'])
else:
raise TypeError(entity='state', expected_type=str,
actual_type=type(d['state']))
else:
self._state = states.INITIAL
if 'state_history' in d:
if isinstance(d['state_history'], list):
self._state_history = d['state_history']
else:
raise TypeError(entity='state_history', expected_type=list, actual_type=type(
d['state_history']))
if 'completed' in d:
if isinstance(d['completed'], bool):
if d['completed']:
self._completed_flag.set()
else:
raise TypeError(entity='completed', expected_type=bool,
actual_type=type(d['completed']))
|
Create a Pipeline from a dictionary. The change is made in place.
:argument: python dictionary
:return: None
|
entailment
|
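A hedged round-trip sketch using to_dict()/from_dict(); it assumes Pipeline is importable from radical.entk and that the name attribute is settable, as the methods above suggest.

from radical.entk import Pipeline   # assumed import path

p = Pipeline()
p.name = "sample-pipeline"

d = p.to_dict()   # {'uid': ..., 'name': 'sample-pipeline', 'state': ..., ...}

q = Pipeline()
q.from_dict(d)    # q now mirrors p's uid, name, state history and completion flag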
def _increment_stage(self):
"""
Purpose: Increment stage pointer. Also check if Pipeline has completed.
"""
try:
if self._cur_stage < self._stage_count:
self._cur_stage += 1
else:
self._completed_flag.set()
except Exception as ex:
raise EnTKError(text=ex)
|
Purpose: Increment stage pointer. Also check if Pipeline has completed.
|
entailment
|
def _decrement_stage(self):
"""
Purpose: Decrement stage pointer. Reset completed flag.
"""
try:
if self._cur_stage > 0:
self._cur_stage -= 1
self._completed_flag = threading.Event() # reset
except Exception as ex:
raise EnTKError(text=ex)
|
Purpose: Decrement stage pointer. Reset completed flag.
|
entailment
|
def _validate_entities(self, stages):
"""
Purpose: Validate whether the argument 'stages' is a list of Stage objects
:argument: list of Stage objects
"""
if not stages:
raise TypeError(expected_type=Stage, actual_type=type(stages))
if not isinstance(stages, list):
stages = [stages]
for value in stages:
if not isinstance(value, Stage):
raise TypeError(expected_type=Stage, actual_type=type(value))
return stages
|
Purpose: Validate whether the argument 'stages' is a list of Stage objects
:argument: list of Stage objects
|
entailment
|