Search is not available for this dataset
text stringlengths 75 104k |
|---|
def _handle_results(self):
    """
    Call back function to be implemented by the CLI.
    """
    # Anything other than an HTTP 200 response is ignored outright.
    if self._api_result.status_code != requests.codes.ok:
        return
    self._relays = json.loads(self._api_result.text)
    self._filter()
    self._dump_json()
def fromlist(cls, files, equal=False, offensive=False, lang=None):
    """Initialize based on a list of fortune files.

    Builds a cumulative-weight table: each entry is (fortune, running count),
    where the weight is 1 per file when ``equal`` is true, otherwise the
    fortune's size.

    :param files: iterable of fortune file paths.
    :param equal: weight every file equally instead of by size.
    :param offensive: forwarded to ``load_fortune``.
    :param lang: forwarded to ``load_fortune``.
    :raises ValueError: if no file could be loaded.
    """
    self = cls.__new__(cls)
    self.files = fortunes = []
    count = 0
    for file in files:
        fortune = load_fortune(file, offensive=offensive, lang=lang)
        if fortune is None:
            # logger.warn is a deprecated alias; warning is the supported name.
            logger.warning("Can't load: %s", file)
            continue
        count += 1 if equal else fortune.size
        fortunes.append((fortune, count))
    if not fortunes:
        raise ValueError('All fortune files specified are invalid')
    self.count = count
    # Cumulative counts used as bisection keys for weighted random choice.
    self.keys = [i[1] for i in self.files]
    return self
def set_chance(cls, files, equal=False, offensive=False, lang=None):  # where files are (name, chance)
    """Initialize based on a list of fortune files with set chances.

    Each entry of ``files`` is a (name, chance) pair; chance may be falsy,
    in which case the file shares whatever probability mass is left over.
    """
    self = cls.__new__(cls)
    total = 0.
    file = []        # (fortune, chance) pairs with an explicit chance
    leftover = []    # fortunes with no explicit chance
    for name, chance in files:
        # Stop once the explicit chances already cover the full range.
        if total >= 1:
            break
        fortune = load_fortune(name, offensive=offensive, lang=lang)
        if fortune is None or not fortune.size:
            continue
        if chance:
            file.append((fortune, chance))
            total += chance
        else:
            leftover.append(fortune)
    if leftover and total < 1:
        # Distribute the remaining probability mass over the chance-less files.
        left = 1 - total
        if equal:
            perfile = left / len(leftover)
            for fortune in leftover:
                file.append((fortune, perfile))
        else:
            # Weight each leftover file by its number of entries.
            entries = sum(map(attrgetter('size'), leftover))
            logger.debug('%d entries left', entries)
            for fortune in leftover:
                chance = left * fortune.size / entries
                file.append((fortune, chance))
    # Arbitrary limit to calculate upper bound with, nice round number
    self.count = count = 65536
    bound = 0
    self.files = fortunes = []
    # NOTE: the loop variable deliberately reuses the name `file`; after the
    # loop it holds the last pair, not the list.
    for file, chance in file:
        bound += int(chance * count)
        fortunes.append((file, bound))
    # Cumulative bounds used as bisection keys for weighted random choice.
    self.keys = [i[1] for i in self.files]
    return self
def main(context, **kwargs):
    """
    virtue discovers and runs tests found in the given objects.
    Provide it with one or more tests (packages, modules or objects) to run.
    """
    outcome = run(**kwargs)
    # Exit status: falsy (0) on success, truthy on failure.
    context.exit(not outcome.wasSuccessful())
def grammar(self, text):
    """grammar = {comment} , rule , {comment | rule} ;"""
    self._attempting(text)
    # Optional leading comments, one mandatory rule, then any mix of
    # further comments and rules; the whole match is retyped as a grammar.
    return concatenation([
        zero_or_more(
            self.comment,
            ignore_whitespace=True
        ),
        self.rule,
        zero_or_more(
            alternation([
                self.comment,
                self.rule,
            ]),
            ignore_whitespace=True
        ),
    ], ignore_whitespace=True)(text).retyped(TokenType.grammar)
def comment(self, text):
    """comment = "(*" . {printable - "*" | "*" . printable - ")"} . "*)" ;"""
    self._attempting(text)
    # Whitespace is significant inside a comment, hence
    # ignore_whitespace=False throughout. The body accepts any printable
    # except "*", or a "*" not followed by ")" (which would end the comment).
    return concatenation([
        "(*",
        zero_or_more(
            alternation([
                exclusion(
                    self.printable,
                    "*"
                ),
                concatenation([
                    "*",
                    exclusion(
                        self.printable,
                        ")"
                    ),
                ], ignore_whitespace=False),
            ]),
            ignore_whitespace=False
        ),
        "*)",
    ], ignore_whitespace=False)(text).compressed(TokenType.comment)
def rule(self, text):
    """rule = identifier , "=" , expression , ";" ;"""
    self._attempting(text)
    # A rule is "name = expression ;", whitespace-insensitive.
    return concatenation([
        self.identifier,
        "=",
        self.expression,
        ";",
    ], ignore_whitespace=True)(text).retyped(TokenType.rule)
def special_handling(self, text):
    """special_handling = "?" , identifier , "?" ;"""
    self._attempting(text)
    # An identifier wrapped in question marks, e.g. "? digit ?".
    return concatenation([
        "?",
        self.identifier,
        "?",
    ], ignore_whitespace=True)(text).retyped(TokenType.special_handling)
def number(self, text):
    """number = digit - "0" . {digit} ;"""
    self._attempting(text)
    # A non-zero leading digit followed by any digits; no whitespace allowed
    # inside the literal.
    return concatenation([
        exclusion(
            self.digit,
            "0"
        ),
        zero_or_more(
            self.digit,
            ignore_whitespace=False
        ),
    ], ignore_whitespace=False)(text).compressed(TokenType.number)
def get_arguments(self):
    """
    Extracts the specific arguments of this CLI

    Copies command-line values onto the instance, then builds the JSON
    payload for a measurement POST.
    """
    ApiCli.get_arguments(self)
    if self.args.metricName is not None:
        self.metricName = self.args.metricName
    if self.args.measurement is not None:
        self.measurement = self.args.measurement
    if self.args.source is not None:
        self.source = self.args.source
    else:
        # Default the measurement source to the local host name.
        self.source = socket.gethostname()
    if self.args.timestamp is not None:
        self.timestamp = int(self.args.timestamp)
    # NOTE(review): self.timestamp is read below even when args.timestamp is
    # None — presumably initialized elsewhere in the class; confirm.
    m = {'metric': self.metricName,
         'measure': self.measurement}
    if self.source is not None:
        m['source'] = self.source
    if self.timestamp is not None:
        m['timestamp'] = int(self.timestamp)
    self._process_properties()
    if self._properties is not None:
        m['metadata'] = self._properties
    self.data = json.dumps(m, sort_keys=True)
    self.headers = {'Content-Type': 'application/json', "Accept": "application/json"}
def _handle_results(self):
    """
    Call back function to be implemented by the CLI.
    """
    # Nothing to display unless the call returned HTTP 200.
    if self._api_result.status_code != requests.codes.ok:
        return
    parsed = json.loads(self._api_result.text)
    pretty = json.dumps(parsed, sort_keys=True, indent=4, separators=(',', ': '))
    print(self.colorize_json(pretty))
def grammar(self):
    """The parse tree generated by the source.

    Lazily parses ``self.input_source`` on first access, then trims and
    flattens the resulting tree; the result is memoized in ``self._grammar``.
    """
    if self._grammar is None:
        self.parser = Parser()
        grammar = self.parser.parse(self.input_source)
        self._grammar = grammar.trimmed().flattened().flattened(self._flatten)
    return self._grammar
def rules(self):
    """The AST rules.

    Lazily extracts every rule node from the grammar's children and wraps
    each as a Rule(name, expression-AST, position, consumed).
    """
    if self._rules is None:
        self._rules = []
        for child in self.grammar.children:
            if child.is_type(TokenType.rule):
                # A rule node has exactly two children: name and expression.
                name, expression = child.children
                self._rules.append(Rule(name.value, self._expression_to_asn(expression), name.position, child.consumed))
    return self._rules
def comments(self):
    """The AST comments, extracted lazily from the grammar's children."""
    if self._comments is None:
        found = []
        for child in self.grammar.children:
            if child.is_type(TokenType.comment):
                found.append(child)
        self._comments = found
    return self._comments
def directives(self):
    """The directives parsed from the comments (computed once, memoized)."""
    if self._directives is None:
        collected = []
        for comment in self.comments:
            collected.extend(self.directives_from_comment(comment))
        self._directives = collected
    return self._directives
def output_source(self):
    """The python source of the parser generated from the input source."""
    # Compile lazily and memoize the result.
    cached = self._output_source
    if cached is None:
        cached = self._output_source = self._compile()
    return cached
def _compile(self):
    """Returns the python source code for the generated parser.

    Assembles a module header (generation timestamp + fixed imports), any
    custom imports from directives, the TokenType enum, and the Parser class.
    """
    fmt = """\"\"\"This parser was generated by pyebnf on {date}.\"\"\"
from enum import Enum
from pyebnf import parser_base as PB
from pyebnf.primitive import alternation, concatenation, exclusion, one_or_more
from pyebnf.primitive import option, repeated, repetition, terminal, zero_or_more
{imports}
{token_type_enum}
{class_definition}
"""
    fmt = self._clean_fmt(fmt)
    return fmt.format(date=datetime.utcnow().isoformat(),
                      imports=self._get_imports(),
                      token_type_enum=self._get_token_type_enum(),
                      class_definition=self._get_class_definition())
def _get_imports(self):
    """Reads the directives and generates source code for custom imports."""
    lines = [d.args["value"] for d in self.directives if d.name == "import"]
    if not lines:
        return ""
    # Leading newline separates the custom imports from the fixed header.
    return "\n" + "\n".join(lines)
def _get_token_type_enum(self):
    """Builds the python source code for the Parser TokenType enum.

    One enum member per rule, numbered from 1 in rule order.
    """
    fmt = "class TokenType(Enum):\n" \
          "{indent}\"\"\"The token types for parse nodes generated by the Parser.\"\"\"\n" \
          "{indent}" + \
          "\n{indent}".join("{1} = {0}".format(num + 1, r.name) for num, r in enumerate(self.rules))
    return fmt.format(indent=self.indent)
def _get_class_definition(self):
    """Builds the class definition of the parser.

    Fills in the base class, entry point and one method per rule.
    """
    fmt = """class Parser({parser_base}):
{indent}\"\"\"This class contains methods for reading source code and generating a parse tree.\"\"\"
{indent}entry_point = "{entry_point}"
{rule_definitions}
"""
    fmt = self._clean_fmt(fmt)
    return fmt.format(parser_base=self._get_parser_base(),
                      indent=self.indent,
                      entry_point=self._get_entry_point(),
                      rule_definitions="\n".join(self._get_rule_definitions()))
def _get_entry_point(self):
    """Gets the entry_point value for the parser."""
    directive = self._find_directive("entry_point")
    # Fall back to the first rule when no explicit directive is present.
    return directive.args["value"] if directive else self.rules[0].name
def _get_rule_definition(self, rule):
    """Generates the source code for a rule.

    Renders the rule as a parser method: docstring with the original rule
    source, an _attempting call, and the combinator expression.
    """
    fmt = """def {rule_fxn_name}(self, text):
{indent}\"\"\"{rule_source}\"\"\"
{indent}self._attempting(text)
{indent}return {rule_definition}(text){transform}
"""
    fmt = self._clean_fmt(fmt)
    source = self._indent(self._ast_to_code(rule.expression), skip_first_line=True)
    # All the primitives will accept a string x in place of terminal(x). This is terminal shorthand.
    # However, if a rule is only a wrapper around a single terminal, we have to actually make a
    # terminal call. This handles that situation.
    if self.use_terminal_shorthand and len(source) == 1 and source[0].startswith(("'", '"')):
        source = ["terminal({})".format(source[0])]
    rule_source = fmt.format(rule_fxn_name=self._get_rule_fxn_name(rule.name),
                             indent=self.indent,
                             rule_source=self._get_rule_source(rule),
                             rule_definition="\n".join(source),
                             transform=self._get_rule_transform(rule))
    # Methods live one indent level inside the generated class body.
    return self._indent(rule_source, 1)
def _get_rule_source(self, rule):
    """Gets the variable part of the source code for a rule.

    Recovers the rule's original text from the input source.
    NOTE(review): rule.position appears to be negative (offset from the end),
    hence len(input) + position — confirm against the Rule producer.
    """
    p = len(self.input_source) + rule.position
    source = self.input_source[p:p + rule.consumed].rstrip()
    return self._indent(source, depth=self.indent + "   ", skip_first_line=True)
def _get_rule_transform(self, rule):
    """The return value for each rule can be either retyped, compressed or left alone. This method
    determines that and returns the source code text for accomplishing it.

    Returns the method-call suffix (".retyped(...)", ".compressed(...)" or "")
    based on an optional per-rule directive; default transform is "retype".
    NOTE: an unrecognized transform value falls through and returns None
    implicitly, which would render as the text "None" in the output.
    """
    rd = self._find_directive(lambda d: d.name == "rule" and d.args.get("name") == rule.name)
    if rd:
        args = rd.args
    else:
        args = {}
    transform = args.get("transform", "retype")
    if transform == "retype":
        new_name = args.get("to_type", "TokenType.{0}".format(rule.name))
        return ".retyped({0})".format(new_name)
    elif transform == "compress":
        new_name = args.get("to_type", "TokenType.{0}".format(rule.name))
        if new_name == "identity":
            # "identity" keeps the node's existing type when compressing.
            return ".compressed()"
        else:
            return ".compressed({0})".format(new_name)
    elif transform == "identity":
        return ""
def _expression_to_asn(self, expression):
    """Convert an expression to an Abstract Syntax Tree Node."""
    children = [self._node_to_asn(child) for child in expression.children]
    optree = infix_to_optree(children)
    # Grouping parens are implied by the optree structure, so drop them.
    return self._remove_grouping_groups(optree)
def _node_to_asn(self, node):
    """Convert a parse tree node into an absract syntax tree node.

    Dispatches on the parse node's token type; group nodes recurse through
    _expression_to_asn on their single child expression.
    """
    if node.is_type(TokenType.identifier):
        return Identifier(node.svalue)
    elif node.is_type(TokenType.terminal):
        return Terminal(node.svalue)
    elif node.is_type(TokenType.option_group):
        expr = node.children[0]
        return OptionGroup(self._expression_to_asn(expr))
    elif node.is_type(TokenType.repetition_group):
        expr = node.children[0]
        return RepetitionGroup(self._expression_to_asn(expr))
    elif node.is_type(TokenType.grouping_group):
        expr = node.children[0]
        return GroupingGroup(self._expression_to_asn(expr))
    elif node.is_type(TokenType.special_handling):
        ident = node.children[0]
        return SpecialHandling(ident)
    elif node.is_type(TokenType.number):
        return Number(node.svalue)
    elif node.is_type((TokenType.operator, TokenType.op_mult, TokenType.op_add)):
        # Operators carry their position so precedence errors can be reported.
        return OperatorNode(OPERATOR_INDEX[node.svalue], node.position)
    else:
        raise Exception("Unhandled parse tree node: {0}".format(node))
def _hoist_operands(self, operands, pred):
    """Flattens a list of optree operands based on a pred.
    This is used to convert concatenation([x, concatenation[y, ...]]) (or alternation) to
    concatenation([x, y, ...]).
    """
    pending = list(operands)
    flattened = []
    while pending:
        candidate = pending.pop(0)
        if pred(candidate):
            # Splice the matching node's own operands back in, keeping order.
            pending[:0] = list(candidate.operands)
        else:
            flattened.append(candidate)
    return flattened
def _remove_grouping_groups(self, optree):
    """Grouping groups are implied by optrees, this function hoists grouping group expressions up
    to their parent node.
    """
    new_operands = []
    for operand in optree.operands:
        if isinstance(operand, OptreeNode):
            # Recurse into nested operator trees.
            new_operands.append(self._remove_grouping_groups(operand))
        elif isinstance(operand, GroupingGroup):
            # Replace the group wrapper with the expression it wraps.
            new_operands.append(operand.expression)
        else:
            new_operands.append(operand)
    return OptreeNode(optree.opnode, new_operands)
def _ast_to_code(self, node, **kwargs):
    """Convert an abstract syntax tree to python source code.

    Type-dispatch entry point; each branch delegates to the dedicated
    _ast_*_to_code helper. Returns a list of source lines.
    """
    if isinstance(node, OptreeNode):
        return self._ast_optree_node_to_code(node, **kwargs)
    elif isinstance(node, Identifier):
        return self._ast_identifier_to_code(node, **kwargs)
    elif isinstance(node, Terminal):
        return self._ast_terminal_to_code(node, **kwargs)
    elif isinstance(node, OptionGroup):
        return self._ast_option_group_to_code(node, **kwargs)
    elif isinstance(node, RepetitionGroup):
        return self._ast_repetition_group_to_code(node, **kwargs)
    elif isinstance(node, SpecialHandling):
        return self._ast_special_handling_to_code(node, **kwargs)
    elif isinstance(node, Number):
        return self._ast_number_to_code(node, **kwargs)
    else:
        raise Exception("Unhandled ast node: {0}".format(node))
def _ast_optree_node_to_code(self, node, **kwargs):
    """Convert an abstract syntax operator tree to python source code.

    Dispatches on the node's operator; whitespace-sensitive vs. insensitive
    concatenation both route to the same helper with an explicit flag.
    """
    opnode = node.opnode
    if opnode is None:
        # Identity node: a bare operand wrapped in an optree.
        return self._ast_to_code(node.operands[0])
    else:
        operator = opnode.operator
        if operator is OP_ALTERNATE:
            return self._ast_op_alternate_to_code(node, **kwargs)
        elif operator is OP_WS_CONCAT:
            kwargs["ignore_whitespace"] = False
            return self._ast_op_concat_to_code(node, **kwargs)
        elif operator is OP_CONCAT:
            kwargs["ignore_whitespace"] = True
            return self._ast_op_concat_to_code(node, **kwargs)
        elif operator is OP_EXCLUDE:
            return self._ast_op_exclude_to_code(node, **kwargs)
        elif operator is OP_MULTIPLY:
            return self._ast_op_multiply_to_code(node, **kwargs)
        elif operator is OP_REPEAT:
            return self._ast_op_repeat_to_code(node, **kwargs)
        else:
            raise Exception("Unhandled optree node: {0}".format(node))
def _ast_terminal_to_code(self, terminal, **kwargs):
    """Convert an AST terminal to python source code."""
    value = _replace(terminal.value)
    # Shorthand emits the bare literal; otherwise wrap in a terminal() call.
    if not self.use_terminal_shorthand:
        return ["terminal({})".format(value)]
    return [value]
def _ast_option_group_to_code(self, option_group, **kwargs):
    """Convert an AST option group to python source code."""
    body = self._indent(self._ast_to_code(option_group.expression))
    return ["option("] + body + [")"]
def _ast_repetition_group_to_code(self, repetition_group, ignore_whitespace=False, **kwargs):
    """Convert an AST repetition group to python source code."""
    body = self._indent(self._ast_to_code(repetition_group.expression))
    # Trailing comma separates the expression from the keyword argument.
    body[-1] += ","
    ws_line = self._indent("ignore_whitespace={}".format(bool(ignore_whitespace)))
    return ["zero_or_more("] + body + [ws_line, ")"]
def _ast_special_handling_to_code(self, special_handling, **kwargs):
    """Convert an AST special handling to python source code."""
    name = special_handling.value.svalue
    # Built-in handlers live on the parser_base module (PB); anything else
    # is expected to be a method on the generated parser itself.
    if name in PB_SPECIAL_HANDLING:
        return ["PB.{0}".format(name)]
    return ["self.{0}".format(name)]
def _ast_op_alternate_to_code(self, opr, **kwargs):
    """Convert an AST alternate op to python source code.

    Nested alternations are hoisted into a single flat alternation([...]).
    """
    hoist_target = OP_ALTERNATE
    operands = self._hoist_operands(opr.operands, lambda t: isinstance(t, OptreeNode) and t.opnode.operator is hoist_target)
    lines = ["alternation(["]
    for op in operands:
        lines.extend(self._indent(self._ast_to_code(op)))
        # Comma after each option in the generated list literal.
        lines[-1] += ","
    lines.append("])")
    return lines
def _ast_op_concat_to_code(self, opr, *, ignore_whitespace, **kwargs):
    """Convert an AST concatenate op to python source code.

    Only nested concatenations of the SAME whitespace mode are hoisted flat,
    so "," and "." chains do not merge into each other.
    """
    hoist_target = OP_CONCAT if ignore_whitespace else OP_WS_CONCAT
    operands = self._hoist_operands(opr.operands, lambda t: isinstance(t, OptreeNode) and t.opnode.operator is hoist_target)
    lines = ["concatenation(["]
    for op in operands:
        lines.extend(self._indent(self._ast_to_code(op, ignore_whitespace=ignore_whitespace)))
        lines[-1] += ","
    lines.append("], ignore_whitespace={})".format(bool(ignore_whitespace)))
    return lines
def _ast_op_exclude_to_code(self, opr, **kwargs):
    """Convert an AST exclude op to python source code.

    Renders "a - b" as exclusion(a, b).
    """
    # NOTE: the unpacking reuses the parameter name `opr` for the right
    # operand; the original optree node is unreachable after this line.
    opl, opr = opr.operands
    lines = ["exclusion("]
    lines.extend(self._indent(self._ast_to_code(opl)))
    lines[-1] += ","
    lines.extend(self._indent(self._ast_to_code(opr)))
    lines.append(")")
    return lines
def _ast_op_multiply_to_code(self, opr, ignore_whitespace=False, **kwargs):
    """Convert an AST multiply op to python source code.

    "n * expr" or "expr * n" both render as repeated(expr, times=n, ...).
    """
    # NOTE: the unpacking reuses the parameter name `opr` for the right operand.
    opl, opr = opr.operands
    if isinstance(opl, Number):
        times = opl.value
        subject = self._ast_to_code(opr)
    else:
        # The number may appear on either side of the operator.
        times = opr.value
        subject = self._ast_to_code(opl)
    lines = ["repeated("]
    lines.extend(self._indent(subject))
    lines[-1] += ","
    lines.append("{0}times={1},".format(self.indent, times))
    lines.append("{0}ignore_whitespace={1}".format(self.indent, bool(ignore_whitespace)))
    lines.append(")")
    return lines
def _ast_op_repeat_to_code(self, opr, ignore_whitespace=False, **kwargs):
    """Convert an AST repeat op to python source code.

    Renders the unary repeat operator as one_or_more(expr, ...).
    """
    lines = ["one_or_more("]
    lines.extend(self._indent(self._ast_to_code(opr.operands[0])))
    lines[-1] += ","
    lines.append(self._indent("ignore_whitespace={}".format(bool(ignore_whitespace))))
    lines.append(")")
    return lines
def _indent(self, text, depth=1, *, skip_first_line=False, suffix=""):
    """Indent text by depth * self.indent.
    Text can be either a string, or a list of strings. If it is a string, it will be split on
    newline to a list of strings.
    if skip_first_line is true, the first line will not be indented like the others.
    """
    was_list = isinstance(text, list)
    lines = text if was_list else text.split("\n")
    # depth may be an int (multiples of self.indent) or a literal prefix.
    spacing = self.indent * depth if isinstance(depth, int) else depth
    result = []
    for index, line in enumerate(lines):
        prefix = "" if (skip_first_line and index == 0) else spacing
        result.append("{0}{1}{2}".format(prefix, line, suffix))
    # Mirror the input container: list in, list out; string in, string out.
    return result if was_list else "\n".join(result)
def _find_directives(self, pred):
    """Finds all directives with a certain name, or that passes a predicate."""
    # A bare string means "match by directive name".
    if isinstance(pred, str):
        wanted = pred
        pred = lambda d: d.name == wanted
    return [d for d in self.directives if pred(d)]
def _flatten(child, parent):
    """Custom flattening method for the parse tree.

    Collapse a child into its parent only when the parent is an expression
    node of the same node type.
    """
    if not parent.is_type(TokenType.expression):
        return False
    return child.node_type == parent.node_type
def directives_from_comment(cls, comment):
    """A directive is a line in a comment that begins with '!'."""
    # Strip the "(*" / "*)" delimiters, then scan line by line.
    body = comment.value[2:-2].strip()
    for line in body.split("\n"):
        line = line.strip()
        if line.startswith("!"):
            yield cls.parse_directive_def(line[1:].strip())
def parse_directive_def(cls, directive_def):
    """Turns a directive definition string into a directive object."""
    # First token is the directive name; the rest are key=value pairs.
    name, *raw_args = esc_split(directive_def, ignore_empty=True)
    args = {}
    for arg in raw_args:
        key, value = esc_split(arg, "=")
        args[key] = value
    return Directive(name, args)
def get_arguments(self):
    """
    Extracts the specific arguments of this CLI
    """
    ApiCli.get_arguments(self)
    group_name = self.args.hostGroupName
    if group_name is not None:
        self.url_parameters = {"name": group_name}
def get_arguments(self):
    """
    Extracts the specific arguments of this CLI
    """
    ApiCli.get_arguments(self)
    name = self.args.plugin_name
    if name is not None:
        self.plugin_name = name
    # Endpoint path is keyed by the plugin name.
    self.path = "v1/plugins/{0}".format(self.plugin_name)
def get_arguments(self):
    """
    Extracts the specific arguments of this CLI
    """
    ApiCli.get_arguments(self)
    # args.alarm_id is already None when absent, so a plain copy suffices.
    self._alarm_id = self.args.alarm_id
def _handle_results(self):
    """
    Handle the results of the API call
    """
    # Echo the raw (colorized) response only on a non-200 status.
    status = self._api_result.status_code
    if status != requests.codes.ok:
        print(self.colorize_json(self._api_result.text))
def key_to_str(modifiers, key, mods_table = mods, key_table = wx, key_prefix = 'WXK_'):
    """
    Returns a human-readable version of numerical modifiers and key.
    To make the key suitable for global hotkey usage, supply:
    mods_table = global_mods, key_table = win32con, key_prefix = 'VK_'
    """
    logger.debug('Converting (%s, %s) to string.', modifiers, key)
    if not key:
        # Key code 0 (or None) means "no key".
        key_str = 'NONE'
    else:
        key_str = None
    res = ''
    # Append each modifier name whose bit is set, e.g. "CTRL+ALT+".
    for value, name in mods_table.items():
        if (modifiers & value):
            res += name + '+'
    # Scan the key table for a constant equal to the key code; the loop does
    # not break on a match, so the last matching constant wins.
    for x in dir(key_table):
        if x.startswith(key_prefix):
            if getattr(key_table, x) == key:
                # `converts` maps awkward constant names to friendlier ones.
                key_str = converts.get(x, x[len(key_prefix):])
    if not key_str:
        # No named constant matched: fall back to the literal character.
        key_str = chr(key)
    res += key_str
    logger.debug('Final result: %s.', res)
    return res
def str_to_key(value, key_table = wx, accel_format = 'ACCEL_%s', key_format = 'WXK_%s', key_transpositions = {}):
    """
    Turns a string like "CTRL_ALT+K" into (3, 75).
    To get a global hotkey, try passing:
    key_table = win32con, accel_format = 'MOD_%s', key_format = 'VK_%s', key_transpositions = {'CTRL': 'CONTROL'}

    NOTE(review): key_transpositions uses a mutable default dict; it is only
    read here, but callers should not mutate it.
    """
    logger.debug('Converting "%s" to integers.', value)
    modifiers = 0
    key = 0
    split = value.split('+')
    for v in split:
        v = v.upper()
        # Try the token first as a modifier name, then as a key name.
        a = accel_format % key_transpositions.get(v, v)
        logger.debug('Accelerator format = %s.', a)
        k = key_format % key_transpositions.get(v, v)
        logger.debug('Key format = %s.', k)
        if hasattr(key_table, a):
            logger.debug('Found accelerator on %r.', key_table)
            modifiers = modifiers | getattr(key_table, a)
        elif hasattr(key_table, k):
            logger.debug('Found key on %r.', key_table)
            if key:
                # Only one non-modifier key is allowed per combination.
                raise ValueError('Multiple keys specified.')
            else:
                key = getattr(key_table, k)
    if not key:
        # No named constant matched: use the character code of the last token.
        logger.debug('No key yet, falling back to ord.')
        key = ord(split[-1])
    logger.debug('modifiers = %d, key = %d.', modifiers, key)
    return (modifiers, key)
def get_id(id):
    """Get a new id if the provided one is None.

    :param id: an existing wx id, or None to allocate one via wx.NewId().
    :return: the provided or newly generated id.
    """
    # NOTE: `id` shadows the builtin; name kept for interface compatibility.
    # `is None` (identity) is the correct test — `== None` invokes __eq__ and
    # can misfire on objects with custom equality.
    if id is None:
        id = wx.NewId()
        logger.debug('Generated new ID %s.', id)
    else:
        logger.debug('Using provided id %s.', id)
    return id
def add_accelerator(control, key, func, id = None):
    """
    Adds a key to the control.
    control: The control that the accelerator should be added to.
    key: A string like "CTRL+F", or "CMD+T" that specifies the key to use.
    func: The function that should be called when key is pressed.
    id: The id to Bind the event to. Defaults to wx.NewId().

    Returns the id the accelerator was bound to.
    """
    logger.debug('Adding key "%s" to control %s to call %s.', key, control, func)
    id = get_id(id)
    control.Bind(wx.EVT_MENU, func, id = id)
    # Accumulate this control's accelerator entries in the module-level table.
    t = _tables.get(control, [])
    modifiers, key_int = str_to_key(key)
    t.append((modifiers, key_int, id))
    _tables[control] = t
    # Rebuild the control's wx.AcceleratorTable from the updated entries.
    update_accelerators(control)
    return id
def remove_accelerator(control, key):
    """
    Removes an accelerator from control.
    control: The control to affect.
    key: The key to remove.

    Returns True if a matching accelerator was found and removed, else False.
    Only the first matching entry is removed (the function returns inside
    the loop).
    """
    key = str_to_key(key)
    t = _tables.get(control, [])
    for a in t:
        # Entries are (modifiers, key_int, id); match on the first two.
        if a[:2] == key:
            t.remove(a)
            if t:
                _tables[control] = t
            else:
                # Drop the control's table entirely once it is empty.
                del _tables[control]
            update_accelerators(control)
            return True
    return False
def add_hotkey(control, key, func, id = None):
    """
    Add a global hotkey bound to control via id that should call func.
    control: The control to bind to.
    key: The hotkey to use.
    func: The func to call.
    id: The new ID to use (defaults to creating a new ID).

    Returns the result of control.RegisterHotKey. Requires win32con.
    """
    if win32con is None:
        # win32con import is optional at module level; hotkeys are
        # Windows-only functionality.
        raise RuntimeError('win32con is not available.')
    logger.debug('Adding hotkey "%s" to control %s to call %s.', key, control, func)
    modifiers, keycode = str_to_key(key, key_table = win32con, accel_format = 'MOD_%s', key_format = 'VK_%s', key_transpositions = {'CTRL': 'CONTROL'})
    id = get_id(id)
    control.Bind(wx.EVT_HOTKEY, func, id = id)
    # Track registrations so remove_hotkey can unregister them later.
    l = _hotkeys.get(control, [])
    l.append([key, id])
    _hotkeys[control] = l
    return control.RegisterHotKey(id, modifiers, keycode)
def remove_hotkey(control, key):
    """
    Remove a global hotkey.
    control - The control to affect
    key - The key to remove.

    Unbinds and unregisters every entry whose key string matches, then
    updates or deletes the control's entry in the module-level table.
    """
    l = _hotkeys.get(control, [])
    # Iterate over a copy: the original code removed entries from the list
    # while iterating it, which silently skips the element after each match.
    for a in list(l):
        key_str, id = a
        if key_str == key:
            control.Unbind(wx.EVT_HOTKEY, id = id)
            control.UnregisterHotKey(id)
            l.remove(a)
    if l:
        _hotkeys[control] = l
    else:
        del _hotkeys[control]
def add_arguments(self):
    """
    Configure handling of command line arguments.

    Registers the options common to every CLI: API host, e-mail, API token,
    and a --curl flag that prints the equivalent curl command instead of
    calling the API.
    """
    self.add_logging_argument()
    self.parser.add_argument('-a', '--api-host', dest='api_host', action='store', metavar="api_host",
                             help='{0} API host endpoint'.format(self.product_name))
    self.parser.add_argument('-e', '--email', dest='email', action='store', metavar="e_mail",
                             help='e-mail that has access to the {0} account'.format(self.product_name))
    self.parser.add_argument('-t', '--api-token', dest='api_token', required=False, action='store',
                             metavar="api_token",
                             help='API token for given e-mail that has access to the {0} account'.format(
                                 self.product_name))
    self.parser.add_argument('-z', '--curl', dest='curl', required=False, action='store_true', default=False,
                             help='Output the corresponding curl command line and exit')
def _configure_logging(self):
    """
    Configure logging based on command line options

    Applies the requested level via basicConfig only when the user passed
    a log level on the command line.
    """
    if self.args.logLevel is not None:
        logging.basicConfig(level=self.levels[self.args.logLevel])
        # Lazy %-style args avoid formatting the message when INFO is off.
        logging.info("Set logging level to %s", self.args.logLevel)
def get_arguments(self):
    """
    CLIs get called back so that they can process any command line arguments
    that are given. This method handles the standard command line arguments for:
    API Host, user, password, etc.
    """
    # We call this first so that logging is enabled as soon as possible
    self._configure_logging()
    # Extract the common command line arguments
    if self.args.api_host is not None:
        self._api_host = self.args.api_host
    if self.args.email is not None:
        self._email = self.args.email
    if self.args.api_token is not None:
        self._api_token = self.args.api_token
    self._curl = self.args.curl
    # Lazy %-style args: skip string formatting when DEBUG is disabled.
    logging.debug("apihost: %s", self._api_host)
    logging.debug("email: %s", self._email)
    logging.debug("apitoken: %s", self._api_token)
def _validate_arguments(self):
    """
    Validates the command line arguments passed to the CLI
    Derived classes that override need to call this method before
    validating their arguments
    """
    # Both credentials are mandatory; report the first one missing.
    error = None
    if self._email is None:
        error = "E-mail for the account not provided"
    elif self._api_token is None:
        error = "API Token for the account not provided"
    if error is not None:
        self.set_error_message(error)
        return False
    return True
def execute(self):
    """
    Run the steps to execute the CLI

    Orchestrates the full lifecycle: environment defaults, argument
    registration/parsing/extraction, validation, and finally either a curl
    dump or the actual API call.
    """
    # Set default arguments from environment variables
    self._get_environment()
    # Call our member function to add command line arguments, child classes that override need
    # to call the ApiCli version first to add standard arguments
    self.add_arguments()
    # Parse the command line arguments
    self._parse_args()
    # Arguments are parsed call back to the instance so that it can extract the command line
    # arguments for its use
    self.get_arguments()
    self.get_api_parameters()
    if self._validate_arguments():
        if self._curl:
            # --curl: show the equivalent curl invocation instead of calling.
            self._curl_output()
        else:
            self._call_api()
            self._handle_results()
    else:
        # Validation failed; _validate_arguments stored the reason.
        print(self._message)
def infix_to_postfix(nodes, *, recurse_types=None):
    """Convert a list of nodes in infix order to a list of nodes in postfix order.
    E.G. with normal algebraic precedence, 3 + 4 * 5 -> 3 4 5 * +

    Shunting-yard: operands go straight to output; operators are stacked and
    drained by precedence/associativity. Nodes whose type appears in
    recurse_types are flattened recursively into the output.
    """
    output = []
    operators = []  # operator stack; top is operators[-1]
    for node in nodes:
        if isinstance(node, OperatorNode):
            # Drain out all operators whose precedence is gte the node's...
            cmp_operator = node.operator
            while operators:
                current_operator = operators[-1].operator
                # Left-associative operators of equal precedence also drain.
                if current_operator.precedence > cmp_operator.precedence or \
                   current_operator.precedence == cmp_operator.precedence and current_operator.association == Association.left:
                    output.append(operators.pop())
                else:
                    break
            operators.append(node)
        else:
            if recurse_types is not None and node.node_type in recurse_types:
                output.extend(infix_to_postfix(node.children, recurse_types=recurse_types))
            else:
                output.append(node)
    # Remaining operators flush in reverse (stack) order.
    return output + list(reversed(operators))
def postfix_to_optree(nodes):
    """Convert a list of nodes in postfix order to an Optree.

    Repeatedly reduces the first operator with its operands until a single
    node remains; a lone operand is wrapped in an identity OptreeNode.

    :raises OperatorError: on an empty list or a dangling operator.
    """
    while len(nodes) > 1:
        nodes = _reduce(nodes)
    if len(nodes) == 0:
        raise OperatorError("Empty node list")
    node = nodes[0]
    if isinstance(node, OperatorNode):
        raise OperatorError("Operator without operands")
    if isinstance(node, OptreeNode):
        return node
    # Single bare operand: wrap in an identity node (no operator).
    return OptreeNode(None, (node, ))
def _reduce(nodes):
    """Finds the first operator in the list, converts it and its operands to a OptreeNode, then
    returns a new list with the operator and operands replaced by the new OptreeNode.

    :raises OperatorError: if no operator exists or it lacks enough operands.
    """
    # Locate the first OperatorNode; everything before it is operands.
    i = 0
    while i < len(nodes):
        if isinstance(nodes[i], OperatorNode):
            break
        else:
            i += 1
    if i == len(nodes):
        raise OperatorError("No operator found")
    operator_node = nodes[i]
    operator = operator_node.operator
    # The operator consumes `cardinality` operands immediately before it.
    operands_lbound = i - operator.cardinality
    if operands_lbound < 0:
        raise OperatorError("Insufficient operands for operator {0}".format(operator.symbol))
    return nodes[:operands_lbound] + \
        [OptreeNode(operator_node, tuple(nodes[operands_lbound:i]))] + \
        nodes[i+1:]
def pprint(root, depth=0, space_unit="  "):
    """Pretty print an optree, starting at root.

    Recursively prints operator nodes with their symbol (identity nodes show
    "None -> IDENTITY") and indents operands one level deeper.
    """
    spacing = space_unit * depth
    if isinstance(root, OptreeNode):
        print("{0}Operator ({1})".format(spacing, root.opnode.operator.symbol if root.opnode else "None -> IDENTITY"))
        for operand in root.operands:
            pprint(operand, depth + 1)
    else:
        # Leaf operand: print its repr with a bullet.
        print("{0}• {1}".format(spacing, root))
def get_arguments(self):
    """
    Extracts the specific arguments of this CLI
    """
    ApiCli.get_arguments(self)
    name = self.args.pluginName
    if name is not None:
        self.pluginName = name
def _process_properties(self, properties):
    """
    Transforms the command line properties into python dictionary

    Each property is a "key=value" string; parsed pairs are stored in
    self._properties. When ``properties`` is None, nothing is touched.
    :return:
    """
    if properties is not None:
        self._properties = {}
        for p in properties:
            # Split on the FIRST '=' only, so values may themselves contain
            # '=' (the original split('=') truncated such values).
            d = p.split('=', 1)
            self._properties[d[0]] = d[1]
def add_arguments(self):
    """
    Add the specific arguments of this CLI

    Registers all metric-definition options; note isDisabled is constrained
    to the strings 'true'/'false' by argparse choices.
    """
    MetricCommon.add_arguments(self)
    self.parser.add_argument('-n', '--metric-name', dest='metricName', action='store',
                             required=True, metavar='metric_name', help='Metric identifier')
    self.parser.add_argument('-d', '--display-name', dest='displayName', action='store',
                             required=True, metavar='display_name', help='Metric display name')
    self.parser.add_argument('-s', '--display-name-short', dest='displayNameShort', action='store',
                             required=True, metavar='display_short_name', help='Metric short display name')
    self.parser.add_argument('-i', '--description', dest='description', action='store',
                             required=not self.update, metavar='description', help='Metric description')
    self.parser.add_argument('-g', '--aggregate', dest='aggregate', action='store',
                             required=True, choices=['avg', 'max', 'min', 'sum'],
                             help='Metric default aggregate')
    self.parser.add_argument('-u', '--unit', dest='unit', action='store',
                             required=False, choices=['percent', 'number', 'bytecount', 'duration'],
                             help='Metric unit')
    self.parser.add_argument('-r', '--resolution', dest='resolution', action='store', metavar='resolution',
                             required=False, help='Metric default resolution')
    self.parser.add_argument('-y', '--type', dest='type', action='store', default=None,
                             required=False, metavar='type', help='Sets the type metadata field')
    self.parser.add_argument('-x', '--is-disabled', dest='isDisabled', action='store', default=None,
                             required=False,
                             choices=['true', 'false'], help='Enable or disable the metric definition')
def get_arguments(self):
    """
    Extracts the specific arguments of this CLI

    Copies parsed options onto the instance, then builds the metric
    definition payload (path, JSON body, headers) for the API call.
    """
    MetricCommon.get_arguments(self)
    if self.args.metricName is not None:
        self.metricName = self.args.metricName
    if self.args.displayName is not None:
        self.displayName = self.args.displayName
    if self.args.displayNameShort is not None:
        self.displayNameShort = self.args.displayNameShort
    if self.args.description is not None:
        self.description = self.args.description
    if self.args.aggregate is not None:
        self.aggregate = self.args.aggregate
    if self.args.unit is not None:
        self.unit = self.args.unit
    if self.args.resolution is not None:
        self.resolution = self.args.resolution
    if self.args.isDisabled is not None:
        self.isDisabled = self.args.isDisabled
    if self.args.type is not None:
        self.type = self.args.type
    data = {}
    if self.metricName is not None:
        data['name'] = self.metricName
    if self.displayName is not None:
        data['displayName'] = self.displayName
    if self.displayNameShort is not None:
        data['displayNameShort'] = self.displayNameShort
    if self.description is not None:
        data['description'] = self.description
    if self.aggregate is not None:
        data['defaultAggregate'] = self.aggregate
    if self.unit is not None:
        data['unit'] = self.unit
    if self.resolution is not None:
        data['defaultResolutionMS'] = self.resolution
    if self.isDisabled is not None:
        # BUG FIX: argparse restricts --is-disabled to 'true'/'false', but the
        # original compared against 'yes', so isDisabled could never be True.
        data['isDisabled'] = True if self.isDisabled == 'true' else False
    if self.type is not None:
        data['type'] = self.type
    self.path = "v1/metrics/{0}".format(self.metricName)
    self.data = json.dumps(data, sort_keys=True)
    self.headers = {'Content-Type': 'application/json', "Accept": "application/json"}
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
ApiCli.get_arguments(self)
self._alarm_name = self.args.alarm_name if self.args.alarm_name is not None else None |
def read(self):
"""
Load the metrics file from the given path
"""
f = open(self.path, "r")
self.manifest_json = f.read() |
def load(self):
"""
Read the file and parse JSON into dictionary
"""
manifest = PluginManifest(self.file_path)
manifest.get()
self.manifest = manifest.get_manifest() |
def getMetricDefinition(self, name):
"""
Looks up the metric definition from the definitions from the API call
"""
metric = None
for m in self.metric_definitions:
if m['name'] == name:
metric = m
break
return metric |
def printMetricsHeader(self, m, d):
"""
Prints out table header based on the size of the data in columns
"""
mstr = "Metric Name"
dstr = "Description"
print('|{0}{1}|{2}{3}|'.format(mstr, ' ' * (m - len(mstr)), dstr, ' ' * (d - len(dstr))))
print('|:{0}|:{1}|'.format('-' * (m - 1), '-' * (d - 1))) |
def getFieldsColumnLengths(self):
"""
Gets the maximum length of each column in the field table
"""
nameLen = 0
descLen = 0
for f in self.fields:
nameLen = max(nameLen, len(f['title']))
descLen = max(descLen, len(f['description']))
return (nameLen, descLen) |
def getMetricsColumnLengths(self):
"""
Gets the maximum length of each column
"""
displayLen = 0
descLen = 0
for m in self.metrics:
displayLen = max(displayLen, len(m['displayName']))
descLen = max(descLen, len(m['description']))
return (displayLen, descLen) |
def escapeUnderscores(self):
"""
Escape underscores so that the markdown is correct
"""
new_metrics = []
for m in self.metrics:
m['name'] = m['name'].replace("_", "\_")
new_metrics.append(m)
self.metrics = new_metrics |
def printFieldsHeader(self, f, d):
"""
Prints out table header based on the size of the data in columns
"""
fstr = "Field Name"
dstr = "Description"
f = max(f, len(fstr))
d = max(d, len(dstr))
print('|{0}{1}|{2}{3}|'.format(fstr, ' ' * (f - len(fstr)), dstr, ' ' * (d - len(dstr))))
print('|:{0}|:{1}|'.format('-' * (f - 1), '-' * (d - 1)))
return (f, d) |
def printMetrics(self, m, d):
"""
Prints out table rows based on the size of the data in columns
"""
for metric in self.metrics:
mstr = metric['displayName']
dstr = metric['description']
mlen = m - len(mstr)
dlen = d - len(dstr)
print("|{0}{1}|{2}{3}|".format(mstr, ' ' * mlen, dstr, ' ' * dlen)) |
def printFields(self, f, d):
"""
Prints out table rows based on the size of the data in columns
"""
for field in self.fields:
fstr = field["title"]
dstr = field["description"]
flen = f - len(fstr)
dlen = d - len(dstr)
print("|{0}{1}|{2}{3}|".format(fstr, ' ' * flen, dstr, ' ' * dlen)) |
def outputFieldMarkdown(self):
"""
Sends the field definitions ot standard out
"""
f, d = self.getFieldsColumnLengths()
fc, dc = self.printFieldsHeader(f, d)
f = max(fc, f)
d = max(dc, d)
self.printFields(f, d) |
def outputMetricMarkdown(self):
"""
Sends the markdown of the metric definitions to standard out
"""
self.escapeUnderscores()
m, d = self.getMetricsColumnLengths()
self.printMetricsHeader(m, d)
self.printMetrics(m, d) |
def generateMarkdown(self):
"""
Look up each of the metrics and then output in Markdown
"""
self.generateMetricDefinitions()
self.generateFieldDefinitions()
self.generateDashboardDefinitions()
self.outputMarkdown() |
def parse(self, text):
"""Attempt to parse source code."""
self.original_text = text
try:
return getattr(self, self.entry_point)(text)
except (DeadEnd) as exc:
raise ParserError(self.most_consumed, "Failed to parse input") from exc
return tree |
def _attempting(self, text):
"""Keeps track of the furthest point in the source code the parser has reached to this point."""
consumed = len(self.original_text) - len(text)
self.most_consumed = max(consumed, self.most_consumed) |
    def add_arguments(self):
        """
        Add specific command line arguments for this command.

        Registers the measurement-get options (output format, metric name,
        aggregate, sample rate, source, time range, date format) on top of
        the defaults provided by ApiCli.
        """
        # Call our parent to add the default arguments
        ApiCli.add_arguments(self)
        # Command specific arguments
        # NOTE(review): this help text says the default format is 'raw', but
        # get_arguments() falls back to 'json' when -f is omitted — confirm
        # which is intended.
        self.parser.add_argument('-f', '--format', dest='format', action='store', required=False,
                                 choices=['csv', 'json', 'raw', 'xml'], help='Output format. Default is raw')
        self.parser.add_argument('-n', '--name', dest='metric_name', action='store', required=True,
                                 metavar="metric_name", help='Metric identifier')
        self.parser.add_argument('-g', '--aggregate', dest='aggregate', action='store', required=False,
                                 choices=['sum', 'avg', 'max', 'min'], help='Metric default aggregate')
        self.parser.add_argument('-r', '--sample', dest='sample', action='store', type=int, metavar="sample",
                                 help='Down sample rate sample in seconds')
        self.parser.add_argument('-s', '--source', dest='source', action='store', metavar="source", required=True,
                                 help='Source of measurement')
        self.parser.add_argument('-b', '--start', dest='start', action='store', required=True, metavar="start",
                                 help='Start of time range as ISO 8601 string or epoch seconds')
        self.parser.add_argument('-d', '--end', dest='end', action='store', metavar="end", required=False,
                                 help='End of time range as ISO 8601 string or epoch seconds')
        # '%%s' is doubled so argparse's help formatter prints a literal '%s'.
        self.parser.add_argument('-o', '--date-format', dest='date_format', action='store', metavar="format",
                                 required=False,
                                 help='For CSV, JSON, and XML output formats dates (see Python date.strftime). ' +
                                      'Default format is %%s')
    def get_arguments(self):
        """
        Extracts the specific arguments of this CLI.

        Copies the parsed command line options onto the instance, converts
        the requested time range to epoch milliseconds, and builds the
        request path and URL parameters for the measurement API call.
        """
        ApiCli.get_arguments(self)
        if self.args.metric_name is not None:
            self._metric_name = self.args.metric_name
        if self.args.sample is not None:
            self.sample = self.args.sample
        if self.args.source is not None:
            self.source = self.args.source
        else:
            self.source = None
        if self.args.aggregate is not None:
            self.aggregate = self.args.aggregate
        else:
            # Default aggregate when -g is not given.
            self.aggregate = "avg"
        if self.args.format is not None:
            self.format = self.args.format
        else:
            # NOTE(review): falls back to 'json', but the -f help text in
            # add_arguments claims the default is 'raw' — confirm intent.
            self.format = "json"
        if self.args.date_format is not None:
            self.date_format = self.args.date_format
        # strftime("%s") (epoch seconds) is a platform-specific extension —
        # presumably fine on Linux/macOS, not portable to Windows. TODO confirm.
        start_time = int(self.parse_time_date(self.args.start).strftime("%s"))
        # If the end time is not specified then
        # default to the current time
        if self.args.end is None:
            # self.now is presumably initialized by the base class — verify.
            stop_time = int(self.now.strftime("%s"))
        else:
            stop_time = int(self.parse_time_date(self.args.end).strftime("%s"))
        # Convert to epoch time in milli-seconds
        start_time *= 1000
        stop_time *= 1000
        self.path = "v1/measurements/{0}".format(self._metric_name)
        url_parameters = {"start": str(start_time),
                          "end": str(stop_time),
                          "sample": str(self.sample),
                          "agg": self.aggregate}
        if self.source is not None:
            url_parameters['source'] = self.source
        self.url_parameters = url_parameters
def parse_time_date(self, s):
"""
Attempt to parse the passed in string into a valid datetime.
If we get a parse error then assume the string is an epoch time
and convert to a datetime.
"""
try:
ret = parser.parse(str(s))
except ValueError:
try:
ret = datetime.fromtimestamp(int(s))
except TypeError:
ret = None
return ret |
def output_csv(self, text):
"""
Output results in CSV format
"""
payload = json.loads(text)
# Print CSV header
print("{0},{1},{2},{3},{4}".format('timestamp', 'metric', 'aggregate', 'source', 'value'))
metric_name = self._metric_name
# Loop through the aggregates one row per timestamp, and 1 or more source/value pairs
for r in payload['result']['aggregates']['key']:
timestamp = self._format_timestamp(r[0][0])
# timestamp = string.strip(timestamp, ' ')
# timestamp = string.strip(timestamp, "'")
for s in r[1]:
print('{0},"{1}","{2}","{3}",{4}'.format(timestamp, metric_name, self.aggregate, s[0], s[1])) |
def output_json(self, text):
"""
Output results in structured JSON format
"""
payload = json.loads(text)
data = []
metric_name = self._metric_name
for r in payload['result']['aggregates']['key']:
timestamp = self._format_timestamp(r[0][0])
for s in r[1]:
data.append({
"timestamp": timestamp,
"metric": metric_name,
"aggregate": self.aggregate,
"source": s[0],
"value": s[1],
})
payload = {"data": data}
out = json.dumps(payload, indent=self._indent, separators=(',', ': '))
print(self.colorize_json(out)) |
def output_raw(self, text):
"""
Output results in raw JSON format
"""
payload = json.loads(text)
out = json.dumps(payload, sort_keys=True, indent=self._indent, separators=(',', ': '))
print(self.colorize_json(out)) |
def output_xml(self, text):
"""
Output results in JSON format
"""
# Create the main document nodes
document = Element('results')
comment = Comment('Generated by TrueSight Pulse measurement-get CLI')
document.append(comment)
aggregates = SubElement(document, 'aggregates')
aggregate = SubElement(aggregates, 'aggregate')
measurements = SubElement(aggregate, 'measurements')
# Parse the JSON result so we can translate to XML
payload = json.loads(text)
# Current only support a single metric, if we move to the batch API then
# we can handle multiple
metric_name = self._metric_name
# Loop through the aggregates one row per timestamp, and 1 or more source/value pairs
for r in payload['result']['aggregates']['key']:
timestamp = self._format_timestamp(r[0][0])
for s in r[1]:
# Each timestamp, metric, source, values is placed in a measure tag
measure_node = SubElement(measurements, 'measure')
source = s[0]
value = str(s[1])
ts_node = SubElement(measure_node, 'timestamp')
ts_node.text = str(timestamp)
metric_node = SubElement(measure_node, 'metric')
metric_node.text = metric_name
metric_node = SubElement(measure_node, 'aggregate')
metric_node.text = self.aggregate
source_node = SubElement(measure_node, 'source')
source_node.text = source
value_node = SubElement(measure_node, 'value')
value_node.text = value
rough_string = ElementTree.tostring(document, 'utf-8')
reparse = minidom.parseString(rough_string)
output = reparse.toprettyxml(indent=" ")
print(self.colorize_xml(output)) |
def _handle_results(self):
"""
Call back function to be implemented by the CLI.
"""
# Only process if we get HTTP result of 200
if self._api_result.status_code == requests.codes.ok:
if self.format == "json":
self.output_json(self._api_result.text)
elif self.format == "csv":
self.output_csv(self._api_result.text)
elif self.format == "raw":
self.output_raw(self._api_result.text)
elif self.format == "xml":
self.output_xml(self._api_result.text)
else:
pass |
def trimmed_pred_default(node, parent):
    """The default predicate used in Node.trimmed.

    True for ParseNode instances that are empty or terminal; False for
    everything else (including non-ParseNode values).
    """
    if not isinstance(node, ParseNode):
        return False
    return node.is_empty or node.is_type(ParseNodeType.terminal)
def pprint(root, depth=0, space_unit=" ", *, source_len=0, file=None):
    """Pretty print a parse tree.

    :param root: ParseNode-like object, or a bare string for a terminal.
    :param depth: current nesting depth; controls indentation.
    :param space_unit: string repeated ``depth`` times as indentation.
    :param source_len: original source length, used to resolve negative
        node positions to absolute offsets.
    :param file: stream passed to print(); None means stdout.
    """
    spacing = space_unit * depth
    if isinstance(root, str):
        # Bare strings carry no position information.
        print("{0}terminal@(?): {1}".format(spacing, root), file=file)
    else:
        if root.position is None:
            position = -1
        elif root.position < 0:
            # Negative positions are offsets from the end of the source.
            position = source_len + root.position
        else:
            position = root.position
        if root.is_value:
            print("{0}{1}@({2}:{3}):\t{4}".format(spacing, root.node_type, position, root.consumed, root.svalue), file=file)
        else:
            print("{0}{1}@({2}:{3}):".format(spacing, root.node_type, position, root.consumed), file=file)
        for child in root.children:
            # BUG FIX: propagate space_unit so a custom indentation unit
            # survives recursion (previously it reset to the default " ").
            pprint(child, depth + 1, space_unit, source_len=source_len, file=file)
def zero_or_more(extractor, *, ignore_whitespace=False):
    """Returns a partial of _get_repetition with bounds set to (0, None) that accepts only a text
    argument.

    The (0, None) bounds correspond to unbounded repetition with no minimum.

    :param extractor: extractor callable to repeat.
    :param ignore_whitespace: forwarded to _get_repetition.
    """
    return partial(_get_repetition, extractor, bounds=(0, None), ignore_whitespace=ignore_whitespace)
def one_or_more(extractor, *, ignore_whitespace=False):
    """Returns a partial of _get_repetition with bounds set to (1, None) that accepts only a text
    argument.

    The (1, None) bounds correspond to unbounded repetition with a minimum
    of one match.

    :param extractor: extractor callable to repeat.
    :param ignore_whitespace: forwarded to _get_repetition.
    """
    return partial(_get_repetition, extractor, bounds=(1, None), ignore_whitespace=ignore_whitespace)
def repeated(extractor, times, *, ignore_whitespace=False):
    """Returns a partial of _get_repetition with bounds set to (times, times) that accepts only a text
    argument.

    The (times, times) bounds correspond to exactly ``times`` repetitions.

    :param extractor: extractor callable to repeat.
    :param times: exact number of repetitions required.
    :param ignore_whitespace: forwarded to _get_repetition.
    """
    return partial(_get_repetition,
                   extractor,
                   bounds=(times, times),
                   ignore_whitespace=ignore_whitespace)
def repetition(extractor, bounds, *, ignore_whitespace=False):
    """Returns a partial of _get_repetition that accepts only a text argument.

    :param extractor: extractor callable to repeat.
    :param bounds: (min, max) repetition bounds forwarded to _get_repetition.
    :param ignore_whitespace: forwarded to _get_repetition.
    """
    return partial(_get_repetition, extractor, bounds=bounds, ignore_whitespace=ignore_whitespace)
def _get_terminal(value, text):
    """Checks the beginning of text for a value.

    :param value: literal string the text must start with.
    :param text: remaining source text.
    :return: a terminal ParseNode filled out for the matched value.
    :raises DeadEnd: when text is empty or does not start with value.
    """
    if not text or not text.startswith(value):
        raise DeadEnd()
    return ParseNode(ParseNodeType.terminal,
                     children=[value],
                     consumed=len(value),
                     position=-len(text))
def _get_concatenation(extractors, text, *, ignore_whitespace=True):
    """Returns a concatenation ParseNode whose children are the nodes returned by each of the
    methods in the extractors enumerable.

    If ignore_whitespace is True, whitespace will be ignored and then attached to the child it
    preceeded.

    :param extractors: non-empty enumerable of extractor callables, applied in order.
    :param text: source text to consume.
    :raises DeadEnd: presumably propagated from a failing child extractor
        (via _call_extractor) — not raised directly here; verify.
    """
    ignored_ws, use_text = _split_ignored(text, ignore_whitespace)
    # Take the first extractor; the rest are handled by the recursive call.
    extractor, *remaining = extractors
    child = _call_extractor(extractor, use_text)
    child.add_ignored(ignored_ws)
    # TODO: Should I set node.position = -len(text) for the case that ignored whitespace will cause
    # the first child's position to not be the whitespace, and therefore the concatenation's
    # position will be the first non-whitespace? I think not, but I'm adding this note in
    # case that causes an issue I'm not seeing at the moment.
    node = ParseNode(ParseNodeType.concatenation, children=[child])
    if remaining:
        # child.consumed will include ignored whitespace, so we base the text we pass on on text rather
        # than use_text.
        return node.merged(_get_concatenation(remaining,
                                              text[child.consumed:],
                                              ignore_whitespace=ignore_whitespace))
    else:
        return node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.