Dataset columns (string lengths):
signature: 8 – 3.44k
body: 0 – 1.41M
docstring: 1 – 122k
id: 5 – 17
def safe_dump_all(documents, stream=None, **kwds):
return dump_all(documents, stream, Dumper=SafeDumper, **kwds)<EOL>
Serialize a sequence of Python objects into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead.
f8263:m13
def safe_dump(data, stream=None, **kwds):
return dump_all([data], stream, Dumper=SafeDumper, **kwds)<EOL>
Serialize a Python object into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead.
f8263:m14
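A minimal usage sketch for the two safe_dump variants above (standard PyYAML behavior; the import name `yaml` is assumed):

import yaml

data = {"name": "example", "ports": [80, 443]}

# stream is None, so the produced string is returned.
print(yaml.safe_dump(data, default_flow_style=False))

# With a stream, the documents are written to it and None is returned.
with open("out.yml", "w") as f:
    yaml.safe_dump_all([data, {"env": "prod"}], f)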
def add_implicit_resolver(tag, regexp, first=None,<EOL>Loader=Loader, Dumper=Dumper):
Loader.add_implicit_resolver(tag, regexp, first)<EOL>Dumper.add_implicit_resolver(tag, regexp, first)<EOL>
Add an implicit scalar detector. If an implicit scalar value matches the given regexp, the corresponding tag is assigned to the scalar. first is a sequence of possible initial characters or None.
f8263:m15
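A sketch of registering an implicit resolver per the signature above. The `!env` tag and the ${...} pattern are purely illustrative; `first` narrows the check to scalars starting with '$':

import re
import yaml

# Hypothetical tag: unquoted scalars like ${HOME} get tagged !env implicitly.
yaml.add_implicit_resolver("!env", re.compile(r"^\$\{[^}]+\}$"), first=["$"])

A resolver only assigns the tag; to actually construct a Python value for it, pair it with add_constructor (see below).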
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
Loader.add_path_resolver(tag, path, kind)<EOL>Dumper.add_path_resolver(tag, path, kind)<EOL>
Add a path based resolver for the given tag. A path is a list of keys that forms a path to a node in the representation tree. Keys can be string values, integers, or None.
f8263:m16
def add_constructor(tag, constructor, Loader=Loader):
Loader.add_constructor(tag, constructor)<EOL>
Add a constructor for the given tag. Constructor is a function that accepts a Loader instance and a node object and produces the corresponding Python object.
f8263:m17
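Continuing the illustrative `!env` tag, a constructor matching the contract described above (accepts the Loader instance and a node, returns the Python object):

import os
import yaml

def env_constructor(loader, node):
    # construct_scalar() yields the raw "${VAR}" string; look it up in the environment.
    value = loader.construct_scalar(node)
    return os.environ.get(value[2:-1], "")

yaml.add_constructor("!env", env_constructor)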
def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
Loader.add_multi_constructor(tag_prefix, multi_constructor)<EOL>
Add a multi-constructor for the given tag prefix. Multi-constructor is called for a node if its tag starts with tag_prefix. Multi-constructor accepts a Loader instance, a tag suffix, and a node object and produces the corresponding Python object.
f8263:m18
def add_representer(data_type, representer, Dumper=Dumper):
Dumper.add_representer(data_type, representer)<EOL>
Add a representer for the given type. Representer is a function accepting a Dumper instance and an instance of the given data type and producing the corresponding representation node.
f8263:m19
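For the dumping direction, a representer sketch: here Python's built-in complex type is serialized under an assumed !complex tag.

import yaml

def complex_representer(dumper, data):
    # represent_scalar() builds a scalar node with the given tag and value.
    return dumper.represent_scalar("!complex", str(data))

yaml.add_representer(complex, complex_representer)
print(yaml.dump(1 + 2j))  # -> !complex '(1+2j)'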
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
Dumper.add_multi_representer(data_type, multi_representer)<EOL>
Add a representer for the given type. Multi-representer is a function accepting a Dumper instance and an instance of the given data type or subtype and producing the corresponding representation node.
f8263:m20
def from_yaml(cls, loader, node):
return loader.construct_yaml_object(node, cls)<EOL>
Convert a representation node to a Python object.
f8263:c1:m0
def to_yaml(cls, dumper, data):
return dumper.represent_yaml_object(cls.yaml_tag, data, cls,<EOL>flow_style=cls.yaml_flow_style)<EOL>
Convert a Python object to a representation node.
f8263:c1:m1
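from_yaml and to_yaml above are the classmethod hooks behind PyYAML's YAMLObject pattern; a subclass only needs to declare yaml_tag (and optionally yaml_flow_style):

import yaml

class Monster(yaml.YAMLObject):
    yaml_tag = "!Monster"
    yaml_flow_style = False

    def __init__(self, name, hp):
        self.name = name
        self.hp = hp

m = yaml.load("!Monster {name: Dragon, hp: 40}", Loader=yaml.Loader)
print(m.name, m.hp)   # Dragon 40
print(yaml.dump(m))   # round-trips back to a !Monster mapping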
def __init__(self):
<EOL>self.done = False<EOL>self.flow_level = <NUM_LIT:0><EOL>self.tokens = []<EOL>self.fetch_stream_start()<EOL>self.tokens_taken = <NUM_LIT:0><EOL>self.indent = -<NUM_LIT:1><EOL>self.indents = []<EOL>self.allow_simple_key = True<EOL>self.possible_simple_keys = {}<EOL>
Initialize the scanner.
f8269:c2:m0
def scan(stream, Loader=Loader):
loader = Loader(stream)<EOL>try:<EOL><INDENT>while loader.check_token():<EOL><INDENT>yield loader.get_token()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>
Scan a YAML stream and produce scanning tokens.
f8278:m0
def parse(stream, Loader=Loader):
loader = Loader(stream)<EOL>try:<EOL><INDENT>while loader.check_event():<EOL><INDENT>yield loader.get_event()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>
Parse a YAML stream and produce parsing events.
f8278:m1
def compose(stream, Loader=Loader):
loader = Loader(stream)<EOL>try:<EOL><INDENT>return loader.get_single_node()<EOL><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>
Parse the first YAML document in a stream and produce the corresponding representation tree.
f8278:m2
def compose_all(stream, Loader=Loader):
loader = Loader(stream)<EOL>try:<EOL><INDENT>while loader.check_node():<EOL><INDENT>yield loader.get_node()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>
Parse all YAML documents in a stream and produce corresponding representation trees.
f8278:m3
def load(stream, Loader=Loader):
loader = Loader(stream)<EOL>try:<EOL><INDENT>return loader.get_single_data()<EOL><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>
Parse the first YAML document in a stream and produce the corresponding Python object.
f8278:m4
def load_all(stream, Loader=Loader):
loader = Loader(stream)<EOL>try:<EOL><INDENT>while loader.check_data():<EOL><INDENT>yield loader.get_data()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>
Parse all YAML documents in a stream and produce corresponding Python objects.
f8278:m5
def safe_load(stream):
return load(stream, SafeLoader)<EOL>
Parse the first YAML document in a stream and produce the corresponding Python object. Resolve only basic YAML tags.
f8278:m6
def safe_load_all(stream):
return load_all(stream, SafeLoader)<EOL>
Parse all YAML documents in a stream and produce corresponding Python objects. Resolve only basic YAML tags.
f8278:m7
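Since load_all above yields documents lazily from a single loader, safe_load_all can stream a multi-document file without materializing every document at once. A short sketch:

import yaml

stream = "---\na: 1\n---\nb: 2\n"
for doc in yaml.safe_load_all(stream):
    print(doc)   # {'a': 1}, then {'b': 2}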
def emit(events, stream=None, Dumper=Dumper,<EOL>canonical=None, indent=None, width=None,<EOL>allow_unicode=None, line_break=None):
getvalue = None<EOL>if stream is None:<EOL><INDENT>stream = io.StringIO()<EOL>getvalue = stream.getvalue<EOL><DEDENT>dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,<EOL>allow_unicode=allow_unicode, line_break=line_break)<EOL>try:<EOL><INDENT>for event in events:<EOL><INDENT>dumper.emit(event)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>dumper.dispose()<EOL><DEDENT>if getvalue:<EOL><INDENT>return getvalue()<EOL><DEDENT>
Emit YAML parsing events into a stream. If stream is None, return the produced string instead.
f8278:m8
def serialize_all(nodes, stream=None, Dumper=Dumper,<EOL>canonical=None, indent=None, width=None,<EOL>allow_unicode=None, line_break=None,<EOL>encoding=None, explicit_start=None, explicit_end=None,<EOL>version=None, tags=None):
getvalue = None<EOL>if stream is None:<EOL><INDENT>if encoding is None:<EOL><INDENT>stream = io.StringIO()<EOL><DEDENT>else:<EOL><INDENT>stream = io.BytesIO()<EOL><DEDENT>getvalue = stream.getvalue<EOL><DEDENT>dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,<EOL>allow_unicode=allow_unicode, line_break=line_break,<EOL>encoding=encoding, version=version, tags=tags,<EOL>explicit_start=explicit_start, explicit_end=explicit_end)<EOL>try:<EOL><INDENT>dumper.open()<EOL>for node in nodes:<EOL><INDENT>dumper.serialize(node)<EOL><DEDENT>dumper.close()<EOL><DEDENT>finally:<EOL><INDENT>dumper.dispose()<EOL><DEDENT>if getvalue:<EOL><INDENT>return getvalue()<EOL><DEDENT>
Serialize a sequence of representation trees into a YAML stream. If stream is None, return the produced string instead.
f8278:m9
def serialize(node, stream=None, Dumper=Dumper, **kwds):
return serialize_all([node], stream, Dumper=Dumper, **kwds)<EOL>
Serialize a representation tree into a YAML stream. If stream is None, return the produced string instead.
f8278:m10
def dump_all(documents, stream=None, Dumper=Dumper,<EOL>default_style=None, default_flow_style=None,<EOL>canonical=None, indent=None, width=None,<EOL>allow_unicode=None, line_break=None,<EOL>encoding=None, explicit_start=None, explicit_end=None,<EOL>version=None, tags=None):
getvalue = None<EOL>if stream is None:<EOL><INDENT>if encoding is None:<EOL><INDENT>stream = io.StringIO()<EOL><DEDENT>else:<EOL><INDENT>stream = io.BytesIO()<EOL><DEDENT>getvalue = stream.getvalue<EOL><DEDENT>dumper = Dumper(stream, default_style=default_style,<EOL>default_flow_style=default_flow_style,<EOL>canonical=canonical, indent=indent, width=width,<EOL>allow_unicode=allow_unicode, line_break=line_break,<EOL>encoding=encoding, version=version, tags=tags,<EOL>explicit_start=explicit_start, explicit_end=explicit_end)<EOL>try:<EOL><INDENT>dumper.open()<EOL>for data in documents:<EOL><INDENT>dumper.represent(data)<EOL><DEDENT>dumper.close()<EOL><DEDENT>finally:<EOL><INDENT>dumper.dispose()<EOL><DEDENT>if getvalue:<EOL><INDENT>return getvalue()<EOL><DEDENT>
Serialize a sequence of Python objects into a YAML stream. If stream is None, return the produced string instead.
f8278:m11
def dump(data, stream=None, Dumper=Dumper, **kwds):
return dump_all([data], stream, Dumper=Dumper, **kwds)<EOL>
Serialize a Python object into a YAML stream. If stream is None, return the produced string instead.
f8278:m12
def safe_dump_all(documents, stream=None, **kwds):
return dump_all(documents, stream, Dumper=SafeDumper, **kwds)<EOL>
Serialize a sequence of Python objects into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead.
f8278:m13
def safe_dump(data, stream=None, **kwds):
return dump_all([data], stream, Dumper=SafeDumper, **kwds)<EOL>
Serialize a Python object into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead.
f8278:m14
def add_implicit_resolver(tag, regexp, first=None,<EOL>Loader=Loader, Dumper=Dumper):
Loader.add_implicit_resolver(tag, regexp, first)<EOL>Dumper.add_implicit_resolver(tag, regexp, first)<EOL>
Add an implicit scalar detector. If an implicit scalar value matches the given regexp, the corresponding tag is assigned to the scalar. first is a sequence of possible initial characters or None.
f8278:m15
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
Loader.add_path_resolver(tag, path, kind)<EOL>Dumper.add_path_resolver(tag, path, kind)<EOL>
Add a path based resolver for the given tag. A path is a list of keys that forms a path to a node in the representation tree. Keys can be string values, integers, or None.
f8278:m16
def add_constructor(tag, constructor, Loader=Loader):
Loader.add_constructor(tag, constructor)<EOL>
Add a constructor for the given tag. Constructor is a function that accepts a Loader instance and a node object and produces the corresponding Python object.
f8278:m17
def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
Loader.add_multi_constructor(tag_prefix, multi_constructor)<EOL>
Add a multi-constructor for the given tag prefix. Multi-constructor is called for a node if its tag starts with tag_prefix. Multi-constructor accepts a Loader instance, a tag suffix, and a node object and produces the corresponding Python object.
f8278:m18
def add_representer(data_type, representer, Dumper=Dumper):
Dumper.add_representer(data_type, representer)<EOL>
Add a representer for the given type. Representer is a function accepting a Dumper instance and an instance of the given data type and producing the corresponding representation node.
f8278:m19
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
Dumper.add_multi_representer(data_type, multi_representer)<EOL>
Add a representer for the given type. Multi-representer is a function accepting a Dumper instance and an instance of the given data type or subtype and producing the corresponding representation node.
f8278:m20
@classmethod<EOL><INDENT>def from_yaml(cls, loader, node):<DEDENT>
return loader.construct_yaml_object(node, cls)<EOL>
Convert a representation node to a Python object.
f8278:c1:m0
@classmethod<EOL><INDENT>def to_yaml(cls, dumper, data):<DEDENT>
return dumper.represent_yaml_object(cls.yaml_tag, data, cls,<EOL>flow_style=cls.yaml_flow_style)<EOL>
Convert a Python object to a representation node.
f8278:c1:m1
def __init__(self):
<EOL>self.done = False<EOL>self.flow_level = <NUM_LIT:0><EOL>self.tokens = []<EOL>self.fetch_stream_start()<EOL>self.tokens_taken = <NUM_LIT:0><EOL>self.indent = -<NUM_LIT:1><EOL>self.indents = []<EOL>self.allow_simple_key = True<EOL>self.possible_simple_keys = {}<EOL>
Initialize the scanner.
f8283:c2:m0
def push_token(self, tok):
if self.debug >= <NUM_LIT:1>:<EOL><INDENT>print("<STR_LIT>" + repr(tok))<EOL><DEDENT>self.pushback.appendleft(tok)<EOL>
Push a token onto the stack popped by the get_token method
f8284:c0:m1
def push_source(self, newstream, newfile=None):
if isinstance(newstream, str):<EOL><INDENT>newstream = StringIO(newstream)<EOL><DEDENT>self.filestack.appendleft((self.infile, self.instream, self.lineno))<EOL>self.infile = newfile<EOL>self.instream = newstream<EOL>self.lineno = <NUM_LIT:1><EOL>if self.debug:<EOL><INDENT>if newfile is not None:<EOL><INDENT>print('<STR_LIT>' % (self.infile,))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>' % (self.instream,))<EOL><DEDENT><DEDENT>
Push an input source onto the lexer's input source stack.
f8284:c0:m2
def pop_source(self):
self.instream.close()<EOL>(self.infile, self.instream, self.lineno) = self.filestack.popleft()<EOL>if self.debug:<EOL><INDENT>print('<STR_LIT>'% (self.instream, self.lineno))<EOL><DEDENT>self.state = '<STR_LIT:U+0020>'<EOL>
Pop the input source stack.
f8284:c0:m3
def get_token(self):
if self.pushback:<EOL><INDENT>tok = self.pushback.popleft()<EOL>if self.debug >= <NUM_LIT:1>:<EOL><INDENT>print("<STR_LIT>" + repr(tok))<EOL><DEDENT>return tok<EOL><DEDENT>raw = self.read_token()<EOL>if self.source is not None:<EOL><INDENT>while raw == self.source:<EOL><INDENT>spec = self.sourcehook(self.read_token())<EOL>if spec:<EOL><INDENT>(newfile, newstream) = spec<EOL>self.push_source(newstream, newfile)<EOL><DEDENT>raw = self.get_token()<EOL><DEDENT><DEDENT>while raw == self.eof:<EOL><INDENT>if not self.filestack:<EOL><INDENT>return self.eof<EOL><DEDENT>else:<EOL><INDENT>self.pop_source()<EOL>raw = self.get_token()<EOL><DEDENT><DEDENT>if self.debug >= <NUM_LIT:1>:<EOL><INDENT>if raw != self.eof:<EOL><INDENT>print("<STR_LIT>" + repr(raw))<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT><DEDENT>return raw<EOL>
Get a token from the input stream (or from stack if it's nonempty)
f8284:c0:m4
def sourcehook(self, newfile, encoding='<STR_LIT:utf-8>'):
from codecs import open<EOL>if newfile[<NUM_LIT:0>] == '<STR_LIT:">':<EOL><INDENT>newfile = newfile[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>if isinstance(self.infile, str) and not os.path.isabs(newfile):<EOL><INDENT>newfile = os.path.join(os.path.dirname(self.infile), newfile)<EOL><DEDENT>return (newfile, open(newfile, "<STR_LIT:r>", encoding))<EOL>
Hook called on a filename to be sourced.
f8284:c0:m8
def error_leader(self, infile=None, lineno=None):
if infile is None:<EOL><INDENT>infile = self.infile<EOL><DEDENT>if lineno is None:<EOL><INDENT>lineno = self.lineno<EOL><DEDENT>return "<STR_LIT>" % (infile, lineno)<EOL>
Emit a C-compiler-like, Emacs-friendly error-message leader.
f8284:c0:m9
def tokenize(expr):
tokens = []<EOL>escape = False<EOL>cur_token = '<STR_LIT>'<EOL>for c in expr:<EOL><INDENT>if escape == True:<EOL><INDENT>cur_token += c<EOL>escape = False<EOL><DEDENT>else:<EOL><INDENT>if c == '<STR_LIT:\\>':<EOL><INDENT>escape = True<EOL>continue<EOL><DEDENT>elif c == '<STR_LIT:[>':<EOL><INDENT>if len(cur_token) > <NUM_LIT:0>:<EOL><INDENT>tokens.append(cur_token)<EOL>cur_token = '<STR_LIT>'<EOL><DEDENT><DEDENT>elif c == '<STR_LIT:]>':<EOL><INDENT>if len(cur_token) > <NUM_LIT:0>:<EOL><INDENT>tokens.append(int(cur_token))<EOL>cur_token = '<STR_LIT>'<EOL><DEDENT><DEDENT>elif c == '<STR_LIT:.>':<EOL><INDENT>if len(cur_token) > <NUM_LIT:0>:<EOL><INDENT>tokens.append(cur_token)<EOL>cur_token = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>cur_token += c<EOL><DEDENT><DEDENT><DEDENT>if len(cur_token) > <NUM_LIT:0>:<EOL><INDENT>tokens.append(cur_token)<EOL><DEDENT>return tokens<EOL>
Parse a string expression into a list of tokens that can be used as a path into a Python data structure.
f8285:m0
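Given the tokenizer rules above (dots separate keys, [N] produces an int index, backslash escapes the next character), a few illustrative calls:

tokenize("feed.tags[0].name")   # -> ['feed', 'tags', 0, 'name']
tokenize(r"feed.tag\.name")     # -> ['feed', 'tag.name']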
def jsonxs(data, expr, action=ACTION_GET, value=None, default=None):
tokens = tokenize(expr)<EOL>try:<EOL><INDENT>prev_path = None<EOL>cur_path = data<EOL>for token in tokens:<EOL><INDENT>prev_path = cur_path<EOL>if not token in cur_path and action in [ACTION_SET, ACTION_MKDICT, ACTION_MKLIST]:<EOL><INDENT>continue<EOL><DEDENT>cur_path = cur_path[token]<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>if default is not None:<EOL><INDENT>return default<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>if action == ACTION_GET:<EOL><INDENT>return cur_path<EOL><DEDENT>elif action == ACTION_DEL:<EOL><INDENT>del prev_path[token]<EOL><DEDENT>elif action == ACTION_SET:<EOL><INDENT>prev_path[token] = value<EOL><DEDENT>elif action == ACTION_APPEND:<EOL><INDENT>prev_path[token].append(value)<EOL><DEDENT>elif action == ACTION_INSERT:<EOL><INDENT>prev_path.insert(token, value)<EOL><DEDENT>elif action == ACTION_MKDICT:<EOL><INDENT>prev_path[token] = {}<EOL><DEDENT>elif action == ACTION_MKLIST:<EOL><INDENT>prev_path[token] = []<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>".format(action))<EOL><DEDENT>
Get, set, or delete values in a JSON structure. `expr` is a JSONpath-like expression pointing to the desired value. `action` determines the action to perform. See the module-level `ACTION_*` constants. `value` should be given if action is `ACTION_SET`. If `default` is set and `expr` isn't found, return `default` instead; this suppresses any exception raised while resolving the path.
f8285:m1
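A usage sketch for jsonxs, relying only on the behavior documented above (the ACTION_* names are the module-level constants the docstring refers to):

d = {"feed": {"tags": ["python", "jsonxs"]}}

jsonxs(d, "feed.tags[1]")                            # -> 'jsonxs'
jsonxs(d, "feed.title", ACTION_SET, value="News")    # d['feed']['title'] = 'News'
jsonxs(d, "feed.missing", default="n/a")             # -> 'n/a' instead of raising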
def is_executable(path):
return stat.S_IXUSR & os.stat(path)[stat.ST_MODE]<EOL>
Determine whether `path` points to an executable file.
f8288:m0
def deepupdate(target, src, overwrite=True):
for k, v in src.items():<EOL><INDENT>if type(v) == list:<EOL><INDENT>if not k in target:<EOL><INDENT>target[k] = copy.deepcopy(v)<EOL><DEDENT>elif overwrite is True:<EOL><INDENT>target[k].extend(v)<EOL><DEDENT><DEDENT>elif type(v) == dict:<EOL><INDENT>if not k in target:<EOL><INDENT>target[k] = copy.deepcopy(v)<EOL><DEDENT>else:<EOL><INDENT>deepupdate(target[k], v, overwrite=overwrite)<EOL><DEDENT><DEDENT>elif type(v) == set:<EOL><INDENT>if not k in target:<EOL><INDENT>target[k] = v.copy()<EOL><DEDENT>elif overwrite is True:<EOL><INDENT>if type(target[k]) == list:<EOL><INDENT>target[k].extend(v)<EOL><DEDENT>elif type(target[k]) == set:<EOL><INDENT>target[k].update(v)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError("<STR_LIT>".format(type(target[k]), type(v)))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if k not in target or overwrite is True:<EOL><INDENT>target[k] = copy.copy(v)<EOL><DEDENT><DEDENT><DEDENT>
Deep-update target dict with src. For each k, v in src: if k doesn't exist in target, it is deep-copied from src to target. Otherwise, if v is a list, target[k] is extended with it; if v is a set, target[k] is updated with it; if v is a dict, it is recursively deep-updated. If `overwrite` is False, existing values in target will not be overwritten. Examples: >>> t = {'name': 'Ferry', 'hobbies': ['programming', 'sci-fi']} >>> deepupdate(t, {'hobbies': ['gaming']}) >>> print(t) {'name': 'Ferry', 'hobbies': ['programming', 'sci-fi', 'gaming']}
f8288:m1
def find_path(dirs, path_to_find):
for dir in dirs:<EOL><INDENT>if os.path.exists(os.path.join(dir, path_to_find)):<EOL><INDENT>return dir<EOL><DEDENT><DEDENT>return None<EOL>
Go through a bunch of dirs and see if dir+path_to_find exists there. Returns the first dir that matches. Otherwise, return None.
f8288:m2
def to_bool(s):
if isinstance(s, bool):<EOL><INDENT>return s<EOL><DEDENT>elif s.lower() in ['<STR_LIT:true>', '<STR_LIT:1>']:<EOL><INDENT>return True<EOL><DEDENT>elif s.lower() in ['<STR_LIT:false>', '<STR_LIT:0>']:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>" % (s))<EOL><DEDENT>
Convert string `s` into a boolean. `s` can be 'true', 'True', '1', 'false', 'False', '0'; actual booleans are returned unchanged. Examples: >>> to_bool("true") True >>> to_bool("0") False >>> to_bool(True) True
f8288:m3
def _tpl_possibilities(self):
tpl_possibilities = [<EOL>os.path.realpath(self.tpl)<EOL>]<EOL>for tpl_dir in self.tpl_dirs:<EOL><INDENT>tpl_possibilities.append(os.path.realpath(os.path.join(tpl_dir, "<STR_LIT>".format(self.tpl))))<EOL>tpl_possibilities.append(os.path.realpath(os.path.join(tpl_dir, "<STR_LIT>".format(self.tpl))))<EOL><DEDENT>return tpl_possibilities<EOL>
Construct a list of possible paths to templates.
f8289:c0:m1
def _find_tpl(self):
for tpl_possibility in self.tpl_possibilities:<EOL><INDENT>if os.path.isfile(tpl_possibility):<EOL><INDENT>return tpl_possibility<EOL><DEDENT><DEDENT>return None<EOL>
Find a template in the list of possible paths.
f8289:c0:m2
def render(self, hosts, vars={}):
if self.tpl_file.endswith("<STR_LIT>"):<EOL><INDENT>return self._render_mako(hosts, vars)<EOL><DEDENT>elif self.tpl_file.endswith("<STR_LIT>"):<EOL><INDENT>return self._render_py(hosts, vars)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>".format(self.tpl_file))<EOL><DEDENT>
Render a Mako or Python (.py) template file.
f8289:c0:m3
def _parse_hosts_contents(self, hosts_contents):
sections = []<EOL>cur_section = {<EOL>'<STR_LIT:type>': '<STR_LIT>',<EOL>'<STR_LIT:name>': None,<EOL>'<STR_LIT>': []<EOL>}<EOL>for line in hosts_contents:<EOL><INDENT>line = line.strip()<EOL>if line.startswith('<STR_LIT:#>') or not line:<EOL><INDENT>continue<EOL><DEDENT>elif line.startswith('<STR_LIT:[>'):<EOL><INDENT>sections.append(cur_section)<EOL>section_type, name = self._parse_line_section(line)<EOL>cur_section = {<EOL>'<STR_LIT:type>': section_type,<EOL>'<STR_LIT:name>': name,<EOL>'<STR_LIT>': []<EOL>}<EOL><DEDENT>else:<EOL><INDENT>name, vars = self._parse_line_entry(line, cur_section['<STR_LIT:type>'])<EOL>entry = {<EOL>'<STR_LIT:name>': name,<EOL>'<STR_LIT>': vars<EOL>}<EOL>cur_section['<STR_LIT>'].append(entry)<EOL><DEDENT><DEDENT>sections.append(cur_section)<EOL>return sections<EOL>
Parse the inventory contents. This returns a list of sections found in the inventory, which can then be used to figure out which hosts belong to which groups and such. Each section has a name, a type ('hosts', 'children', 'vars') and a list of entries for that section. Entries consist of a hostname and the variables. For 'vars' sections, the hostname is None. For example: [production:children] frontend purpose="web" db purpose="db" Returns: { 'name': 'production', 'type': 'children', 'entries': [ {'name': 'frontend', 'hostvars': {'purpose': 'web'}}, {'name': 'db', 'hostvars': {'purpose': 'db'}}, ] }
f8290:c0:m1
def _parse_line_section(self, line):
m = re.match("<STR_LIT>", line)<EOL>group_def = m.groups()[<NUM_LIT:0>]<EOL>if '<STR_LIT::>' in group_def:<EOL><INDENT>group_name, group_type = group_def.split('<STR_LIT::>')<EOL><DEDENT>else:<EOL><INDENT>group_name = group_def<EOL>group_type = '<STR_LIT>'<EOL><DEDENT>return (group_type, group_name)<EOL>
Parse a line containing a group definition. Returns a tuple: (group_type, group_name), where group_type is in the set ('hosts', 'children', 'vars'). For example: [prod] Returns: ('hosts', 'prod') For example: [prod:children] Returns: ('children', 'prod')
f8290:c0:m2
def _parse_line_entry(self, line, type):
name = None<EOL>key_values = {}<EOL>if type == '<STR_LIT>':<EOL><INDENT>key_values = self._parse_line_vars(line)<EOL><DEDENT>else:<EOL><INDENT>tokens = shlex.split(line.strip())<EOL>name = tokens.pop(<NUM_LIT:0>)<EOL>try:<EOL><INDENT>key_values = self._parse_vars(tokens)<EOL><DEDENT>except ValueError:<EOL><INDENT>self.log.warning("<STR_LIT>".format(line))<EOL>return (name, {})<EOL><DEDENT><DEDENT>return (name, key_values)<EOL>
Parse a section entry line into its components. In case of a 'vars' section, the first field will be None. Otherwise, the first field will be the unexpanded host or group name the variables apply to. For example: [production:children] frontend purpose="web" # The line we process Returns: ('frontend', {'purpose': 'web'}) For example: [production:vars] purpose="web" # The line we process Returns: (None, {'purpose': 'web'}) Undocumented feature: [prod:vars] json_like_vars=[{'name': 'htpasswd_auth'}] Returns: (None, {'name': 'htpasswd_auth'})
f8290:c0:m3
def _parse_line_vars(self, line):
key_values = {}<EOL>k, v = line.strip().split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>if v.startswith('<STR_LIT:[>'):<EOL><INDENT>try:<EOL><INDENT>list_res = ihateyaml.safe_load(v)<EOL>if isinstance(list_res[<NUM_LIT:0>], dict):<EOL><INDENT>key_values = list_res[<NUM_LIT:0>]<EOL>return key_values<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>tokens = shlex.split(line.strip())<EOL>key_values = self._parse_vars(tokens)<EOL>return key_values<EOL>
Parse a line in a [XXXXX:vars] section.
f8290:c0:m4
def _parse_vars(self, tokens):
key_values = {}<EOL>for token in tokens:<EOL><INDENT>if token.startswith('<STR_LIT:#>'):<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>k, v = token.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>key = k.strip()<EOL>key_values[key] = v.strip()<EOL><DEDENT><DEDENT>return key_values<EOL>
Given an iterable of tokens, returns variables and their values as a dictionary. For example: ['dtap=prod', 'comment=some comment'] Returns: {'dtap': 'prod', 'comment': 'some comment'}
f8290:c0:m5
def _get_distinct_hostnames(self):
hostnames = []<EOL>for section in self.sections:<EOL><INDENT>hostnames.extend(self._group_get_hostnames(section['<STR_LIT:name>']))<EOL><DEDENT>return set(hostnames)<EOL>
Return a set of distinct hostnames found in the entire inventory.
f8290:c0:m6
def _apply_section(self, section, hosts):
<EOL>if section['<STR_LIT:name>'] is not None:<EOL><INDENT>for hostname in self._group_get_hostnames(section['<STR_LIT:name>']):<EOL><INDENT>hosts[hostname]['<STR_LIT>'].add(section['<STR_LIT:name>'])<EOL><DEDENT><DEDENT>func_map = {<EOL>"<STR_LIT>": self._apply_section_hosts,<EOL>"<STR_LIT>": self._apply_section_children,<EOL>"<STR_LIT>": self._apply_section_vars,<EOL>}<EOL>func = func_map[section['<STR_LIT:type>']]<EOL>func(section, hosts)<EOL>
Recursively find all the hosts that belong in or under a section and add the section's group name and variables to every host.
f8290:c0:m7
def _apply_section_hosts(self, section, hosts):
for entry in section['<STR_LIT>']:<EOL><INDENT>for hostname in self.expand_hostdef(entry['<STR_LIT:name>']):<EOL><INDENT>if hostname not in hosts:<EOL><INDENT>continue<EOL><DEDENT>host = hosts[hostname]<EOL>for var_key, var_val in entry['<STR_LIT>'].items():<EOL><INDENT>host['<STR_LIT>'][var_key] = var_val<EOL><DEDENT><DEDENT><DEDENT>
Add the variables for each entry in a 'hosts' section to the hosts belonging to that entry.
f8290:c0:m8
def _apply_section_children(self, section, hosts):
for entry in section['<STR_LIT>']:<EOL><INDENT>for hostname in self._group_get_hostnames(entry['<STR_LIT:name>']):<EOL><INDENT>host = hosts[hostname]<EOL>for var_key, var_val in entry['<STR_LIT>'].items():<EOL><INDENT>host['<STR_LIT>'][var_key] = var_val<EOL><DEDENT><DEDENT><DEDENT>
Add the variables for each entry in a 'children' section to the hosts belonging to that entry.
f8290:c0:m9
def _apply_section_vars(self, section, hosts):
for hostname in self._group_get_hostnames(section['<STR_LIT:name>']):<EOL><INDENT>host = hosts[hostname]<EOL>for entry in section['<STR_LIT>']:<EOL><INDENT>for var_key, var_val in entry['<STR_LIT>'].items():<EOL><INDENT>host['<STR_LIT>'][var_key] = var_val<EOL><DEDENT><DEDENT><DEDENT>
Apply the variables in a 'vars' section to each host belonging to the group the section refers to.
f8290:c0:m10
def _group_get_hostnames(self, group_name):
hostnames = []<EOL>hosts_section = self._get_section(group_name, '<STR_LIT>')<EOL>if hosts_section:<EOL><INDENT>for entry in hosts_section['<STR_LIT>']:<EOL><INDENT>hostnames.extend(self.expand_hostdef(entry['<STR_LIT:name>']))<EOL><DEDENT><DEDENT>children_section = self._get_section(group_name, '<STR_LIT>')<EOL>if children_section:<EOL><INDENT>for entry in children_section['<STR_LIT>']:<EOL><INDENT>hostnames.extend(self._group_get_hostnames(entry['<STR_LIT:name>']))<EOL><DEDENT><DEDENT>return hostnames<EOL>
Recursively fetch a list of each unique hostname that belongs in or under the group. This includes hosts in children groups.
f8290:c0:m11
def _get_section(self, name, type):
for section in self.sections:<EOL><INDENT>if section['<STR_LIT:name>'] == name and section['<STR_LIT:type>'] == type:<EOL><INDENT>return section<EOL><DEDENT><DEDENT>return None<EOL>
Find and return a section with `name` and `type`
f8290:c0:m12
def expand_hostdef(self, hostdef):
try:<EOL><INDENT>hosts_todo = [hostdef]<EOL>hosts_done = []<EOL>while hosts_todo:<EOL><INDENT>host = hosts_todo.pop(<NUM_LIT:0>)<EOL>if '<STR_LIT:[>' not in host:<EOL><INDENT>hosts_done.append(host)<EOL>continue<EOL><DEDENT>head, rest = host.split('<STR_LIT:[>', <NUM_LIT:1>)<EOL>pattern, tail = rest.split('<STR_LIT:]>', <NUM_LIT:1>)<EOL>start, end = pattern.split('<STR_LIT::>')<EOL>fill = False<EOL>if start.startswith('<STR_LIT:0>') and len(start) > <NUM_LIT:0>:<EOL><INDENT>fill = len(start)<EOL><DEDENT>try:<EOL><INDENT>for i in range(int(start), int(end) + <NUM_LIT:1>):<EOL><INDENT>if fill:<EOL><INDENT>range_nr = str(i).zfill(fill)<EOL><DEDENT>else:<EOL><INDENT>range_nr = i<EOL><DEDENT>new_host = '<STR_LIT>'.format(head, range_nr, tail)<EOL>if '<STR_LIT:[>' in new_host:<EOL><INDENT>hosts_todo.append(new_host)<EOL><DEDENT>else:<EOL><INDENT>hosts_done.append(new_host)<EOL><DEDENT><DEDENT><DEDENT>except ValueError:<EOL><INDENT>for i in range(ord(start), ord(end) + <NUM_LIT:1>):<EOL><INDENT>new_host = '<STR_LIT>'.format(head, chr(i), tail)<EOL>if '<STR_LIT:[>' in new_host:<EOL><INDENT>hosts_todo.append(new_host)<EOL><DEDENT>else:<EOL><INDENT>hosts_done.append(new_host)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return [host_name.split('<STR_LIT::>')[<NUM_LIT:0>] for host_name in hosts_done]<EOL><DEDENT>except Exception as e:<EOL><INDENT>self.log.warning("<STR_LIT>".format(hostdef, e))<EOL>return []<EOL><DEDENT>
Expand a host definition (e.g. "foo[001:010].bar.com") into separate hostnames. Supports zero-padding, numbered ranges and alphabetical ranges. Multiple patterns in a host definition are also supported. Returns a list of the fully expanded hostnames. Ports are also removed from hostnames as a bonus (e.g. "foo.bar.com:8022" -> "foo.bar.com")
f8290:c0:m13
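Some illustrative expansions (assuming `inv` is an instance of the parser class this method belongs to):

inv.expand_hostdef("web[01:03].example.com")
# -> ['web01.example.com', 'web02.example.com', 'web03.example.com']
inv.expand_hostdef("db[a:c].local:2222")
# -> ['dba.local', 'dbb.local', 'dbc.local']   (the :2222 port is stripped)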
def _get_host(self, hostname):
if hostname not in self.hosts:<EOL><INDENT>self.hosts[hostname] = {<EOL>'<STR_LIT>': set(),<EOL>'<STR_LIT>': {}<EOL>}<EOL><DEDENT>return self.hosts[hostname]<EOL>
Get an existing host or otherwise initialize a new empty one.
f8290:c1:m1
def _parse_group(self, group_name, group):
if type(group) == dict:<EOL><INDENT>hostnames_in_group = set()<EOL>for hostname in group.get('<STR_LIT>', []):<EOL><INDENT>self._get_host(hostname)['<STR_LIT>'].add(group_name)<EOL>hostnames_in_group.add(hostname)<EOL><DEDENT>for var_key, var_val in group.get('<STR_LIT>', {}).items():<EOL><INDENT>for hostname in hostnames_in_group:<EOL><INDENT>self._get_host(hostname)['<STR_LIT>'][var_key] = var_val<EOL><DEDENT><DEDENT><DEDENT>elif type(group) == list:<EOL><INDENT>for hostname in group:<EOL><INDENT>self._get_host(hostname)['<STR_LIT>'].add(group_name)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.log.warning("<STR_LIT>".format(type(group)))<EOL><DEDENT>
Parse a group definition from a dynamic inventory. These are top-level elements which are not '_meta(data)'.
f8290:c1:m2
def _parse_meta(self, meta):
for hostname, hostvars in meta.get('<STR_LIT>', {}).items():<EOL><INDENT>for var_key, var_val in hostvars.items():<EOL><INDENT>self._get_host(hostname)['<STR_LIT>'][var_key] = var_val<EOL><DEDENT><DEDENT>
Parse the _meta element from a dynamic host inventory output.
f8290:c1:m3
def strip_exts(s, exts):
f_split = os.path.splitext(s)<EOL>if f_split[<NUM_LIT:1>] in exts:<EOL><INDENT>return f_split[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return s<EOL><DEDENT>
Given a string and an iterable of extensions, strip the extension off the string if the string ends with one of the extensions.
f8293:m0
def __init__(self, fact_dirs, inventory_paths=None, fact_cache=False,<EOL>limit=None, debug=False):
self.fact_dirs = fact_dirs<EOL>if inventory_paths is None:<EOL><INDENT>self.inventory_paths = []<EOL><DEDENT>else:<EOL><INDENT>self.inventory_paths = inventory_paths<EOL><DEDENT>self.fact_cache = fact_cache <EOL>self.limit = self._parse_limit(limit)<EOL>self.debug = debug<EOL>self.hosts = {}<EOL>self.log = logging.getLogger(__name__)<EOL>for fact_dir in self.fact_dirs:<EOL><INDENT>self._parse_fact_dir(fact_dir, self.fact_cache)<EOL><DEDENT>for inventory_path in self.inventory_paths:<EOL><INDENT>self._handle_inventory(inventory_path)<EOL><DEDENT>for inventory_path in self.inventory_paths:<EOL><INDENT>self._parse_hostvar_dir(inventory_path)<EOL><DEDENT>for inventory_path in self.inventory_paths:<EOL><INDENT>self._parse_groupvar_dir(inventory_path)<EOL><DEDENT>
`fact_dirs` is a list of paths to directories containing facts gathered by ansible's 'setup' module. `inventory_paths` is a list with files or directories containing the inventory. It will be scanned to extract groups, variables and additional facts. If entries point to a file, it's read as a hosts file. If it's a directory, it is scanned for hosts files and dynamic inventory scripts.
f8293:c0:m0
def _parse_limit(self, limit):
if limit is None:<EOL><INDENT>return None<EOL><DEDENT>limit_parsed = {<EOL>"<STR_LIT>": [],<EOL>"<STR_LIT>": []<EOL>}<EOL>elems = limit.split("<STR_LIT::>")<EOL>for elem in elems:<EOL><INDENT>if elem.startswith('<STR_LIT:!>'):<EOL><INDENT>limit_parsed['<STR_LIT>'].append(elem[<NUM_LIT:1>:])<EOL><DEDENT>else:<EOL><INDENT>limit_parsed['<STR_LIT>'].append(elem)<EOL><DEDENT><DEDENT>return limit_parsed<EOL>
Parse a host / group limit in the form of a string (e.g. 'all:!cust.acme') into a dict of things to be included and things to be excluded.
f8293:c0:m1
def _handle_inventory(self, inventory_path):
self.log.debug("<STR_LIT>".format(inventory_path))<EOL>if os.path.isfile(inventory_path) andutil.is_executable(inventory_path):<EOL><INDENT>self.log.debug("<STR_LIT>".format(inventory_path))<EOL>self._parse_dyn_inventory(inventory_path)<EOL><DEDENT>elif os.path.isfile(inventory_path):<EOL><INDENT>self.log.debug("<STR_LIT>".format(inventory_path))<EOL>self._parse_hosts_inventory(inventory_path)<EOL><DEDENT>elif os.path.isdir(inventory_path):<EOL><INDENT>self.log.debug("<STR_LIT>".format(inventory_path))<EOL>if any(os.path.basename(inventory_path) == name for name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>return<EOL><DEDENT>for fname in os.listdir(inventory_path):<EOL><INDENT>if any(fname.endswith(ext) for ext in ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]):<EOL><INDENT>continue<EOL><DEDENT>self._handle_inventory(os.path.join(inventory_path, fname))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise IOError("<STR_LIT>".format(inventory_path))<EOL><DEDENT>
Scan inventory. As Ansible is a big mess without any kind of preconceived notion of design, there are several (and I use that word lightly) different ways inventory_path can be handled: - a non-executable file: handled as an Ansible 'hosts' file. - an executable file: handled as a dynamic inventory file. - a directory: scanned for Ansible 'hosts' and dynamic inventory files.
f8293:c0:m2
def _parse_hosts_inventory(self, inventory_path):
hosts_contents = []<EOL>if os.path.isdir(inventory_path):<EOL><INDENT>self.log.debug("<STR_LIT>".format(inventory_path))<EOL>for fname in os.listdir(inventory_path):<EOL><INDENT>if fname == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>path = os.path.join(inventory_path, fname)<EOL>if os.path.isdir(path):<EOL><INDENT>continue<EOL><DEDENT>with codecs.open(path, '<STR_LIT:r>', encoding='<STR_LIT:utf8>') as f:<EOL><INDENT>hosts_contents += f.readlines()<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.log.debug("<STR_LIT>".format(inventory_path))<EOL>with codecs.open(inventory_path, '<STR_LIT:r>', encoding='<STR_LIT:utf8>') as f:<EOL><INDENT>hosts_contents = f.readlines()<EOL><DEDENT><DEDENT>hosts_parser = parser.HostsParser(hosts_contents)<EOL>for hostname, key_values in hosts_parser.hosts.items():<EOL><INDENT>self.update_host(hostname, key_values)<EOL><DEDENT>
Read all the available hosts inventory information into one big list and parse it.
f8293:c0:m3
def _parse_hostvar_dir(self, inventory_path):
<EOL>if os.path.isdir(inventory_path):<EOL><INDENT>path = os.path.join(inventory_path, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>path = os.path.join(os.path.dirname(inventory_path), '<STR_LIT>')<EOL><DEDENT>self.log.debug("<STR_LIT>".format(path))<EOL>if not os.path.exists(path):<EOL><INDENT>self.log.info("<STR_LIT>".format(path))<EOL>return<EOL><DEDENT>for entry in os.listdir(path):<EOL><INDENT>if entry == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>full_path = os.path.join(path, entry)<EOL>hostname = strip_exts(entry, ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'))<EOL>if os.path.isfile(full_path):<EOL><INDENT>self._parse_hostvar_file(hostname, full_path)<EOL><DEDENT>elif os.path.isdir(full_path):<EOL><INDENT>for file_entry in os.listdir(full_path):<EOL><INDENT>p = os.path.join(full_path, file_entry)<EOL>if not os.path.isdir(p):<EOL><INDENT>self._parse_hostvar_file(hostname, p)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
Parse host_vars dir, if it exists.
f8293:c0:m4
def _parse_hostvar_file(self, hostname, path):
<EOL>first_line = open(path, '<STR_LIT:r>').readline()<EOL>if first_line.startswith('<STR_LIT>'):<EOL><INDENT>self.log.warning("<STR_LIT>".format(path))<EOL>return<EOL><DEDENT>try:<EOL><INDENT>self.log.debug("<STR_LIT>".format(path))<EOL>f = codecs.open(path, '<STR_LIT:r>', encoding='<STR_LIT:utf8>')<EOL>invars = ihateyaml.safe_load(f)<EOL>f.close()<EOL><DEDENT>except Exception as err:<EOL><INDENT>self.log.warning("<STR_LIT>".format(path, err))<EOL>return<EOL><DEDENT>if invars is None:<EOL><INDENT>return<EOL><DEDENT>if hostname == "<STR_LIT:all>":<EOL><INDENT>for hostname in self.hosts_all():<EOL><INDENT>self.update_host(hostname, {'<STR_LIT>': invars}, overwrite=False)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.update_host(hostname, {'<STR_LIT>': invars}, overwrite=True)<EOL><DEDENT>
Parse a host var file and apply it to host `hostname`.
f8293:c0:m5
def _parse_groupvar_dir(self, inventory_path):
<EOL>if os.path.isdir(inventory_path):<EOL><INDENT>path = os.path.join(inventory_path, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>path = os.path.join(os.path.dirname(inventory_path), '<STR_LIT>')<EOL><DEDENT>self.log.debug("<STR_LIT>".format(path))<EOL>if not os.path.exists(path):<EOL><INDENT>self.log.info("<STR_LIT>".format(path))<EOL>return<EOL><DEDENT>for (dirpath, dirnames, filenames) in os.walk(path):<EOL><INDENT>for filename in filenames:<EOL><INDENT>full_path = os.path.join(dirpath, filename)<EOL>groupname = strip_exts(filename, ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'))<EOL>try:<EOL><INDENT>self.log.debug("<STR_LIT>".format(full_path))<EOL>f = codecs.open(full_path, '<STR_LIT:r>', encoding='<STR_LIT:utf8>')<EOL>invars = ihateyaml.safe_load(f)<EOL>f.close()<EOL><DEDENT>except Exception as err:<EOL><INDENT>self.log.warning("<STR_LIT>".format(full_path, err))<EOL>continue <EOL><DEDENT>if groupname == '<STR_LIT:all>':<EOL><INDENT>for hostname in self.hosts_all():<EOL><INDENT>self.update_host(hostname, {'<STR_LIT>': invars}, overwrite=False)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for hostname in self.hosts_in_group(groupname):<EOL><INDENT>self.update_host(hostname, {'<STR_LIT>': invars}, overwrite=False)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
Parse group_vars dir, if it exists. Encrypted vault files are skipped.
f8293:c0:m6
def _parse_fact_dir(self, fact_dir, fact_cache=False):
self.log.debug("<STR_LIT>".format(fact_dir))<EOL>if not os.path.isdir(fact_dir):<EOL><INDENT>raise IOError("<STR_LIT>".format(fact_dir))<EOL><DEDENT>flist = []<EOL>for (dirpath, dirnames, filenames) in os.walk(fact_dir):<EOL><INDENT>flist.extend(filenames)<EOL>break<EOL><DEDENT>for fname in flist:<EOL><INDENT>if fname.startswith('<STR_LIT:.>'):<EOL><INDENT>continue<EOL><DEDENT>self.log.debug("<STR_LIT>".format(os.path.join(fact_dir, fname)))<EOL>hostname = fname<EOL>fd = codecs.open(os.path.join(fact_dir, fname), '<STR_LIT:r>', encoding='<STR_LIT:utf8>')<EOL>s = fd.readlines()<EOL>fd.close()<EOL>try:<EOL><INDENT>x = json.loads('<STR_LIT>'.join(s))<EOL>if fact_cache:<EOL><INDENT>x = json.loads('<STR_LIT>' + '<STR_LIT>'.join(s) + '<STR_LIT>')<EOL><DEDENT>self.update_host(hostname, x)<EOL>self.update_host(hostname, {'<STR_LIT:name>': hostname})<EOL><DEDENT>except ValueError as e:<EOL><INDENT>self.log.warning("<STR_LIT>" % (fname, e))<EOL><DEDENT><DEDENT>
Walk through a directory of JSON files and extract information from them. This is used for both the Ansible fact gathering (setup module) output and custom variables.
f8293:c0:m7
def _parse_dyn_inventory(self, script):
self.log.debug("<STR_LIT>".format(script))<EOL>try:<EOL><INDENT>proc = subprocess.Popen([script, '<STR_LIT>'],<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE,<EOL>close_fds=True)<EOL>stdout, stderr = proc.communicate(input)<EOL>if proc.returncode != <NUM_LIT:0>:<EOL><INDENT>sys.stderr.write("<STR_LIT>"<EOL>"<STR_LIT>".format(script,<EOL>proc.returncode))<EOL>for line in stderr:<EOL><INDENT>sys.stderr.write(line)<EOL><DEDENT><DEDENT>dyninv_parser = parser.DynInvParser(stdout.decode('<STR_LIT:utf8>'))<EOL>for hostname, key_values in dyninv_parser.hosts.items():<EOL><INDENT>self.update_host(hostname, key_values)<EOL><DEDENT><DEDENT>except OSError as err:<EOL><INDENT>sys.stderr.write("<STR_LIT>".format(script))<EOL>sys.stderr.write(str(err) + '<STR_LIT:\n>')<EOL><DEDENT>
Execute a dynamic inventory script and parse the results.
f8293:c0:m8
def update_host(self, hostname, key_values, overwrite=True):
default_empty_host = {<EOL>'<STR_LIT:name>': hostname,<EOL>'<STR_LIT>': {},<EOL>}<EOL>host_info = self.hosts.get(hostname, default_empty_host)<EOL>util.deepupdate(host_info, key_values, overwrite=overwrite)<EOL>self.hosts[hostname] = host_info<EOL>
Update a host's information. This is called by various collectors, such as the ansible setup module output and the hosts parser, to add information to a host. It does some deep inspection to make sure nested information can be updated.
f8293:c0:m9
def hosts_all(self):
return [hostname for hostname, hostinfo in self.hosts.items()]<EOL>
Return a list of all hostnames.
f8293:c0:m10
def hosts_in_group(self, groupname):
result = []<EOL>for hostname, hostinfo in self.hosts.items():<EOL><INDENT>if groupname == '<STR_LIT:all>':<EOL><INDENT>result.append(hostname)<EOL><DEDENT>elif '<STR_LIT>' in hostinfo:<EOL><INDENT>if groupname in hostinfo['<STR_LIT>']:<EOL><INDENT>result.append(hostname)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>hostinfo['<STR_LIT>'] = [groupname]<EOL><DEDENT><DEDENT>return result<EOL>
Return a list of hostnames that are in a group.
f8293:c0:m11
def get_hosts(self):
limited_hosts = {}<EOL>if self.limit is not None:<EOL><INDENT>for include in self.limit['<STR_LIT>']:<EOL><INDENT>for hostname in self.hosts_in_group(include):<EOL><INDENT>limited_hosts[hostname] = self.hosts[hostname]<EOL><DEDENT>if include in self.hosts:<EOL><INDENT>limited_hosts[include] = self.hosts[include]<EOL><DEDENT><DEDENT>for exclude in self.limit["<STR_LIT>"]:<EOL><INDENT>for hostname in self.hosts_in_group(exclude):<EOL><INDENT>if hostname in limited_hosts:<EOL><INDENT>limited_hosts.pop(hostname)<EOL><DEDENT><DEDENT>if exclude in limited_hosts:<EOL><INDENT>limited_hosts.pop(exclude)<EOL><DEDENT><DEDENT>return limited_hosts<EOL><DEDENT>else:<EOL><INDENT>return self.hosts<EOL><DEDENT>
Return a dict of parsed hosts info, with the limit applied if required.
f8293:c0:m12
def get_logger():
root = logging.getLogger()<EOL>root.setLevel(logging.WARNING)<EOL>ch = logging.StreamHandler(sys.stderr)<EOL>ch.setLevel(logging.DEBUG)<EOL>formatter = logging.Formatter('<STR_LIT>')<EOL>ch.setFormatter(formatter)<EOL>root.addHandler(ch)<EOL>return root<EOL>
Instantiate a logger.
f8296:m0
def get_data_dir():
data_dir_paths = [<EOL>os.path.join(os.path.dirname(ansiblecmdb.__file__), '<STR_LIT:data>'),<EOL>os.path.join(os.path.dirname(sys.argv[<NUM_LIT:0>]), '<STR_LIT:..>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:data>'),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>]<EOL>data_dir = util.find_path(data_dir_paths, '<STR_LIT>')<EOL>if not data_dir:<EOL><INDENT>sys.stdout.write("<STR_LIT>".format("<STR_LIT:U+002CU+0020>".join(data_dir_paths)))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return data_dir<EOL>
Find out our installation prefix and data directory. These can be in different places depending on how ansible-cmdb was installed.
f8296:m1
def get_hosts_files(option):
if option is not None:<EOL><INDENT>return option.split('<STR_LIT:U+002C>')<EOL><DEDENT>if os.path.isfile('<STR_LIT>'):<EOL><INDENT>return ['<STR_LIT>']<EOL><DEDENT>config_locations = [<EOL>'<STR_LIT:.>',<EOL>'<STR_LIT>'<EOL>]<EOL>config_dir = util.find_path(config_locations, '<STR_LIT>')<EOL>log.debug('<STR_LIT>'.format(config_dir))<EOL>if config_dir:<EOL><INDENT>with open(os.path.join(config_dir, '<STR_LIT>'), '<STR_LIT:r>') as cf:<EOL><INDENT>for line in cf:<EOL><INDENT>if line.startswith('<STR_LIT>'):<EOL><INDENT>return [line.split('<STR_LIT:=>', <NUM_LIT:1>)[<NUM_LIT:1>].strip()]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
Find out the location of the `hosts` file. This looks in multiple places such as the `-i` option, current dir and ansible configuration files. The first match is returned as a list.
f8296:m2
def get_cust_cols(path):
required_keys = ["<STR_LIT:title>", "<STR_LIT:id>", "<STR_LIT>", "<STR_LIT>"]<EOL>with open(path, '<STR_LIT:r>') as f:<EOL><INDENT>try:<EOL><INDENT>cust_cols = ast.literal_eval(f.read())<EOL><DEDENT>except Exception as err:<EOL><INDENT>sys.stderr.write("<STR_LIT>".format(path))<EOL>sys.stderr.write("<STR_LIT>".format(err))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>for col in cust_cols:<EOL><INDENT>for required_key in required_keys:<EOL><INDENT>if required_key not in col:<EOL><INDENT>sys.stderr.write("<STR_LIT>"<EOL>"<STR_LIT>".format(required_key, col))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if "<STR_LIT>" not in col and "<STR_LIT>" not in col:<EOL><INDENT>sys.stderr.write("<STR_LIT>"<EOL>"<STR_LIT>".format(col))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>return cust_cols<EOL>
Load custom column definitions.
f8296:m3
def parse_user_params(user_params):
if user_params:<EOL><INDENT>params = {}<EOL>try:<EOL><INDENT>for param in user_params.split('<STR_LIT:U+002C>'):<EOL><INDENT>param_key, param_value = param.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>params[param_key] = param_value<EOL><DEDENT><DEDENT>except ValueError as e:<EOL><INDENT>sys.stdout.write("<STR_LIT>")<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return params<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT>
Parse the user params (-p/--params) and return them as a dict.
f8296:m4
def _ordinal_choice(self, create=True, **kwargs):
if "<STR_LIT>" not in kwargs:<EOL><INDENT>kwargs["<STR_LIT>"] = self.scale<EOL><DEDENT>if create is True:<EOL><INDENT>return OrdinalChoiceFactory.create(**kwargs)<EOL><DEDENT>else:<EOL><INDENT>return OrdinalChoiceFactory.build(**kwargs)<EOL><DEDENT>
Create an ordinal choice. Requires `score` kwarg.
f8300:c0:m1
def _survey(self, **kwargs):
if "<STR_LIT:user>" not in kwargs:<EOL><INDENT>kwargs["<STR_LIT:user>"] = self.user<EOL><DEDENT>return SurveyFactory.create(**kwargs)<EOL>
Create a Survey
f8300:c0:m2
def _surveyresult(self, **kwargs):
if "<STR_LIT:user>" not in kwargs:<EOL><INDENT>kwargs["<STR_LIT:user>"] = self.user<EOL><DEDENT>return SurveyResultFactory.create(**kwargs)<EOL>
Create a SurveyResult
f8300:c0:m3
def _fieldresult(self, create=True, **kwargs):
if "<STR_LIT>" not in kwargs:<EOL><INDENT>kwargs["<STR_LIT>"] = self.field<EOL><DEDENT>if create is True:<EOL><INDENT>return FieldResultFactory.create(**kwargs)<EOL><DEDENT>else:<EOL><INDENT>return FieldResultFactory.build(**kwargs)<EOL><DEDENT>
Create a FieldResult. Requires `field` kwarg (defaults to self.field).
f8300:c0:m7
@property<EOL><INDENT>def field_type_choices(self):<DEDENT>
from formly.models import Field<EOL>return Field.FIELD_TYPE_CHOICES<EOL>
Customize available field type choices when designing a survey. Default: Field.FIELD_TYPE_CHOICES.
f8321:c0:m0
def _get_field_class(self, choices):
field_class = forms.CharField<EOL>kwargs = dict(<EOL>label=self.label,<EOL>help_text=self.help_text,<EOL>required=self.required<EOL>)<EOL>field_type = FIELD_TYPES.get(self.field_type, {})<EOL>field_class = field_type.get("<STR_LIT>", field_class)<EOL>kwargs.update(**field_type.get("<STR_LIT>", {}))<EOL>if self.field_type in [Field.CHECKBOX_FIELD, Field.SELECT_FIELD, Field.RADIO_CHOICES, Field.LIKERT_FIELD, Field.RATING_FIELD]:<EOL><INDENT>kwargs.update({"<STR_LIT>": choices})<EOL>if self.field_type == Field.CHECKBOX_FIELD:<EOL><INDENT>kwargs.update({"<STR_LIT>": self.maximum_choices})<EOL><DEDENT><DEDENT>elif self.field_type == Field.MULTIPLE_TEXT:<EOL><INDENT>kwargs.update({<EOL>"<STR_LIT>": self.expected_answers,<EOL>"<STR_LIT>": MultiTextWidget(widgets_length=self.expected_answers),<EOL>})<EOL><DEDENT>return field_class, kwargs<EOL>
Set field_class and field kwargs based on field type
f8332:c4:m9
@require_POST<EOL>@login_required<EOL>def survey_change_name(request, pk):
survey = get_object_or_404(Survey, pk=pk)<EOL>if not request.user.has_perm("<STR_LIT>", obj=survey):<EOL><INDENT>raise PermissionDenied()<EOL><DEDENT>survey.name = request.POST.get("<STR_LIT:name>")<EOL>survey.save()<EOL>return JsonResponse({<EOL>"<STR_LIT:status>": "<STR_LIT:OK>",<EOL>"<STR_LIT:name>": survey.name<EOL>})<EOL>
Works well with: http://www.appelsiini.net/projects/jeditable
f8337:m4
def _import_all_modules():
import traceback<EOL>import os<EOL>global results<EOL>globals_, locals_ = globals(), locals()<EOL>def load_module(modulename, package_module):<EOL><INDENT>try:<EOL><INDENT>names = []<EOL>module = __import__(package_module, globals_, locals_, [modulename])<EOL>for name in module.__dict__:<EOL><INDENT>if not name.startswith('<STR_LIT:_>'):<EOL><INDENT>globals_[name] = module.__dict__[name]<EOL>names.append(name)<EOL><DEDENT><DEDENT><DEDENT>except Exception:<EOL><INDENT>traceback.print_exc()<EOL>raise<EOL><DEDENT>return module, names<EOL><DEDENT>def load_dir(abs_dirpath, rel_dirpath='<STR_LIT>'):<EOL><INDENT>results = []<EOL>for filename in os.listdir(abs_dirpath):<EOL><INDENT>rel_filepath = os.path.join(rel_dirpath, filename)<EOL>abs_filepath = os.path.join(abs_dirpath, filename)<EOL>if filename[<NUM_LIT:0>] != '<STR_LIT:_>' and os.path.isfile(abs_filepath) and filename.split('<STR_LIT:.>')[-<NUM_LIT:1>] in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>modulename = '<STR_LIT:.>'.join(os.path.normpath(os.path.splitext(rel_filepath)[<NUM_LIT:0>]).split(os.sep))<EOL>package_module = '<STR_LIT:.>'.join([__name__, modulename])<EOL>module, names = load_module(modulename, package_module)<EOL>results += names<EOL><DEDENT>elif os.path.isdir(abs_filepath):<EOL><INDENT>results += load_dir(abs_filepath, rel_filepath)<EOL><DEDENT><DEDENT>return results<EOL><DEDENT>return load_dir(os.path.dirname(__file__))<EOL>
Dynamically import all modules in the package.
f8342:m0
def conv3x3(in_planes, out_planes, fn, stride=<NUM_LIT:1>):
return fn(in_planes, out_planes, kernel_size=<NUM_LIT:3>, stride=stride, padding=<NUM_LIT:1>, bias=False)<EOL>
3x3 convolution with padding
f8343:m0
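A usage sketch: `fn` is the convolution constructor to apply, so passing torch.nn.Conv2d yields an ordinary strided 3x3 layer.

import torch.nn as nn

downsample = conv3x3(64, 128, nn.Conv2d, stride=2)
# equivalent to nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False)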
def SRU_Compute_CPU(activation_type, d, bidirectional=False, scale_x=<NUM_LIT:1>):
def sru_compute_cpu(u, x, bias, init=None, mask_h=None):<EOL><INDENT>bidir = <NUM_LIT:2> if bidirectional else <NUM_LIT:1><EOL>length = x.size(<NUM_LIT:0>) if x.dim() == <NUM_LIT:3> else <NUM_LIT:1><EOL>batch = x.size(-<NUM_LIT:2>)<EOL>k = u.size(-<NUM_LIT:1>) // d // bidir<EOL>if mask_h is None:<EOL><INDENT>mask_h = <NUM_LIT:1><EOL><DEDENT>u = u.view(length, batch, bidir, d, k)<EOL>x_tilde = u[..., <NUM_LIT:0>]<EOL>forget_bias, reset_bias = bias.view(<NUM_LIT:2>, bidir, d)<EOL>forget = (u[..., <NUM_LIT:1>] + forget_bias).sigmoid()<EOL>reset = (u[..., <NUM_LIT:2>] + reset_bias).sigmoid()<EOL>if k == <NUM_LIT:3>:<EOL><INDENT>x_prime = x.view(length, batch, bidir, d)<EOL>x_prime = x_prime * scale_x if scale_x != <NUM_LIT:1> else x_prime<EOL><DEDENT>else:<EOL><INDENT>x_prime = u[..., <NUM_LIT:3>]<EOL><DEDENT>h = Variable(x.data.new(length, batch, bidir, d))<EOL>if init is None:<EOL><INDENT>c_init = Variable(x.data.new(batch, bidir, d).zero_())<EOL><DEDENT>else:<EOL><INDENT>c_init = init.view(batch, bidir, d)<EOL><DEDENT>c_final = []<EOL>for di in range(bidir):<EOL><INDENT>if di == <NUM_LIT:0>:<EOL><INDENT>time_seq = range(length)<EOL><DEDENT>else:<EOL><INDENT>time_seq = range(length - <NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>)<EOL><DEDENT>c_prev = c_init[:, di, :]<EOL>for t in time_seq:<EOL><INDENT>c_t = (c_prev - x_tilde[t, :, di, :]) * forget[t, :, di, :] + x_tilde[t, :, di, :]<EOL>c_prev = c_t<EOL>if activation_type == <NUM_LIT:0>:<EOL><INDENT>g_c_t = c_t<EOL><DEDENT>elif activation_type == <NUM_LIT:1>:<EOL><INDENT>g_c_t = c_t.tanh()<EOL><DEDENT>elif activation_type == <NUM_LIT:2>:<EOL><INDENT>g_c_t = nn.functional.relu(c_t)<EOL><DEDENT>else:<EOL><INDENT>assert False, '<STR_LIT>'.format(activation_type)<EOL><DEDENT>h[t, :, di, :] = (g_c_t * mask_h - x_prime[t, :, di, :]) * reset[t, :, di, :] + x_prime[t, :, di, :]<EOL><DEDENT>c_final.append(c_t)<EOL><DEDENT>return h.view(length, batch, -<NUM_LIT:1>), torch.stack(c_final, dim=<NUM_LIT:1>).view(batch, -<NUM_LIT:1>)<EOL><DEDENT>return sru_compute_cpu<EOL>
CPU version of the core SRU computation. Has the same interface as SRU_Compute_GPU() but is a regular Python function instead of a torch.autograd.Function because we don't implement backward() explicitly.
f8346:m0
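The inner loop above computes, per direction and per timestep, the SRU recurrence (notation as in the SRU paper by Lei et al.; g is the chosen activation and mask_h the optional dropout mask):

c_t = f_t \odot c_{t-1} + (1 - f_t) \odot \tilde{x}_t
h_t = r_t \odot (g(c_t) \odot m_h) + (1 - r_t) \odot x'_t

where f_t and r_t are the sigmoid forget and reset gates (each with its own bias), and x'_t is the (possibly scaled) residual input.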
def found_duplicates(counts):
_logger.warning("<STR_LIT>")<EOL>for marker, count in counts:<EOL><INDENT>_logger.warning("<STR_LIT>".format(marker, count))<EOL><DEDENT>_logger.warning("<STR_LIT>"<EOL>"<STR_LIT>")<EOL>
Log that duplicates were found. :param counts: A list of duplicate marker names along with their number of occurrences. :type counts: list
f8359:m2
def strip_key(k):
if k.startswith("<STR_LIT>"):<EOL><INDENT>return k[<NUM_LIT:3>:]<EOL><DEDENT>return k<EOL>
Sanitize variant identifiers. This is necessary because variants with the same chrom, pos and alleles hash to the same value.
f8364:m0
def create_probs_from_genotypes(genotype):
if genotype == <NUM_LIT:0>:<EOL><INDENT>return (<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL><DEDENT>if genotype == <NUM_LIT:1>:<EOL><INDENT>return (<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>)<EOL><DEDENT>if genotype == <NUM_LIT:2>:<EOL><INDENT>return (<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>)<EOL><DEDENT>if genotype == -<NUM_LIT:1>:<EOL><INDENT>return (<NUM_LIT>, <NUM_LIT:0.1>, <NUM_LIT:0.1>)<EOL><DEDENT>
Creates probabilities from an additive genotype.
f8377:m0
def __init__(self, dataframe, map_info):
self.df = dataframe<EOL>self.map_info = map_info<EOL>
Reads genotypes from a pandas DataFrame. Args: dataframe (pandas.DataFrame): The data. map_info (pandas.DataFrame): The mapping information. Note ==== The index of the dataframe should be the sample IDs. The index of the map_info should be the variant name, and there should be columns named chrom and pos.
f8380:c0:m0
def iter_genotypes(self):
<EOL>for variant in self.df.columns:<EOL><INDENT>genotypes = self.df.loc[:, variant].values<EOL>info = self.map_info.loc[variant, :]<EOL>yield Genotypes(<EOL>Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]),<EOL>genotypes,<EOL>reference=info.a2,<EOL>coded=info.a1,<EOL>multiallelic=False,<EOL>)<EOL><DEDENT>
Iterates on available markers. Returns: Genotypes instances.
f8380:c0:m1