code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def getobj(txt, last=False):
    """Return the last valid object name in string *txt*, or None."""
    suffix = ""
    # Peel a trailing bracketed expression off the end, keeping it for later.
    for opener, closer in ["[]", "()"]:
        if txt.endswith(closer):
            idx = txt.rfind(opener)
            if idx:
                suffix = txt[idx:]
                txt = txt[:idx]
    parts = re.split(SYMBOLS, txt)
    candidate = None
    try:
        # Walk backwards past separator-only tokens to the last real token.
        while candidate is None or re.match(SYMBOLS, candidate):
            candidate = parts.pop()
        if candidate.endswith('.'):
            candidate = candidate[:-1]
        if candidate.startswith('.'):
            # Invalid object name
            return None
        if last:
            #XXX: remove this statement as well as the "last" argument
            candidate += txt[ txt.rfind(candidate) + len(candidate) ]
        candidate += suffix
        if candidate:
            return candidate
    except IndexError:
        return None
4.258514
4.122998
1.032868
def getdoc(obj):
    """
    Return text documentation from an object as a dict with four keys:
    name (the inspected object's name), argspec, note (a phrase describing
    the type of object and the module it belongs to), and docstring.
    """
    docstring = inspect.getdoc(obj) or inspect.getcomments(obj) or ''

    # Most of the time doc will only contain ascii characters, but there are
    # some docstrings that contain non-ascii characters. Not all source files
    # declare their encoding in the first line, so querying for that might not
    # yield anything, either. So assume the most commonly used
    # multi-byte file encoding (which also covers ascii).
    try:
        docstring = to_text_string(docstring)
    except:
        pass

    # Doc dict keys
    doc = {'name': '',
           'argspec': '',
           'note': '',
           'docstring': docstring}

    if callable(obj):
        try:
            name = obj.__name__
        except AttributeError:
            doc['docstring'] = docstring
            return doc
        if inspect.ismethod(obj):
            imclass = get_meth_class(obj)
            if get_meth_class_inst(obj) is not None:
                doc['note'] = 'Method of %s instance' \
                              % get_meth_class_inst(obj).__class__.__name__
            else:
                doc['note'] = 'Unbound %s method' % imclass.__name__
            obj = get_meth_func(obj)
        elif hasattr(obj, '__module__'):
            doc['note'] = 'Function of %s module' % obj.__module__
        else:
            doc['note'] = 'Function'
        doc['name'] = obj.__name__

        if inspect.isfunction(obj):
            # Introspectable Python function: build the argspec directly.
            if PY2:
                args, varargs, varkw, defaults = inspect.getargspec(obj)
                doc['argspec'] = inspect.formatargspec(
                    args, varargs, varkw, defaults,
                    formatvalue=lambda o: '='+repr(o))
            else:
                (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
                 annotations) = inspect.getfullargspec(obj)
                doc['argspec'] = inspect.formatargspec(
                    args, varargs, varkw, defaults, kwonlyargs,
                    kwonlydefaults, annotations,
                    formatvalue=lambda o: '='+repr(o))
            if name == '<lambda>':
                doc['name'] = name + ' lambda '
                doc['argspec'] = doc['argspec'][1:-1]  # remove parentheses
        else:
            # Not introspectable: try to recover the argspec from the text.
            argspec = getargspecfromtext(doc['docstring'])
            if argspec:
                doc['argspec'] = argspec
                # Many scipy and numpy docstrings begin with a function
                # signature on the first line. This ends up begin redundant
                # when we are using title and argspec to create the
                # rich text "Definition:" field. We'll carefully remove this
                # redundancy but only under a strict set of conditions:
                # Remove the starting charaters of the 'doc' portion *iff*
                # the non-whitespace characters on the first line
                # match *exactly* the combined function title
                # and argspec we determined above.
                signature = doc['name'] + doc['argspec']
                docstring_blocks = doc['docstring'].split("\n\n")
                first_block = docstring_blocks[0].strip()
                if first_block == signature:
                    doc['docstring'] = doc['docstring'].replace(
                        signature, '', 1).lstrip()
            else:
                doc['argspec'] = '(...)'

        # Remove self from argspec
        argspec = doc['argspec']
        doc['argspec'] = argspec.replace('(self)', '()').replace('(self, ', '(')

    return doc
4.415181
4.346876
1.015714
def getsource(obj):
    """Wrapper around inspect.getsource; returns None when no source exists."""
    try:
        try:
            src = to_text_string(inspect.getsource(obj))
        except TypeError:
            if hasattr(obj, '__class__'):
                # Fall back to the source of the object's class.
                src = to_text_string(inspect.getsource(obj.__class__))
            else:
                # Bindings like VTK or ITK require this case
                src = getdoc(obj)
        return src
    except (TypeError, IOError):
        return
4.635712
4.604617
1.006753
def getsignaturefromtext(text, objname):
    """Get object signatures from text (object documentation).

    Return a single string in most cases; '' when nothing matches.
    Example of multiple signatures: PyQt5 objects.
    """
    if isinstance(text, dict):
        text = text.get('docstring', '')

    # Regexps
    oneline_re = objname + r'\([^\)].+?(?<=[\w\]\}\'"])\)(?!,)'
    multiline_re = objname + r'\([^\)]+(?<=[\w\]\}\'"])\)(?!,)'
    multiline_end_parenleft_re = r'(%s\([^\)]+(\),\n.+)+(?<=[\w\]\}\'"])\))'

    # Grabbing signatures
    if not text:
        text = ''
    single_matches = re.findall(oneline_re + '|' + multiline_re, text)
    wrapped_matches = [groups[0] for groups in
                       re.findall(multiline_end_parenleft_re % objname, text)]
    found = single_matches + wrapped_matches

    # The most relevant signature is usually the first one. There could be
    # others in doctests but those are not so important
    return found[0] if found else ''
4.690475
4.729449
0.991759
def getargspecfromtext(text):
    """Try to get the formatted argspec of a callable from the first
    block of its docstring.

    This will return something like '(foo, bar, k=1)'.
    """
    leading_block = text.split("\n\n")[0].strip()
    return getsignaturefromtext(leading_block, '')
5.441312
5.049148
1.077669
def getargsfromtext(text, objname):
    """Get arguments from text (object documentation).

    Returns a list of argument strings, or None when no signature is found.
    """
    signature = getsignaturefromtext(text, objname)
    if not signature:
        return
    # Strip everything up to and including the opening paren, and the
    # closing paren, then split on commas.
    inner = signature[signature.find('(')+1:-1]
    return inner.split(',')
4.33743
4.270732
1.015618
def getargs(obj):
    """Get the names and default values of a function's arguments."""
    if inspect.isfunction(obj) or inspect.isbuiltin(obj):
        func_obj = obj
    elif inspect.ismethod(obj):
        func_obj = get_meth_func(obj)
    elif inspect.isclass(obj) and hasattr(obj, '__init__'):
        func_obj = getattr(obj, '__init__')
    else:
        return []

    if not hasattr(func_obj, 'func_code'):
        # Builtin: try to extract info from doc
        args = getargsfromdoc(func_obj)
        if args is not None:
            return args
        else:
            # Example: PyQt5
            return getargsfromdoc(obj)

    args, _, _ = inspect.getargs(func_obj.func_code)
    if not args:
        return getargsfromdoc(obj)

    # Supporting tuple arguments in def statement:
    for i_arg, arg in enumerate(args):
        if isinstance(arg, list):
            args[i_arg] = "(%s)" % ", ".join(arg)

    defaults = get_func_defaults(func_obj)
    if defaults is not None:
        # Defaults align with the trailing portion of the argument list.
        for index, default in enumerate(defaults):
            args[index+len(args)-len(defaults)] += '='+repr(default)

    if inspect.isclass(obj) or inspect.ismethod(obj):
        if len(args) == 1:
            return None
        if 'self' in args:
            args.remove('self')
    return args
2.984758
3.007565
0.992417
def getargtxt(obj, one_arg_per_line=True):
    """Get the names and default values of a function's arguments.

    Return list with separators (', ') formatted for calltips.
    """
    args = getargs(obj)
    if args:
        sep = ', '
        textlist = None
        for i_arg, arg in enumerate(args):
            if textlist is None:
                textlist = ['']
            textlist[-1] += arg
            if i_arg < len(args)-1:
                textlist[-1] += sep
                # Wrap to a new chunk when the current one gets long, or
                # unconditionally when one_arg_per_line is requested.
                if len(textlist[-1]) >= 32 or one_arg_per_line:
                    textlist.append('')
        if inspect.isclass(obj) or inspect.ismethod(obj):
            if len(textlist) == 1:
                return None
            if 'self'+sep in textlist:
                textlist.remove('self'+sep)
        return textlist
2.883196
2.761605
1.044029
def isdefined(obj, force_import=False, namespace=None):
    """Return True if object is defined in namespace.

    If namespace is None --> namespace = locals()
    """
    if namespace is None:
        namespace = locals()
    attr_list = obj.split('.')
    base = attr_list.pop(0)
    if len(base) == 0:
        return False

    if base not in builtins.__dict__ and base not in namespace:
        if not force_import:
            return False
        try:
            module = __import__(base, globals(), namespace)
            if base not in globals():
                globals()[base] = module
            namespace[base] = module
        except Exception:
            return False

    for attr in attr_list:
        try:
            # NOTE(review): eval on a dotted-name fragment — callers must not
            # pass untrusted strings here.
            attr_not_found = not hasattr(eval(base, namespace), attr)
        except (SyntaxError, AttributeError):
            return False
        if attr_not_found:
            if not force_import:
                return False
            try:
                __import__(base+'.'+attr, globals(), namespace)
            except (ImportError, SyntaxError):
                return False
        base += '.'+attr
    return True
2.509975
2.358109
1.064402
def read(self, size):
    """Read wrapper.

    Parameters
    ----------
    size : int
        Number of bytes to read.

    Raises IOError when the underlying handle returns nothing.
    """
    data = self.handle.read(size)
    if data:
        return data
    raise IOError
4.518593
6.768247
0.667616
def escape_tex(value):
    """Make text tex safe."""
    result = value
    # Apply each (compiled-pattern, replacement) pair in order.
    for pattern, replacement in LATEX_SUBS:
        result = pattern.sub(replacement, result)
    return result
5.713959
5.867458
0.973839
def classnameify(s):
    """Makes a classname from an underscore-separated identifier."""
    words = s.split('_')
    # Known acronyms keep their casing; everything else is title-cased.
    return ''.join(w if w in ACRONYMS else w.title() for w in words)
5.588988
6.243351
0.89519
def packagenameify(s):
    """Makes a package name from the last dotted component of *s*."""
    tail = s.split('.')[-1:]
    return ''.join(w if w in ACRONYMS else w.title() for w in tail)
7.244844
7.662844
0.945451
def handle_fields(definitions, fields, prefix, offset, multiplier):
    """Helper for handling naming and sizing of fields. It's terrible.

    Returns (items, offset, multiplier).
    """
    items = []
    for f in fields:
        if f.type_id == "array" and f.options['fill'].value in CONSTRUCT_CODE:
            # Array of primitive values.
            prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
            n_with_values = f.options['n_with_values'].value
            bitfields = f.options['fields'].value if n_with_values > 0 else None
            if 'size' in f.options:
                name = "%s[%s]" % (f.options['fill'].value,
                                   str(f.options['size'].value))
                size = field_sizes[f.options['fill'].value] * f.options['size'].value
                items.append(FieldItem(prefix_name, name, offset, size,
                                       str(f.units), f.desc, n_with_values,
                                       bitfields))
                offset += size
            else:
                # Unsized array: size is N elements of the fill type.
                name = "%s[%s]" % (f.options['fill'].value, "N")
                multiplier = field_sizes[f.options['fill'].value]
                size = field_sizes[f.options['fill'].value] * 1
                items.append(FieldItem(prefix_name, name, offset, "N",
                                       str(f.units), f.desc, n_with_values,
                                       bitfields))
                offset += size
        elif f.type_id == "string":
            prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
            n_with_values = f.options['n_with_values'].value
            bitfields = f.options['fields'].value if n_with_values > 0 else None
            if 'size' in f.options:
                name = "string"
                size = field_sizes['u8'] * f.options['size'].value
                items.append(FieldItem(prefix_name, name, offset, size,
                                       str(f.units), f.desc, n_with_values,
                                       bitfields))
                offset += size
            else:
                name = "string"
                size = field_sizes['u8']
                multiplier = 1
                items.append(FieldItem(prefix_name, name, offset, "N",
                                       str(f.units), f.desc, n_with_values,
                                       bitfields))
                offset += size
        elif f.type_id == "array":
            # Array of a composite definition.
            name = f.options['fill'].value
            definition = next(d for d in definitions if name == d.identifier)
            prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
            # NOTE(review): the first recursion is used only to compute the
            # element stride; its items are discarded. Preserved as-is.
            (new_items, new_offset, new_multiplier) \
                = handle_fields(definitions, definition.fields,
                                prefix_name + "[N]", offset, multiplier)
            multiplier = new_offset - offset
            (newer_items, newer_offset, newer_multiplier) \
                = handle_fields(definitions, definition.fields,
                                prefix_name + "[N]", offset, multiplier)
            items += newer_items
            offset = newer_offset
        elif f.type_id not in CONSTRUCT_CODE:
            # Embedded composite definition: recurse into its fields.
            name = f.type_id
            definition = next(d for d in definitions if name == d.identifier)
            prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
            (new_items, new_offset, new_multiplier) \
                = handle_fields(definitions, definition.fields, prefix_name,
                                offset, multiplier)
            items += new_items
            offset = new_offset
            multiplier = new_multiplier
        else:
            # Plain primitive field.
            size = field_sizes[f.type_id]
            name = f.type_id
            adj_offset = "%dN+%d" % (multiplier, offset) if multiplier else offset
            prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
            n_with_values = f.options['n_with_values'].value
            bitfields = f.options['fields'].value if n_with_values > 0 else None
            items.append(FieldItem(prefix_name, name, adj_offset, size,
                                   str(f.units), f.desc, n_with_values,
                                   bitfields))
            offset += size
    return (items, offset, multiplier)
1.87671
1.875134
1.00084
def render_source(output_dir, package_specs, version):
    """Render and output the LaTeX documentation, then build a PDF."""
    destination_filename = "%s/sbp_out.tex" % output_dir
    py_template = JENV.get_template(TEMPLATE_NAME)
    stable_msgs = []
    unstable_msgs = []
    prims = []
    for p in sorted(package_specs, key=attrgetter('identifier')):
        pkg_name = p.identifier
        stable = p.stable
        # build list of required definitions (this package plus includes)
        # TODO: recursively include files
        definitions = p.definitions
        for inc in p.includes:
            inc_basename = inc.split(".")[0]
            for pkg in package_specs:
                if pkg.identifier.endswith(inc_basename):
                    definitions += pkg.definitions
        if pkg_name == "swiftnav.sbp.types":
            prims = p.definitions
        for d in p.definitions:
            if d.public and d.static and d.sbp_id:
                items, size, multiplier \
                    = handle_fields(definitions, d.fields, "", 0, None)
                # Express the message size, possibly in terms of N elements.
                if multiplier == 1:
                    adj_size = "N+%d" % (size - 1) if size > 1 else "N"
                elif multiplier:
                    if multiplier == size:
                        adj_size = "%dN" % multiplier
                    else:
                        adj_size = "%dN+%d" % (multiplier, size - multiplier)
                else:
                    adj_size = "%d" % size
                ti = TableItem(pkg_name, d.identifier, d.sbp_id, d.short_desc,
                               d.desc, adj_size, items, p.stable,
                               p.description)
                # Only show the package name on its first message row.
                pkg_name = ""
                if stable:
                    stable_msgs.append(ti)
                else:
                    unstable_msgs.append(ti)
    with open(destination_filename, 'w') as f:
        f.write(py_template.render(msgs=stable_msgs,
                                   umsgs=unstable_msgs,
                                   prims=prims,
                                   version=version))
    import subprocess
    import os
    os.chdir(output_dir)
    subprocess.call(["pdflatex", "--enable-write18", "-shell-escape",
                     "sbp_out.tex"])
    subprocess.call(["mv", "sbp_out.pdf", "../docs/sbp.pdf"])
4.200727
4.195823
1.001169
def get_args():
    """Get and parse arguments."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Swift Navigation SBP Example.")
    # NOTE(review): the -a/-p help strings look copy-pasted from -s/-b;
    # preserved byte-for-byte to keep behavior identical.
    parser.add_argument(
        "-s", "--serial-port",
        default=[DEFAULT_SERIAL_PORT], nargs=1,
        help="specify the serial port to use.")
    parser.add_argument(
        "-b", "--baud",
        default=[DEFAULT_SERIAL_BAUD], nargs=1,
        help="specify the baud rate to use.")
    parser.add_argument(
        "-a", "--address",
        default=[DEFAULT_UDP_ADDRESS], nargs=1,
        help="specify the serial port to use.")
    parser.add_argument(
        "-p", "--udp-port",
        default=[DEFAULT_UDP_PORT], nargs=1,
        help="specify the baud rate to use.")
    return parser.parse_args()
2.208191
2.129176
1.03711
def commentify(value):
    """Builds a comment."""
    value = markdown_links(value)
    if value is None:
        return
    lines = value.split('\n')
    if len(lines) == 1:
        return "* " + value
    # Multi-line: prefix each line, dropping the trailing (empty) line.
    return '\n'.join([' * ' + l for l in lines[:-1]])
4.275625
4.108115
1.040776
def convert(value):
    """Converts to a C language appropriate identifier format."""
    # Prefix names that would collide with existing identifiers.
    renamed = "Sbp" + value if value in COLLISIONS else value
    # CamelCase -> snake_case, in two passes.
    pass1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', renamed)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', pass1).lower() + "_t"
3.849341
3.243196
1.186898
def mk_id(field):
    """Builds an identifier from a field."""
    name = field.type_id
    if name == "string":
        return "%s" % ("char")
    if name == "array":
        fill = field.options['fill'].value
        if field.size and fill in CONSTRUCT_CODE:
            return "%s" % fill
        return "%s" % convert(fill)
    if name not in CONSTRUCT_CODE:
        return convert(name)
    return name
3.935534
3.942371
0.998266
def mk_size(field):
    """Builds an identifier for a container type."""
    name = field.type_id
    if name in ("string", "array"):
        sized = field.options.get('size', None)
        if sized:
            return "%s[%d];" % (field.identifier, field.options.get('size').value)
        # Unsized containers are declared as zero-length arrays.
        return "%s[0];" % field.identifier
    return '%s;' % field.identifier
2.349031
2.242726
1.0474
def render_source(output_dir, package_spec):
    """Render and output to a directory given a package specification."""
    path, name = package_spec.filepath
    destination_filename = "%s/%s.h" % (output_dir, name)
    py_template = JENV.get_template(MESSAGES_TEMPLATE_NAME)
    with open(destination_filename, 'w') as f:
        f.write(py_template.render(
            msgs=package_spec.definitions,
            pkg_name=name,
            filepath="/".join(package_spec.filepath) + ".yaml",
            max_msgid_len=package_spec.max_msgid_len,
            description=package_spec.description,
            timestamp=package_spec.creation_timestamp,
            include=extensions(package_spec.includes)))
4.553892
4.458383
1.021422
def crc16jit(buf, offset, crc, length):
    """CRC16 implementation acording to CCITT standards."""
    for i in range(offset, offset + length):
        byte = buf[i]
        # Table lookup keyed by the CRC high byte XOR the data byte.
        tab_val = crc16_tab[((nb.u2(crc) >> 8) & nb.u2(0xFF)) ^ (byte & nb.u2(0xFF))]
        crc = ((nb.u2(crc) << nb.u2(8)) & nb.u2(0xFFFF)) ^ tab_val
        crc = nb.u2(crc) & nb.u2(0xFFFF)
    return crc
2.952525
3.013057
0.97991
def crc16_nojit(s, crc=0):
    """CRC16 implementation acording to CCITT standards."""
    # bytearray's elements are integers in both python 2 and 3
    for ch in bytearray(s):
        crc = ((crc << 8) & 0xFFFF) ^ _crc16_tab[((crc >> 8) & 0xFF) ^ (ch & 0xFF)]
        crc &= 0xFFFF
    return crc
3.113472
3.117403
0.998739
def _get_framed(self, buf, offset, insert_payload):
    """Returns the framed message length and updates the CRC.

    Layout written into *buf*: header, payload, then CRC16 over
    everything after the preamble byte.
    """
    header_offset = offset + self._header_len
    self.length = insert_payload(buf, header_offset, self.payload)
    struct.pack_into(self._header_fmt, buf, offset,
                     self.preamble, self.msg_type, self.sender, self.length)
    crc_offset = header_offset + self.length
    preamble_bytes = 1
    # CRC covers header minus the preamble plus the payload.
    crc_over_len = self._header_len + self.length - preamble_bytes
    self.crc = crc16jit(buf, offset+1, 0, crc_over_len)
    struct.pack_into(self._crc_fmt, buf, crc_offset, self.crc)
    return preamble_bytes + crc_over_len + self._crc_len
3.758869
3.669736
1.024288
def pack(self):
    """Pack to framed binary message."""
    # Scratch buffer; 512 bytes is enough for any framed SBP message here.
    scratch = np.zeros(512, dtype=np.uint8)
    packed_len = self._get_framed(scratch, 0, self._copy_payload)
    return scratch[:packed_len].tobytes()
7.636281
5.684612
1.343325
def pack_into(self, buf, offset, write_payload):
    """Pack a framed binary message into *buf* at *offset*."""
    return self._get_framed(buf, offset, write_payload)
11.054238
6.716109
1.645929
def unpack(d):
    """Unpack and return a framed binary message."""
    parsed = SBP._parser.parse(d)
    # NOTE(review): assert is stripped under -O; preamble check relies on it.
    assert parsed.preamble == SBP_PREAMBLE, "Invalid preamble 0x%x." % parsed.preamble
    return SBP(parsed.msg_type, parsed.sender, parsed.length,
               parsed.payload, parsed.crc)
5.395769
5.298295
1.018397
def to_json(self, sort_keys=False):
    """Produce a JSON-encoded SBP message."""
    return json.dumps(self.to_json_dict(), sort_keys=sort_keys)
3.494733
2.665375
1.31116
def from_json(s):
    """Given a JSON-encoded message, build an object."""
    return SBP.from_json_dict(json.loads(s))
3.584317
3.431764
1.044453
def read(self, size):
    """Read wrapper.

    Parameters
    ----------
    size : int
        Number of bytes to read.

    Raises IOError when the serial device disappears.
    """
    try:
        return self.handle.read(size)
    except (OSError, serial.SerialException):
        print()
        print("Piksi disconnected")
        print()
        self.handle.close()
        raise IOError
5.742726
6.679609
0.85974
def write(self, s):
    """Write wrapper.

    Parameters
    ----------
    s : bytes
        Bytes to write
    """
    try:
        return self.handle.write(s)
    except (OSError, serial.SerialException, serial.writeTimeoutError) as e:
        # NOTE(review): `e == serial.writeTimeoutError` compares the caught
        # exception against pySerial's module-level exception object; verify
        # this ever matches on the pySerial version in use.
        if e == serial.writeTimeoutError:
            print("sbp pyserial_driver: writeTimeoutError")
            return 0
        else:
            print()
            print("Piksi disconnected")
            print()
            self.handle.close()
            raise IOError
6.820313
7.338207
0.929425
def commentify(value):
    """Builds a comment."""
    value = comment_links(value)
    if value is None:
        return
    lines = value.split('\n')
    if len(lines) == 1:
        return "* " + value
    # Multi-line: prefix each line, dropping the trailing (empty) line.
    return '\n'.join([' * ' + l for l in lines[:-1]])
4.157332
4.025996
1.032622
def parse_type(field):
    """Function to pull a type from the binary payload."""
    if field.type_id == 'string':
        if 'size' in field.options:
            return "parser.getString(%d)" % field.options['size'].value
        return "parser.getString()"
    elif field.type_id in JAVA_TYPE_MAP:
        # Primitive java types have extractor methods in SBPMessage.Parser
        return "parser.get" + field.type_id.capitalize() + "()"
    if field.type_id == 'array':
        # Call function to build array
        t = field.options['fill'].value
        if t in JAVA_TYPE_MAP:
            if 'size' in field.options:
                return "parser.getArrayof%s(%d)" % (t.capitalize(),
                                                    field.options['size'].value)
            return "parser.getArrayof%s()" % t.capitalize()
        if 'size' in field.options:
            return "parser.getArray(%s.class, %d)" % (t,
                                                      field.options['size'].value)
        return "parser.getArray(%s.class)" % t
    else:
        # This is an inner class, call default constructor
        return "new %s().parse(parser)" % field.type_id
3.087137
3.073989
1.004277
def build_type(field):
    """Function to pack a type into the binary payload."""
    if field.type_id == 'string':
        if 'size' in field.options:
            return "builder.putString(%s, %d)" % (field.identifier,
                                                  field.options['size'].value)
        return "builder.putString(%s)" % field.identifier
    elif field.type_id in JAVA_TYPE_MAP:
        # Primitive java types have extractor methods in SBPMessage.Builder
        return "builder.put%s(%s)" % (field.type_id.capitalize(),
                                      field.identifier)
    if field.type_id == 'array':
        # Call function to build array
        t = field.options['fill'].value
        if t in JAVA_TYPE_MAP:
            if 'size' in field.options:
                return "builder.putArrayof%s(%s, %d)" % (t.capitalize(),
                                                         field.identifier,
                                                         field.options['size'].value)
            return "builder.putArrayof%s(%s)" % (t.capitalize(),
                                                 field.identifier)
        if 'size' in field.options:
            return "builder.putArray(%s, %d)" % (field.identifier,
                                                 field.options['size'].value)
        return "builder.putArray(%s)" % field.identifier
    else:
        return "%s.build(builder)" % field.identifier
2.527368
2.467459
1.024279
def render_source(output_dir, package_spec, jenv=JENV):
    """Render and output one Java source file per message definition."""
    path, module_name = package_spec.filepath
    java_template = jenv.get_template(TEMPLATE_NAME)
    module_path = "com." + package_spec.identifier
    yaml_filepath = "/".join(package_spec.filepath) + ".yaml"
    includes = [".".join(i.split(".")[:-1]) for i in package_spec.includes]
    includes = [i for i in includes if i != "types"]
    for msg in package_spec.definitions:
        msg_name = classnameify(msg.identifier) if msg.sbp_id else msg.identifier
        l = "/".join(package_spec.filepath)
        destination_filename = "%s/com/%s/%s.java" % (output_dir, l, msg_name)
        # Create the output directory if it doesn't exist
        if not os.path.exists(os.path.dirname(destination_filename)):
            os.mkdir(os.path.dirname(destination_filename))
        with open(destination_filename, 'w+') as f:
            print(destination_filename)
            f.write(java_template.render(m=msg,
                                         filepath=yaml_filepath,
                                         module_path=module_path,
                                         include=includes,
                                         description=package_spec.description))
3.576713
3.570199
1.001825
def render_table(output_dir, packages, jenv=JENV):
    """Render and output dispatch table."""
    destination_filename = output_dir + "/com/swiftnav/sbp/client/MessageTable.java"
    with open(destination_filename, 'w+') as f:
        print(destination_filename)
        rendered = jenv.get_template(TEMPLATE_TABLE_NAME).render(packages=packages)
        f.write(rendered)
4.822462
4.676627
1.031184
def construct_format(f, type_map=CONSTRUCT_CODE):
    """Formats for binary-parser library.

    Returns a JavaScript binary-parser expression for field *f*.

    Fix: removed the dead local `formatted = ""` and the unreachable
    final `return formatted` — every branch above already returns.
    """
    if type_map.get(f.type_id, None):
        return "%s('%s')" % (type_map.get(f.type_id), f.identifier)
    elif f.type_id == 'string' and f.options.get('size', None):
        return "string('%s', { length: %d })" % (f.identifier,
                                                 f.options['size'].value)
    elif f.type_id == 'string':
        return "string('%s', { greedy: true })" % (f.identifier)
    elif f.type_id == 'array':
        fill = f.options['fill'].value
        f_ = copy.copy(f)
        f_.type_id = fill
        size = f.options.get('size', None)
        size_fn = f.options.get('size_fn', None)
        field_type = type_map.get(f_.type_id, None)
        if field_type is None:
            # Composite element type: reference its generated parser.
            field_type = "%s.prototype.parser" % f_.type_id
        else:
            field_type = "'%s'" % field_type
        if size is not None:
            # Fixed-width integers must be read little-endian.
            # NOTE(review): the 'uint64'/'int64' entries map to 16-bit le
            # types — looks like a copy-paste artifact; preserved as-is.
            d = {"'uint16'": "'uint16le'",
                 "'uint32'": "'uint32le'",
                 "'uint64'": "'uint16le'",
                 "'int16'": "'int16le'",
                 "'int32'": "'int32le'",
                 "'int64'": "'int16le'"}
            field_type_arr = d.get(field_type, field_type)
            return "array('%s', { length: %d, type: %s })" % (f.identifier,
                                                              size.value,
                                                              field_type_arr)
        elif f.options.get('size_fn') is not None:
            return "array('%s', { type: %s, length: '%s' })" % (f_.identifier,
                                                                field_type,
                                                                size_fn.value)
        else:
            return "array('%s', { type: %s, readUntil: 'eof' })" % (f_.identifier,
                                                                    field_type)
    else:
        return "nest('%s', { type: %s.prototype.parser })" % (f.identifier,
                                                              f.type_id)
2.38888
2.380095
1.003691
def js_classnameify(s):
    """Makes a classname."""
    if '_' not in s:
        return s
    # Capitalize the first letter of each underscore-separated word.
    return ''.join(w[0].upper() + w[1:].lower() for w in s.split('_'))
2.918857
2.74454
1.063514
def from_binary(self, d):
    """Given a binary payload d, update the appropriate payload fields of
    the message."""
    parsed = MsgEphemerisGPSDepF._parser.parse(d)
    for field in self.__class__.__slots__:
        setattr(self, field, getattr(parsed, field))
5.937289
4.740681
1.252413
def to_binary(self):
    """Produce a framed/packed SBP message."""
    container = containerize(exclude_fields(self))
    self.payload = MsgEphemerisGPSDepF._parser.build(container)
    return self.pack()
26.52039
9.025851
2.93827
def into_buffer(self, buf, offset):
    """Produce a framed/packed SBP message into the provided buffer and
    offset."""
    self.payload = containerize(exclude_fields(self))
    self.parser = MsgEphemerisGPSDepF._parser
    self.stream_payload.reset(buf, offset)
    return self.pack_into(buf, offset, self._build_payload)
7.063158
5.278352
1.338137
data = b"" while len(data) < size: d = self._read(size - len(data)) if self._broken: raise StopIteration if not d: # NOTE (Buro/jgross): Force a yield here to another thread. In # case the stream fails midstream, the spinning here causes # the UI thread to lock up without yielding. time.sleep(0) continue data += d return data
def _readall(self, size)
Read until all bytes are collected. Parameters ---------- size : int Number of bytes to read.
9.418118
10.559762
0.891887
def _receive(self):
    """Read and build SBP message; returns None on framing/CRC failure."""
    # preamble - not readall(1) to allow breaking before messages,
    # empty input
    preamble = self._read(1)
    if not preamble:
        return None
    elif ord(preamble) != SBP_PREAMBLE:
        if self._verbose:
            print("Host Side Unhandled byte: 0x%02x" % ord(preamble))
        return None
    # hdr
    hdr = self._readall(5)
    msg_crc = crc16(hdr)
    msg_type, sender, msg_len = struct.unpack("<HHB", hdr)
    # data
    data = self._readall(msg_len)
    msg_crc = crc16(data, msg_crc)
    # crc
    crc = self._readall(2)
    crc, = struct.unpack("<H", crc)
    if crc != msg_crc:
        if self._verbose:
            print("crc mismatch: 0x%04X 0x%04X" % (msg_crc, crc))
        return None
    msg = SBP(msg_type, sender, msg_len, data, crc)
    try:
        msg = self._dispatch(msg)
    except Exception as exc:
        warnings.warn("SBP dispatch error: %s" % (exc,))
    return msg
3.677711
3.584746
1.025933
def dispatch(msg, table=_SBP_TABLE):
    """Dispatch an SBP message type based on its `msg_type` and parse its
    payload.

    Parameters
    ----------
    msg : :class:`SBP`
        A parsed SBP object.
    table : dict
        Any table mapping unique SBP message type IDs to SBP message
        constructors.

    Returns
    -------
    SBP message with a parsed payload; the original *msg* on failure.
    """
    try:
        return table[msg.msg_type](msg)
    except KeyError:
        warn = "No message found for msg_type id %d for msg %s." \
               % (msg.msg_type, msg)
        warnings.warn(warn, RuntimeWarning)
        return msg
    except FormatFieldError:
        warnings.warn("SBP payload deserialization error! 0x%x" % msg.msg_type,
                      RuntimeWarning)
        return msg
5.424821
4.787855
1.133038
for msg, metadata in self._source: if msg.msg_type: self._call(msg, **metadata) # Break any upstream iterators for sink in self._sinks: i = sink() if i is not None: i.breakiter() self._dead = True
def _recv_thread(self)
Internal thread to iterate over source messages and dispatch callbacks.
10.356998
8.36519
1.238107
def filter(self, msg_type=None, maxsize=0):
    """Get a filtered iterator of messages for synchronous, blocking use in
    another thread."""
    if self._dead:
        return iter(())
    iterator = Handler._SBPQueueIterator(maxsize)
    # We use a weakref so that the iterator may be garbage collected if it's
    # consumer no longer has a reference.
    ref = weakref.ref(iterator)
    self._sinks.append(ref)

    def feediter(msg, **metadata):
        target = ref()
        if target is not None:
            target(msg, **metadata)
        else:
            raise Handler._DeadCallbackException

    self.add_callback(feediter, msg_type)
    return iterator
7.757
7.620781
1.017875
def add_callback(self, callback, msg_type=None):
    """Add per message type or global callback.

    Parameters
    ----------
    callback : fn
        Callback function
    msg_type : int | iterable
        Message type to register callback against. Default `None` means
        global callback. Iterable type adds the callback to all the message
        types.
    """
    keys = self._to_iter(msg_type)
    if keys is None:
        self._callbacks[msg_type].add(callback)
    else:
        for key in keys:
            self._callbacks[key].add(callback)
3.184979
3.198267
0.995845
def remove_callback(self, callback, msg_type=None):
    """Remove per message type of global callback.

    Parameters
    ----------
    callback : fn
        Callback function
    msg_type : int | iterable
        Message type to remove callback from. Default `None` means global
        callback. Iterable type removes the callback from all the message
        types.
    """
    if msg_type is None:
        # Strip the callback from every registered type.
        msg_type = self._callbacks.keys()
    keys = self._to_iter(msg_type)
    if keys is None:
        self._callbacks[msg_type].remove(callback)
    else:
        for key in keys:
            try:
                self._callbacks[key].remove(callback)
            except KeyError:
                pass
2.505081
2.527599
0.991091
deadsinks = [] for i in self._sinks: if i() is None: deadsinks.append(i) for i in deadsinks: self._sinks.remove(i)
def _gc_dead_sinks(self)
Remove any dead weakrefs.
2.980044
2.428733
1.226995
if msg.msg_type: for callback in self._get_callbacks(msg.msg_type): try: callback(msg, **metadata) except Handler._DeadCallbackException: # The callback was an upstream iterator that has been garbage # collected. Remove it from our internal structures. self.remove_callback(callback) self._gc_dead_sinks() except SystemExit: raise except: import traceback traceback.print_exc()
def _call(self, msg, **metadata)
Process message with all callbacks (global and per message type).
6.225822
5.513027
1.129293
def wait(self, msg_type, timeout=1.0):
    """Wait for a SBP message.

    Parameters
    ----------
    msg_type : int
        SBP message type.
    timeout : float
        Waiting period

    Returns the message, or None if the timeout elapsed first.
    """
    event = threading.Event()
    result = {'data': None}

    def cb(sbp_msg, **metadata):
        result['data'] = sbp_msg
        event.set()

    self.add_callback(cb, msg_type)
    event.wait(timeout)
    self.remove_callback(cb, msg_type)
    return result['data']
3.46655
3.809251
0.910035
def wait_callback(self, callback, msg_type=None, timeout=1.0):
    """Wait for a SBP message with a callback.

    Parameters
    ----------
    callback : fn
        Callback function
    msg_type : int | iterable
        Message type to register callback against. Default `None` means
        global callback. Iterable type adds the callback to all the message
        types.
    timeout : float
        Waiting period
    """
    event = threading.Event()

    def cb(msg, **metadata):
        # Forward to the user callback, then release the waiter.
        callback(msg, **metadata)
        event.set()

    self.add_callback(cb, msg_type)
    event.wait(timeout)
    self.remove_callback(cb, msg_type)
2.726409
3.467777
0.786212
def read(self, size):
    """Read wrapper.

    Parameters
    ----------
    size : int
        Number of bytes to read.

    Raises IOError on EOF or when the device disappears.
    """
    try:
        data = self.handle.read(size)
        if data == '':
            print()
            print("Piksi disconnected")
            print()
            raise IOError
        return data
    except OSError:
        print()
        print("Piksi disconnected")
        print()
        raise IOError
3.992827
4.387791
0.909986
def write(self, s):
    """Write wrapper.

    Parameters
    ----------
    s : bytes
        Bytes to write

    Raises IOError when the device disappears.
    """
    try:
        return self.handle.write(s)
    except OSError:
        print()
        print("Piksi disconnected")
        print()
        raise IOError
9.045809
11.10989
0.814212
def fmt_repr(obj):
    """Return pretty printed string representation of an object."""
    attrs = dict(obj.__dict__)
    return "<%s: {%s}>" % (obj.__class__.__name__,
                           pprint.pformat(attrs, width=1))
3.595022
3.350577
1.072956
def capture_setting(self, sbp_msg, **metadata):
    """Callback to extract and store setting values from
    SBP_MSG_SETTINGS_READ_RESP.

    Messages of any type other than SBP_MSG_SETTINGS_READ_RESP are ignored.
    """
    if sbp_msg.msg_type != SBP_MSG_SETTINGS_READ_RESP:
        return
    # Payload is a NUL-separated triple: section, setting, value.
    section, setting, value = sbp_msg.payload.split(b'\0')[:3]
    self.settings.append((section, setting, value))
5.373425
3.796274
1.415447
def wait_for_setting_value(self, section, setting, value, wait_time=5.0):
    """Wait up to wait_time seconds to see a SBP_MSG_SETTINGS_READ_RESP
    with the user-specified value; returns True if observed."""
    expire = time.time() + wait_time
    ok = False
    while not ok and time.time() < expire:
        matching = [entry for entry in self.settings
                    if (entry[0], entry[1]) == (section, setting)]
        # Check to see if the last setting has the value we want
        if len(matching) > 0:
            ok = matching[-1][2] == value
        time.sleep(0.1)
    return ok
3.09543
3.157725
0.980272
def clear(self, section=None, setting=None, value=None):
    """Remove stored settings matching the given filters.

    A ``None`` filter matches everything, so ``clear()`` with no
    arguments empties the list.
    """
    def _matches(entry):
        return ((section is None or entry[0] == section) and
                (setting is None or entry[1] == setting) and
                (value is None or entry[2] == value))

    # Mutate in place so other references to the list stay valid.
    self.settings[:] = [entry for entry in self.settings if not _matches(entry)]
3.726424
3.779774
0.985886
formatted = "" if type_map.get(f.type_id, None): return "'{identifier}' / {type_id}".format(type_id=type_map.get(f.type_id), identifier=f.identifier) elif f.type_id == 'string' and f.options.get('size', None): return "'{id}'/ construct.Bytes({size})".format(id=f.identifier, size=f.options['size'].value) elif f.type_id == 'string': return "'{id}' / construct.GreedyBytes".format(id=f.identifier) elif f.type_id == 'array' and f.options.get('size', None): fill = f.options['fill'].value f_ = copy.copy(f) f_.type_id = fill s = f.options.get('size', None).value return "'{id}' / construct.Array({size}, {type})".format(id=f.identifier, size=s, type=type_map.get(f_.type_id, 'construct.Byte')) elif f.type_id == 'array': fill = f.options['fill'].value f_ = copy.copy(f) f_.type_id = fill return "construct.GreedyRange(%s)" % construct_format(f_) else: return "'%s' / construct.Struct(%s._parser)" % (f.identifier, f.type_id) return formatted
def construct_format(f, type_map=CONSTRUCT_CODE)
Formats for Construct.
2.598758
2.62352
0.990561
def render_source(output_dir, package_spec, jenv=JENV):
    """Render a package specification to a Python module in output_dir."""
    path, name = package_spec.filepath
    destination_filename = "%s/%s.py" % (output_dir, name)
    py_template = jenv.get_template(TEMPLATE_NAME)
    # Drop the leading namespace component and the module name itself.
    module_path = ".".join(package_spec.identifier.split(".")[1:-1])
    includes = [".".join(inc.split(".")[:-1]) for inc in package_spec.includes]
    includes = [inc for inc in includes if inc != "types"]
    print(destination_filename, includes)
    rendered = py_template.render(
        msgs=package_spec.definitions,
        filepath="/".join(package_spec.filepath) + ".yaml",
        module_path=module_path,
        include=includes,
        timestamp=package_spec.creation_timestamp,
        description=package_spec.description)
    with open(destination_filename, 'w') as f:
        f.write(rendered)
3.27872
3.292203
0.995905
def read(self, size):
    """Read wrapper.

    Parameters
    ----------
    size : int
      Number of bytes to read
    """
    data = None
    while True:
        try:
            data = self.handle.recv(size)
        except socket.timeout as socket_error:
            # Timed out: tear down and re-establish the connection.
            self._reconnect(socket_error)
        except socket.error as socket_error:
            # EINTR means the call was interrupted by a signal; just retry.
            if socket_error.errno == errno.EINTR:
                continue
            self._reconnect(IOError)
        if not data:
            # Empty recv means the peer closed the socket.
            # NOTE(review): after any reconnect above, this read returns
            # None/empty rather than retrying -- confirm callers handle it.
            self._reconnect(IOError)
        break
    return data
3.343761
3.891281
0.859296
def write(self, s):
    """Write wrapper.

    Parameters
    ----------
    s : bytes
      Bytes to write

    Raises
    ------
    IOError
      On any socket error other than a timeout.
    """
    # Serialize writers.  Using `with` guarantees release even if sendall
    # raises and, unlike the previous acquire-inside-try/finally, cannot
    # leak the lock if the acquire itself is interrupted.
    with self._write_lock:
        try:
            self.handle.sendall(s)
        except socket.timeout:
            # Timeouts trigger a reconnect rather than an error.
            self._connect()
        except socket.error:
            raise IOError
3.502683
4.265282
0.821208
def exclude_fields(obj, exclude=EXCLUDE):
    """Return a dict of the object's slot attributes, omitting any names
    listed in ``exclude`` (parent/bookkeeping attributes)."""
    return {name: getattr(obj, name)
            for name in obj.__slots__ if name not in exclude}
3.914265
3.392893
1.153666
def walk_json_dict(coll):
    """Recursively flatten a parsed SBP object into plain dicts and lists
    compatible with JSON output.

    Parameters
    ----------
    coll : dict
    """
    if isinstance(coll, dict):
        return {key: walk_json_dict(val) for key, val in coll.items()}
    if isinstance(coll, bytes):
        # JSON has no bytes type; payload text is decoded as ASCII.
        return coll.decode('ascii')
    if hasattr(coll, '__iter__') and not isinstance(coll, str):
        return [walk_json_dict(item) for item in coll]
    return coll
2.22733
2.321543
0.959418
def containerize(coll):
    """Walk attribute fields passed from an SBP message and convert dicts
    to Containers where appropriate.  Needed for proper Construct
    serialization.

    Parameters
    ----------
    coll : dict
    """
    if isinstance(coll, Container):
        for key, val in coll.items():
            setattr(coll, key, containerize(val))
        return coll
    if isinstance(coll, dict):
        return containerize(Container(**coll))
    if isinstance(coll, list):
        # Convert dict elements in place; other elements are left alone.
        for index, item in enumerate(coll):
            if isinstance(item, dict):
                coll[index] = containerize(Container(**item))
        return coll
    return coll
2.297077
2.444993
0.939503
items = ["%s = %r" % (k, v) for k, v in list(exclude_fields(obj).items())] return "<%s: {%s}>" % (obj.__class__.__name__, ', '.join(items))
def fmt_repr(obj)
Print a orphaned string representation of an object without the clutter of its parent object.
3.496247
3.252626
1.0749
def read_spec(filename, verbose=False):
    """Read an SBP specification from a YAML file.

    Parameters
    ----------
    filename : str
      Local filename for specification.
    verbose : bool
      Print out some debugging info

    Raises
    ------
    Exception
      On empty file.
    yaml.YAMLError
      On YAML parsing error.
    voluptuous.Invalid
      On invalid SBP schema.
    """
    with open(filename, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; fine for trusted local specs, but consider
        # yaml.safe_load if specs may come from untrusted sources.
        contents = yaml.load(f)
    if contents is None:
        raise Exception("Empty yaml file: %s." % filename)
    try:
        s.package_schema(contents)
    except Exception as e:
        sys.stderr.write("Invalid SBP YAML specification: %s.\n" % filename)
        raise e
    return contents
4.317743
3.513363
1.228949
file_index = {} base_dir = None if os.path.isfile(input_file): file_index[input_file] = None base_dir = os.path.dirname(input_file) elif os.path.isdir(input_file): base_dir = input_file for inf in glob.glob(input_file + s.SBP_EXTENSION): file_index[os.path.abspath(inf)] = None for inf in glob.glob(input_file + '/*'): base, index = get_files(os.path.abspath(inf)) z = file_index.copy() z.update(index) file_index = z return (base_dir, file_index)
def get_files(input_file)
Initializes an index of files to generate, returns the base directory and index.
2.808488
2.426055
1.157636
def resolve_deps(base_dir, file_index):
    """Given a base directory and an initial set of files, retrieve
    dependencies (transitive includes) and add them to file_index.

    Fixes: the nested ``flatten`` helper used a mutable default argument
    (``index={}``), and the outer loop mutated ``file_index`` while
    iterating ``file_index.items()``, which raises "dictionary changed
    size during iteration" in Python 3 when new includes are discovered.

    Parameters
    ----------
    base_dir : str
      Directory that include paths are resolved against.
    file_index : dict
      Maps filename -> parsed spec (values are filled in here).

    Returns
    -------
    dict
      The updated file_index.
    """
    def flatten(tree, index=None):
        # Fresh dict per call instead of a shared mutable default.
        if index is None:
            index = {}
        for include in tree.get('include', []):
            fname = base_dir + "/" + include
            assert os.path.exists(fname), "File %s does not exist." % fname
            if fname not in index:
                index[fname] = read_spec(fname)
                index.update(flatten(index[fname], file_index))
        return index

    # Iterate over a snapshot: flatten() may add new keys to file_index.
    for fname, contents in list(file_index.items()):
        file_index[fname] = read_spec(fname)
        file_index.update(flatten(file_index[fname], file_index))
    return file_index
2.792281
2.794226
0.999304
def mk_package(contents):
    """Instantiate a package specification from a parsed "AST" of a package.

    Parameters
    ----------
    contents : dict

    Returns
    -------
    PackageSpecification
    """
    definitions = [mk_definition(d) for d in contents.get('definitions', [])]
    return sbp.PackageSpecification(
        identifier=contents.get('package', None),
        description=contents.get('description', None),
        includes=contents.get('include', []),
        definitions=definitions,
        render_source=contents.get('render_source', True),
        stable=contents.get('stable', False),
        public=contents.get('public', True))
3.743658
3.71432
1.007899
def mk_definition(defn):
    """Instantiate a struct or SBP message specification from a parsed
    "AST" of a struct or message.

    Parameters
    ----------
    defn : dict

    Returns
    -------
    A Definition or a specialization of one, such as a Struct.
    """
    assert len(defn) == 1
    identifier, contents = next(iter(defn.items()))
    fields = [mk_field(field) for field in contents.get('fields', [])]
    definition = sbp.Definition(identifier=identifier,
                                sbp_id=contents.get('id', None),
                                short_desc=contents.get('short_desc', None),
                                desc=contents.get('desc', None),
                                type_id=contents.get('type'),
                                fields=fields,
                                public=contents.get('public', True))
    return sbp.resolve_type(definition)
3.94898
4.019032
0.98257
def mk_field(field):
    """Instantiate a field specification from a parsed "AST" of a field.

    Parameters
    ----------
    field : dict

    Returns
    -------
    A Field or a specialization of a field, such as a bitfield.
    """
    assert len(field) == 1
    identifier, contents = next(iter(field.items()))
    # Defaults first, so explicit entries in the spec win.
    options = {'units': '', 'n_with_values': 0}
    options.update(contents)
    return sbp.resolve_type(sbp.Field(identifier=identifier,
                                      type_id=options.pop('type'),
                                      options=options))
8.057824
7.711648
1.04489
def to_global(s):
    """Format a global (lowerCamelCase) variable name."""
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    if '_' in s:
        s = ''.join(part.capitalize() for part in s.split('_'))
    # Lower-case only the first character.
    return s[0].lower() + s[1:]
4.894256
4.593795
1.065406
def to_data(s):
    """Format a data (UpperCamelCase) variable name."""
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    if '_' not in s:
        return s
    return ''.join(part.capitalize() for part in s.split('_'))
7.044658
6.025152
1.169208
def to_type(f, type_map=CONSTRUCT_CODE):
    """Format the proper type name for a field."""
    name = f.type_id
    if name.startswith('GPSTime'):
        name = 'Gps' + name[3:]
    mapped = type_map.get(name, None)
    if mapped:
        return mapped
    if name == 'array':
        # Element type is carried in the 'fill' option.
        element = copy.copy(f)
        element.type_id = f.options['fill'].value
        return "[%s]" % to_type(element)
    return name
5.037377
4.826898
1.043606
def render_source(output_dir, package_spec):
    """Render a package specification to a Haskell module in output_dir."""
    path, name = package_spec.filepath
    module_prefix = "SwiftNav.SBP"
    module_name = camel_case(name)
    full_module_name = ".".join([module_prefix, module_name])
    destination_filename = "%s/src/SwiftNav/SBP/%s.hs" % (output_dir, module_name)
    template = JENV.get_template(MESSAGES_TEMPLATE_NAME)
    # Each include becomes a fully qualified CamelCase module name.
    module_includes = []
    for include in package_spec.includes:
        parts = [camel_case(part) for part in include.split(".")[:-1]]
        module_includes.append(".".join([module_prefix] + parts))
    with open(destination_filename, 'w') as f:
        f.write(template.render(msgs=package_spec.definitions,
                                description=package_spec.description,
                                module_name=full_module_name,
                                module_includes=module_includes))
4.054918
3.989328
1.016442
def to_comment(value):
    """Build a comment block from a description string.

    Single-line values get a "* " prefix; multi-line values render each
    split line except the last (which is dropped) as " * line".
    """
    if value is None:
        return None
    lines = value.split('\n')
    if len(lines) == 1:
        return "* " + value
    # The final split element is intentionally dropped.
    return '\n'.join(' * ' + line for line in lines[:-1])
3.920958
3.455993
1.134539
def to_identifier(s):
    """Convert a snake_case name to CamelCase."""
    if s.startswith('GPS'):
        s = 'Gps' + s[3:]
    if '_' not in s:
        return s
    return ''.join(part.capitalize() for part in s.split('_'))
5.273626
4.505774
1.170415
def render_source(output_dir, package_spec):
    """Render a package specification to a .proto file in output_dir."""
    path, name = package_spec.filepath
    destination_filename = '%s/%s.proto' % (output_dir, name)
    template = JENV.get_template(MESSAGES_TEMPLATE_NAME)
    includes = []
    for include in package_spec.includes:
        # Strip a trailing ".yaml" extension when present.
        includes.append(include[:-5] if include.endswith('.yaml') else include)
    if 'types' in includes:
        includes.remove('types')
    with open(destination_filename, 'w') as f:
        f.write(template.render(
            name=name,
            package=package_spec.identifier,
            messages=package_spec.definitions,
            includes=includes,
            description=package_spec.description,
        ))
3.516729
3.499157
1.005022
def load_app_resource(**kwargs):
    """
    :param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`,
                   with the exception of "project"
    :raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is
             called with dxpy.JOB_ID not set, or if "DX_RESOURCES_ID" or
             "DX_PROJECT_CONTEXT_ID" is not found in the environment variables
    :returns: None if no matching object is found; otherwise returns a dxpy
              object handler for that class of object

    Searches for a data object in the app resources container matching the
    given keyword arguments.  If the app resources container ID is not found
    in DX_RESOURCES_ID, falls back to looking in the current project.

    NOTE(review): unlike load_from_cache, the found object is returned
    directly and is NOT cloned into the job workspace here -- confirm
    whether the historical docstring's claim about cloning was stale.

    Example::

        @dxpy.entry_point('main')
        def main(*args, **kwargs):
            x = load_app_resource(name="Indexed genome", classname='file')
            dxpy.download_dxfile(x)
    """
    if 'project' in kwargs:
        raise DXError('Unexpected kwarg: "project"')
    if dxpy.JOB_ID is None:
        raise DXError('Not called by a job')
    resources = os.environ.get('DX_RESOURCES_ID',
                               os.environ.get('DX_PROJECT_CONTEXT_ID'))
    if resources is None:
        raise DXError('App resources container ID could not be found')

    kwargs['project'] = resources
    kwargs['return_handler'] = True
    return find_one_data_object(**kwargs)
4.70253
1.43575
3.275313
def load_from_cache(**kwargs):
    """
    :param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`,
                   with the exception of "project"
    :raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is
             called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is
             not found in the environment variables
    :returns: None if no matching object is found; otherwise returns a dxpy
              object handler for that class of object

    Searches for a data object in the project cache container matching the
    given keyword arguments.  If found, the object is cloned into the running
    job's workspace container, and the handler for it is returned.

    Example::

        @dxpy.entry_point('main')
        def main(*args, **kwargs):
            x = load_from_cache(name="Indexed genome", classname='file')
            if x is None:
                x = compute_result(*args)
                save_to_cache(x)
    """
    if 'project' in kwargs:
        raise DXError('Unexpected kwarg: "project"')
    if dxpy.JOB_ID is None:
        raise DXError('Not called by a job')
    if 'DX_PROJECT_CACHE_ID' not in os.environ:
        raise DXError('Project cache ID could not be found in the environment variable DX_PROJECT_CACHE_ID')

    kwargs.update(project=os.environ.get('DX_PROJECT_CACHE_ID'),
                  return_handler=True)
    cached_object = find_one_data_object(**kwargs)
    if cached_object is None:
        return None
    return cached_object.clone(dxpy.WORKSPACE_ID)
4.550282
1.514978
3.00353
def save_to_cache(dxobject):
    """
    :param dxobject: a dxpy object handler for an object to save to the cache
    :raises: :exc:`~dxpy.exceptions.DXError` if this is called with
             dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found
             in the environment variables

    Clones the given object to the project cache.

    Example::

        @dxpy.entry_point('main')
        def main(*args, **kwargs):
            x = load_from_cache(name="Indexed genome", classname='file')
            if x is None:
                x = compute_result(*args)
                save_to_cache(x)
    """
    if dxpy.JOB_ID is None:
        raise DXError('Not called by a job')
    cache_project = os.environ.get('DX_PROJECT_CACHE_ID')
    if cache_project is None:
        raise DXError('Project cache ID could not be found in the environment variable DX_PROJECT_CACHE_ID')
    dxobject.clone(cache_project)
4.680925
1.617808
2.893375
def _get_run_input_common_fields(executable_input, **kwargs):
    '''
    Takes the same arguments as the run method. Creates an input hash for
    the /executable-xxxx/run method, translating ONLY the fields that can
    be handled uniformly across all executables: project, folder, name,
    tags, properties, details, depends_on, allow_ssh, debug,
    delay_workspace_destruction, ignore_reuse, and extra_args.
    '''
    project = kwargs.get('project') or dxpy.WORKSPACE_ID
    run_input = {"input": executable_input}
    # Fields copied through verbatim when provided.
    for arg in ['folder', 'name', 'tags', 'properties', 'details']:
        if kwargs.get(arg) is not None:
            run_input[arg] = kwargs[arg]
    # instance_type and cluster_spec are merged into one systemRequirements.
    if kwargs.get('instance_type') is not None or kwargs.get('cluster_spec') is not None:
        instance_type_srd = SystemRequirementsDict.from_instance_type(kwargs.get('instance_type'))
        cluster_spec_srd = SystemRequirementsDict(kwargs.get('cluster_spec'))
        run_input["systemRequirements"] = (instance_type_srd + cluster_spec_srd).as_dict()
    if kwargs.get('depends_on') is not None:
        run_input["dependsOn"] = []
        if isinstance(kwargs['depends_on'], list):
            for item in kwargs['depends_on']:
                # Handlers contribute their IDs; plain strings pass through.
                if isinstance(item, DXJob) or isinstance(item, DXDataObject):
                    if item.get_id() is None:
                        raise DXError('A dxpy handler given in depends_on does not have an ID set')
                    run_input["dependsOn"].append(item.get_id())
                elif isinstance(item, basestring):
                    run_input['dependsOn'].append(item)
                else:
                    raise DXError('Expected elements of depends_on to only be either instances of DXJob or DXDataObject, or strings')
        else:
            raise DXError('Expected depends_on field to be a list')
    if kwargs.get('delay_workspace_destruction') is not None:
        run_input["delayWorkspaceDestruction"] = kwargs['delay_workspace_destruction']
    if kwargs.get('allow_ssh') is not None:
        run_input["allowSSH"] = kwargs['allow_ssh']
    if kwargs.get('debug') is not None:
        run_input["debug"] = kwargs['debug']
    if kwargs.get('priority') is not None:
        run_input["priority"] = kwargs['priority']
    if kwargs.get('ignore_reuse') is not None:
        run_input["ignoreReuse"] = kwargs['ignore_reuse']
    # Outside a job context, the target project must be given explicitly.
    if dxpy.JOB_ID is None:
        run_input["project"] = project
    if kwargs.get('extra_args') is not None:
        # extra_args is folded in last via merge().
        merge(run_input, kwargs['extra_args'])
    return run_input
2.839591
1.913557
1.483934
def _get_run_input_fields_for_applet(executable_input, **kwargs):
    '''
    Takes the same arguments as the run method. Creates an input hash for
    the /applet-xxxx/run method.
    '''
    # Although it says "for_applet", this is factored out of
    # DXApplet because apps currently use the same mechanism
    unsupported = ('stage_instance_types', 'stage_folders',
                   'rerun_stages', 'ignore_reuse_stages')
    for arg_name in unsupported:
        if kwargs.get(arg_name):
            raise DXError(arg_name + ' is not supported for applets (only workflows)')
    return DXExecutable._get_run_input_common_fields(executable_input, **kwargs)
13.813437
8.223123
1.679829
def _new(self, dx_hash, **kwargs):
    '''
    :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()`
                    containing attributes common to all data object classes.
    :type dx_hash: dict
    :param runSpec: Run specification
    :type runSpec: dict
    :param dxapi: API version string
    :type dxapi: string
    :param inputSpec: Input specification (optional)
    :type inputSpec: dict
    :param outputSpec: Output specification (optional)
    :type outputSpec: dict
    :param access: Access specification (optional)
    :type access: dict
    :param title: Title string (optional)
    :type title: string
    :param summary: Summary string (optional)
    :type summary: string
    :param description: Description string (optional)
    :type description: string

    .. note:: It is highly recommended that the higher-level module
       :mod:`dxpy.app_builder` or (preferably) its frontend `dx build
       <https://wiki.dnanexus.com/Command-Line-Client/Index-of-dx-Commands#build>`_
       be used instead for applet creation.

    Creates an applet with the given parameters. See the API documentation
    for the `/applet/new
    <https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/applet/new>`_
    method for more info. The applet is not run until :meth:`run()` is
    called.
    '''
    # Required fields: move them from kwargs into the applet spec.
    for field in ('runSpec', 'dxapi'):
        if field not in kwargs:
            raise DXError("%s: Keyword argument %s is required" % (self.__class__.__name__, field))
        dx_hash[field] = kwargs.pop(field)
    # Optional fields: move them only when supplied.
    for field in ('inputSpec', 'outputSpec', 'access', 'title', 'summary', 'description'):
        if field in kwargs:
            dx_hash[field] = kwargs.pop(field)

    resp = dxpy.api.applet_new(dx_hash, **kwargs)
    self.set_ids(resp["id"], dx_hash["project"])
3.461895
1.363116
2.539693
def run(self, applet_input, *args, **kwargs):
    """
    Creates a new job that executes the function "main" of this applet
    with the given input *applet_input*.

    See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for the available
    args.
    """
    # Rename applet_input arg to preserve API compatibility when calling
    # DXApplet.run(applet_input=...)
    return super(DXApplet, self).run(applet_input, *args, **kwargs)
6.729471
6.280575
1.071474
def set_env_from_args(args):
    ''' Sets the environment variables for this process from arguments
    (argparse.Namespace) and calls dxpy._initialize() to reset any values
    that it has already set.
    '''
    args = vars(args)

    # Simple one-to-one mappings from CLI args to config variables.
    simple_mappings = [('apiserver_host', 'DX_APISERVER_HOST'),
                       ('apiserver_port', 'DX_APISERVER_PORT'),
                       ('apiserver_protocol', 'DX_APISERVER_PROTOCOL'),
                       ('project_context_id', 'DX_PROJECT_CONTEXT_ID'),
                       ('workspace_id', 'DX_WORKSPACE_ID'),
                       ('cli_wd', 'DX_CLI_WD'),
                       ('security_context', 'DX_SECURITY_CONTEXT')]
    for arg_name, config_name in simple_mappings:
        if args.get(arg_name) is not None:
            config[config_name] = args[arg_name]

    # A bare auth token is wrapped into a full security context; it is
    # applied after --security-context, so it takes precedence.
    if args.get('auth_token') is not None:
        config['DX_SECURITY_CONTEXT'] = json.dumps({"auth_token": args['auth_token'],
                                                    "auth_token_type": "Bearer"})
2.205848
1.696367
1.300337
def validate_conf(asset_conf):
    """
    Validates the contents of the conf file and makes sure that the
    required information is provided.

    {
      "name": "asset_library_name",
      "title": "A human readable name",
      "description": "A detailed description about the asset",
      "version": "0.0.1",
      "distribution": "Ubuntu",   # (Optional)
      "release": "12.04",
      "execDepends": [...]
    }

    Raises AssetBuilderException when a required field is missing or has
    an unsupported value.
    """
    if 'name' not in asset_conf:
        raise AssetBuilderException('The asset configuration does not contain the required field "name".')
    # TODO: this default is not a good idea, and we will have to remove it
    # once we ask customers to always provide release
    if 'release' not in asset_conf:
        asset_conf['release'] = "12.04"
    elif asset_conf['release'] not in ('16.04', '14.04', '12.04'):
        raise AssetBuilderException('The "release" field value should be either "12.04" (DEPRECATED), "14.04", "16.04".')
    # Remaining required fields (messages preserved verbatim).
    for field, message in (
            ('version', 'The asset configuration does not contain the required field "version". '),
            ('title', 'The asset configuration does not contain the required field "title". '),
            ('description', 'The asset configuration does not contain the required field "description".')):
        if field not in asset_conf:
            raise AssetBuilderException(message)
    if 'distribution' not in asset_conf:
        asset_conf['distribution'] = "Ubuntu"
    elif asset_conf['distribution'] != 'Ubuntu':
        raise AssetBuilderException('The distribution may only take the value "Ubuntu".')
2.601342
2.31627
1.123074
def get_asset_tarball(asset_name, src_dir, dest_project, dest_folder, json_out):
    """
    If src_dir contains a "resources" directory, archive its contents and
    upload the archive to the platform; return the uploaded file's ID.
    Returns None when there is no resources directory.
    """
    resources_dir = os.path.join(src_dir, "resources")
    if not os.path.isdir(resources_dir):
        return None
    temp_dir = tempfile.mkdtemp()
    try:
        resource_file = os.path.join(temp_dir, asset_name + "_resources.tar.gz")
        subprocess.check_call(["tar", "-czf", resource_file,
                               "-C", resources_dir, "."])
        return dx_upload(resource_file, dest_project, dest_folder, json_out)
    finally:
        # Always clean up the scratch directory, even if tar/upload fails.
        shutil.rmtree(temp_dir)
2.139537
1.951809
1.096181
def pick(choices, default=None, str_choices=None, prompt=None, allow_mult=False, more_choices=False):
    '''
    :param choices: Strings between which the user will make a choice
    :type choices: list of strings
    :param default: Number the index to be used as the default
    :type default: int or None
    :param str_choices: Strings to be used as aliases for the choices; must
                        be of the same length as choices and each string
                        must be unique
    :type str_choices: list of strings
    :param prompt: A custom prompt to be used
    :type prompt: string
    :param allow_mult: Whether "*" is a valid option to select all choices
    :type allow_mult: boolean
    :param more_choices: Whether "m" is a valid option to ask for more options
    :type more_choices: boolean
    :returns: The user's choice, i.e. one of a numbered index of choices
              (e.g. 0 for the first item), "*" (only if allow_mult is True),
              or "m" (only if more_results is True)
    :rtype: int or string
    :raises: :exc:`EOFError` to signify quitting the process

    At most one of allow_mult and more_choices should be set to True.
    '''
    for i in range(len(choices)):
        prefix = str(i) + ') '
        lines = choices[i].split("\n")
        joiner = "\n" + " " * len(prefix)
        print(prefix + joiner.join(lines))
    if more_choices:
        print('m) More options not shown...')
    print('')
    if prompt is None:
        prompt = 'Pick a numbered choice'
        if allow_mult:
            prompt += ' or "*" for all'
        elif more_choices:
            prompt += ' or "m" for more options'
        if default is not None:
            prompt += ' [' + str(default) + ']'
        prompt += ': '
    while True:
        try:
            value = input(prompt)
        except KeyboardInterrupt:
            print('')
            raise
        except EOFError:
            print('')
            raise
        if default is not None and value == '':
            return default
        if allow_mult and value == '*':
            return value
        if more_choices and value == 'm':
            return value
        # Try matching against the string aliases first.  The previous bare
        # "except:" swallowed even SystemExit/KeyboardInterrupt; catch only
        # the expected failures (str_choices is None -> AttributeError, or
        # value not among the aliases -> ValueError).
        try:
            return str_choices.index(value)
        except (AttributeError, ValueError):
            pass
        # Fall back to interpreting the input as a numeric index.
        try:
            choice = int(value)
            if choice not in range(len(choices)):
                raise IndexError()
            return choice
        except (ValueError, IndexError):
            print('Not a valid selection')
3.044032
1.654105
1.84029
def object_exists_in_project(obj_id, proj_id):
    '''
    :param obj_id: object ID
    :type obj_id: str
    :param proj_id: project ID
    :type proj_id: str

    Returns True if the specified data object can be found in the
    specified project.
    '''
    if obj_id is None:
        raise ValueError("Expected obj_id to be a string")
    if proj_id is None:
        raise ValueError("Expected proj_id to be a string")
    if not is_container_id(proj_id):
        raise ValueError('Expected %r to be a container ID' % (proj_id,))
    described = try_call(dxpy.DXHTTPRequest, '/' + obj_id + '/describe',
                         {'project': proj_id})
    return described['project'] == proj_id
2.970069
2.475971
1.199557
def get_last_pos_of_char(char, string):
    '''
    :param char: The character to find
    :type char: string
    :param string: The string in which to search for *char*
    :type string: string
    :returns: Index in *string* where *char* last appears (unescaped by a
              preceding "\\"), -1 if not found
    :rtype: int

    Finds the last occurrence of *char* in *string* in which *char* is not
    present as an escaped character.
    '''
    search_end = len(string)
    while search_end > 0:
        candidate = string[:search_end].rfind(char)
        if candidate == -1:
            return -1
        # Count the run of backslashes immediately before the candidate;
        # an even count means the character itself is unescaped.
        backslashes = 0
        probe = candidate - 1
        while probe >= 0 and string[probe] == '\\':
            backslashes += 1
            probe -= 1
        if backslashes % 2 == 0:
            return candidate
        search_end = candidate
    return -1
2.858227
1.566454
1.824648
def get_first_pos_of_char(char, string):
    '''
    :param char: The character to find
    :type char: string
    :param string: The string in which to search for *char*
    :type string: string
    :returns: Index in *string* where *char* first appears (unescaped by a
              preceding "\\"), -1 if not found
    :rtype: int

    Finds the first occurrence of *char* in *string* in which *char* is
    not present as an escaped character.
    '''
    first_pos = -1
    search_end = len(string)
    while search_end > 0:
        candidate = string[:search_end].rfind(char)
        if candidate == -1:
            break
        backslashes = 0
        probe = candidate - 1
        while probe >= 0 and string[probe] == '\\':
            backslashes += 1
            probe -= 1
        # Remember every unescaped occurrence; the last one remembered
        # while scanning right-to-left is the first in the string.
        if backslashes % 2 == 0:
            first_pos = candidate
        search_end = candidate
    return first_pos
2.850127
1.587784
1.795034
def split_unescaped(char, string, include_empty_strings=False):
    '''
    :param char: The character on which to split the string
    :type char: string
    :param string: The string to split
    :type string: string
    :param include_empty_strings: If True, keep empty substrings produced by
        consecutive or leading delimiters (default False: discard them)
    :type include_empty_strings: boolean
    :returns: List of substrings of *string*
    :rtype: list of strings

    Splits *string* whenever *char* appears without an odd number of
    backslashes ('\\') preceding it. Empty string elements are discarded
    unless *include_empty_strings* is True.
    '''
    words = []
    pos = len(string)
    lastpos = pos
    # Walk right-to-left over unescaped delimiters, collecting the pieces in
    # reverse order.
    while pos >= 0:
        pos = get_last_pos_of_char(char, string[:lastpos])
        if pos >= 0:
            if pos + 1 != lastpos or include_empty_strings:
                words.append(string[pos + 1: lastpos])
            lastpos = pos
    # Remaining prefix before the leftmost delimiter (may be empty).
    if lastpos != 0 or include_empty_strings:
        words.append(string[:lastpos])
    words.reverse()
    return words
3.353982
1.973446
1.699556
def clean_folder_path(path, expected=None):
    '''
    :param path: A folder path to sanitize and parse
    :type path: string
    :param expected: Whether a folder ("folder"), a data object ("entity"), or
                     either (None) is expected
    :type expected: string or None
    :returns: *folderpath*, *name*

    Unescape and parse *path* as a folder path to possibly an entity name.

    Consecutive unescaped forward slashes "/" are collapsed to a single
    forward slash.

    If *expected* is "folder", *name* is always returned as None.  Otherwise,
    the string to the right of the last unescaped "/" is considered a possible
    data object name and returned as such.
    '''
    components = split_unescaped('/', path)
    if not components:
        return '/', None

    # A trailing unescaped slash means the whole path is a folder.
    ends_in_slash = get_last_pos_of_char('/', path) == len(path) - 1
    treat_as_folder = (expected == 'folder'
                       or components[-1] in ('.', '..')
                       or ends_in_slash)
    entity_name = None if treat_as_folder else unescape_name_str(components.pop())

    # Resolve "." and ".." components with a stack.
    stack = []
    for component in components:
        if component == '.':
            continue
        if component == '..':
            if stack:
                stack.pop()
        else:
            stack.append(unescape_folder_str(component))

    return '/' + '/'.join(stack), entity_name
4.295124
1.811305
2.371287
def resolve_container_id_or_name(raw_string, is_error=False, multi=False):
    '''
    :param raw_string: A potential project or container ID or name
    :type raw_string: string
    :param is_error: Whether to raise an exception if the project or container
                     ID cannot be resolved
    :type is_error: boolean
    :param multi: If True, return a list of all matching project IDs instead
                  of a single ID (an empty list when nothing matches)
    :type multi: boolean
    :returns: Project or container ID(s) if found, else None (or [] when
              *multi* is True)
    :rtype: string, list of strings, or None
    :raises: :exc:`ResolutionError` if *is_error* is True and the project or
             container could not be resolved, or if the name is ambiguous and
             cannot be resolved non-interactively

    Unescapes and attempts to resolve *raw_string* to a project or container
    ID.
    '''
    string = unescape_name_str(raw_string)
    if is_container_id(string):
        return [string] if multi else string

    # Serve repeated lookups of the same name from the module-level cache.
    if string in cached_project_names:
        return [cached_project_names[string]] if multi else cached_project_names[string]

    try:
        results = list(dxpy.find_projects(name=string, describe=True, level='VIEW'))
    except Exception as details:
        raise ResolutionError(str(details))

    if len(results) == 1:
        # Unambiguous match: cache it for future calls.
        cached_project_names[string] = results[0]['id']
        return [results[0]['id']] if multi else results[0]['id']
    elif len(results) == 0:
        if is_error:
            raise ResolutionError('Could not find a project named "' + string + '"')
        return [] if multi else None
    elif not multi:
        # Ambiguous name and the caller wants a single ID: ask the user if we
        # can, otherwise fail.
        if INTERACTIVE_CLI:
            print('Found multiple projects with name "' + string + '"')
            choice = pick(['{id} ({level})'.format(id=result['id'], level=result['level'])
                           for result in results])
            return results[choice]['id']
        else:
            raise ResolutionError('Found multiple projects with name "' + string + '"; please use a project ID to specify the desired project')
    else:
        # len(results) > 1 and multi
        return [result['id'] for result in results]
2.894683
2.150059
1.346327
def _check_resolution_needed(path, project, folderpath, entity_name, expected_classes=None, describe=True,
                             enclose_in_list=False):
    '''
    :param path: Path to the object that required resolution; propagated from
                 command-line
    :type path: string
    :param project: The potential project the entity belongs to
    :type project: string
    :param folderpath: Path to the entity
    :type folderpath: string
    :param entity_name: The name of the entity
    :type entity_name: string
    :param expected_classes: A list of expected classes the entity is allowed
        to belong to if it is an ID (e.g. "record", "file", "job"); if None,
        then entity_name may be any data object class
    :type expected_classes: list or None
    :param describe: Dictionary of inputs to the describe API call; if no
        describe input is provided (default value True), then an empty mapping
        is passed to the describe API method
    :type describe: dict or True
    :param enclose_in_list: Whether the describe output is to be in the form
        of a list (if False, the last return value is a dictionary; if True,
        the last return value is a list of one dictionary); it will only have
        an effect if entity_name is a DX ID and is described
    :type enclose_in_list: boolean
    :returns: Whether or not the entity needs to be resolved with a more
        general resolution method, the project, the folderpath, and the entity
        name
    :rtype: tuple of 4 elements
    :raises: ResolutionError if the entity fails to be described

    Attempts to resolve the entity to a folder or an object, and describes the
    entity iff it is a DX ID of an expected class in the list
    expected_classes.  Otherwise, determines whether or not more general
    resolution may be able to resolve the entity.

    If a more general resolution method is needed, then the return values will
    look like:
    (True, <project>, <folderpath>, <entity_name>)

    If the entity is a DX ID, but is not one of the supplied expected classes,
    then the return values will look like:
    (False, None, None, None)

    If the entity can be successfully described, then the return values will
    look like:
    <desc_output> ::= {"id": entity_name, "describe": {...}}
    <desc_or_desc_list> ::= <desc_output> || [<desc_output>]
    (False, <project>, <folderpath>, <desc_or_desc_list>)

    If the entity may be a folder, then the return values will look like:
    (False, <project>, <folderpath>, None)

    TODO: Allow arbitrary flags for the describe mapping.
    '''
    if entity_name is None:
        # Definitely a folder (or project)
        # TODO: find a good way to check if folder exists and expected=folder
        return False, project, folderpath, None
    elif is_hashid(entity_name):
        # The entity is a DX ID; check it against the expected classes by
        # prefix (e.g. "file-xxxx" startswith "file").
        found_valid_class = True
        if expected_classes is not None:
            found_valid_class = False
            for klass in expected_classes:
                if entity_name.startswith(klass):
                    found_valid_class = True
        if not found_valid_class:
            return False, None, None, None

        if describe is True:
            describe = {}

        # entity is an ID of a valid class, try to describe it
        if 'project' not in describe:
            # Supply a project hint: prefer the parsed project, fall back to
            # the current workspace if any.
            if project != dxpy.WORKSPACE_ID:
                describe['project'] = project
            elif dxpy.WORKSPACE_ID is not None:
                describe['project'] = dxpy.WORKSPACE_ID
        try:
            desc = dxpy.DXHTTPRequest('/' + entity_name + '/describe', describe)
            desc = dxpy.append_underlying_workflow_describe(desc)
        except Exception as details:
            if 'project' in describe:
                # Now try it without the hint
                del describe['project']
                try:
                    desc = dxpy.DXHTTPRequest('/' + entity_name + '/describe', describe)
                except Exception as details2:
                    raise ResolutionError(str(details2))
            else:
                raise ResolutionError(str(details))
        result = {"id": entity_name, "describe": desc}
        if enclose_in_list:
            return False, project, folderpath, [result]
        else:
            return False, project, folderpath, result
    else:
        # Need to resolve later
        return True, project, folderpath, entity_name
3.524518
3.112825
1.132257
def _resolve_folder(project, parent_folder, folder_name):
    '''
    :param project: The project that the folder belongs to
    :type project: string
    :param parent_folder: Full path to the parent folder that contains
                          folder_name
    :type parent_folder: string
    :param folder_name: Name of the folder
    :type folder_name: string
    :returns: The path to folder_name, if it exists, in the form of
              "<parent_folder>/<folder_name>"
    :rtype: string
    :raises: ResolutionError if folder_name is not a folder, or if folder_name
             points to a folder that does not exist

    Attempts to resolve folder_name at location parent_folder in project.
    '''
    if '/' in folder_name:
        # A slash inside the name means it cannot be a single folder component
        raise ResolutionError('Object of name ' + str(folder_name) + ' could not be resolved in folder ' +
                              str(parent_folder) + ' of project ID ' + str(project))

    if not check_folder_exists(project, parent_folder, folder_name):
        raise ResolutionError('Unable to resolve "' + folder_name +
                              '" to a data object or folder name in \'' + parent_folder + "'")

    # Normalize the joined path (collapse slashes, resolve "."/"..").
    resolved_path, _ = clean_folder_path(parent_folder + '/' + folder_name, 'folder')
    return resolved_path
5.594998
5.289684
1.057719
def _validate_resolution_output_length(path, entity_name, results, allow_mult=False, all_mult=False,
                                       ask_to_resolve=True):
    '''
    :param path: Path to the object that required resolution; propagated from
                 command-line
    :type path: string
    :param entity_name: Name of the object
    :type entity_name: string
    :param results: Result of resolution; non-empty list of object
        specifications (each specification is a dictionary with keys "project"
        and "id")
    :type results: list of dictionaries
    :param allow_mult: If True, it is okay to choose from multiple results of
        a single resolved object, or return all results found; if False, raise
        an error if multiple results are found
    :type allow_mult: boolean
    :param all_mult: If True, return all results if multiple results are found
        for a single resolved object; if False, user needs to choose a single
        result if multiple are found; the value of all_mult only has an effect
        if allow_mult is True
    :type all_mult: boolean
    :param ask_to_resolve: Whether picking may be necessary (if True, a list
        is returned; if False, only one result is returned); if specified as
        True, then all results will be returned, regardless of the values of
        allow_mult and all_mult
    :type ask_to_resolve: boolean
    :returns: The results of resolving entity_name: either a single
        {"project": ..., "id": ...} dict, or a list of such dicts
    :rtype: dict or list of dicts
    :raises: ValueError if results is empty
    :raises: ResolutionError if too many results are found and the user is not
        in interactive mode and cannot select one

    Precondition: results must be a nonempty list.

    Validates length of results.  If there are multiple results found and the
    user is in interactive mode, then the user will be prompted to select a
    single result to be returned.
    '''
    if len(results) == 0:
        raise ValueError("'results' must be nonempty.")

    # Caller wants ALL results, so return the entire results list
    # At this point, do not care about the values of allow_mult or all_mult
    if not ask_to_resolve:
        return results

    if len(results) > 1:
        # The other way the caller can specify it wants all results is by
        # setting allow_mult to be True and allowing all_mult to be True (or
        # if the object name is a glob pattern)
        if allow_mult and (all_mult or is_glob_pattern(entity_name)):
            return results
        if INTERACTIVE_CLI:
            print('The given path "' + path + '" resolves to the following data objects:')
            if any(['describe' not in result for result in results]):
                # findDataObject API call must be made to get 'describe' mappings
                project, folderpath, entity_name = resolve_path(path, expected='entity')
                results = _resolve_global_entity(project, folderpath, entity_name)
            choice = pick([get_ls_l_desc(result['describe']) for result in results],
                          allow_mult=allow_mult)
            if allow_mult and choice == '*':
                return results
            else:
                return [results[choice]] if allow_mult else results[choice]
        else:
            raise ResolutionError('The given path "' + path + '" resolves to ' +
                                  str(len(results)) + ' data objects')
    else:
        return [results[0]] if allow_mult else results[0]
5.742458
5.267312
1.090207
def _resolve_global_entity(project_or_job_id, folderpath, entity_name, describe=True, visibility="either"):
    '''
    :param project_or_job_id: The project ID to which the entity belongs (then
        the entity is an existing data object), or the job ID to which the
        entity belongs (then the entity is a job-based object reference to an
        object that may not exist yet)
    :type project_or_job_id: string
    :param folderpath: Full path to the object (parsed from command line)
    :type folderpath: string
    :param entity_name: Name of the object
    :type entity_name: string
    :param describe: Input mapping used to describe the job's project if
        project_or_job_id is a job ID, or True if the input mapping is to be
        empty
    :type describe: dict or True
    :param visibility: The expected visibility of the entity ("either",
        "hidden", or "visible"); to be used in resolution
    :type visibility: string
    :returns: The results obtained from attempting to resolve the entity
    :rtype: list
    :raises: ResolutionError if dxpy.find_data_objects throws an error

    If project_or_job_id is a job ID, the return value looks like
    [{"id": ..., "describe": {...}}, ...]; otherwise it looks like
    [{"id": ..., "project": ..., "describe": {...}}, ...].
    '''
    if is_job_id(project_or_job_id):
        # resolve_job_ref raises ResolutionError when no results could be
        # found; on success, the project is incorporated into the "describe"
        # mapping of the returned dictionaries.
        job_describe = {} if describe is True else describe
        return resolve_job_ref(project_or_job_id, entity_name, describe=job_describe)

    try:
        matches = dxpy.find_data_objects(project=project_or_job_id,
                                         folder=folderpath,
                                         name=entity_name,
                                         name_mode='glob',
                                         recurse=False,
                                         describe=describe,
                                         visibility=visibility)
        return list(matches)
    except Exception as details:
        raise ResolutionError(str(details))
3.995737
3.363161
1.18809