_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q28100
IvyUtils.do_resolve
train
def do_resolve(cls, executor, extra_args, ivyxml, jvm_options, workdir_report_paths_by_conf,
               confs, ivy_resolution_cache_dir, ivy_cache_classpath_filename, resolve_hash_name,
               workunit_factory, workunit_name):
    """Execute Ivy with the given ivy.xml and copies all relevant files into the workdir.

    This method does an Ivy resolve, which may be either a Pants resolve or a Pants fetch
    depending on whether there is an existing frozen resolution.

    After it is run, the Ivy reports are copied into the workdir at the paths specified by
    workdir_report_paths_by_conf along with a file containing a list of all the requested
    artifacts and their transitive dependencies.

    :param executor: A JVM executor to use to invoke ivy.
    :param extra_args: Extra arguments to pass to ivy.
    :param ivyxml: The input ivy.xml containing the dependencies to resolve.
    :param jvm_options: A list of jvm option strings to use for the ivy invoke, or None.
    :param workdir_report_paths_by_conf: A dict mapping confs to report paths in the workdir.
    :param confs: The confs used in the resolve.
    :param ivy_resolution_cache_dir: The path of the ivy resolution cache dir.
    :param ivy_cache_classpath_filename: The file to which the resolved classpath is written.
    :param resolve_hash_name: The hash to use as the module name for finding the ivy report file.
    :param workunit_factory: A workunit factory for the ivy invoke, or None.
    :param workunit_name: A workunit name for the ivy invoke, or None.
    """
    ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=workunit_factory)
    # Build the classpath file at a temporary path first; it is moved into place only once
    # ivy has completed successfully, so a failed resolve never leaves a partial file behind.
    with safe_concurrent_creation(ivy_cache_classpath_filename) as raw_target_classpath_file_tmp:
        extra_args = extra_args or []
        args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args

        # Ivy is not safe to invoke concurrently within one process; serialize invocations.
        with cls._ivy_lock:
            cls._exec_ivy(ivy, confs, ivyxml, args,
                          jvm_options=jvm_options,
                          executor=executor,
                          workunit_name=workunit_name,
                          workunit_factory=workunit_factory)

            # NOTE(review): the flattened source loses the original nesting — this assumes the
            # existence check and report copying happen while still holding the ivy lock;
            # confirm against upstream pants ivy_utils.
            if not os.path.exists(raw_target_classpath_file_tmp):
                raise cls.IvyError('Ivy failed to create classpath file at {}'
                                   .format(raw_target_classpath_file_tmp))

            cls._copy_ivy_reports(workdir_report_paths_by_conf, confs,
                                  ivy_resolution_cache_dir, resolve_hash_name)

    logger.debug('Moved ivy classfile file to {dest}'
                 .format(dest=ivy_cache_classpath_filename))
python
{ "resource": "" }
q28101
IvyUtils._hardlink_cachepath
train
def _hardlink_cachepath(cls, ivy_repository_cache_dir, inpath, hardlink_dir, outpath):
    """hardlinks all paths listed in inpath that are under ivy_repository_cache_dir into hardlink_dir.

    If there is an existing hardlink for a file under inpath, it is used rather than creating
    a new hardlink. Preserves all other paths. Writes the resulting paths to outpath.
    Returns a map of path -> hardlink to that path.
    """
    safe_mkdir(hardlink_dir)
    # The ivy_repository_cache_dir might itself be a hardlink. In this case, ivy may return paths that
    # reference the realpath of the .jar file after it is resolved in the cache dir. To handle
    # this case, add both the hardlink'ed path and the realpath to the jar to the hardlink map.
    real_ivy_cache_dir = os.path.realpath(ivy_repository_cache_dir)
    hardlink_map = OrderedDict()

    inpaths = cls._load_classpath_from_cachepath(inpath)
    # Deduplicate after canonicalization, preserving ivy's ordering.
    paths = OrderedSet([os.path.realpath(path) for path in inpaths])

    for path in paths:
        if path.startswith(real_ivy_cache_dir):
            hardlink_map[path] = os.path.join(hardlink_dir,
                                              os.path.relpath(path, real_ivy_cache_dir))
        else:
            # This path is outside the cache. We won't hardlink it.
            hardlink_map[path] = path

    # Create hardlinks for paths in the ivy cache dir.
    for path, hardlink in six.iteritems(hardlink_map):
        if path == hardlink:
            # Skip paths that aren't going to be hardlinked.
            continue
        safe_mkdir(os.path.dirname(hardlink))
        safe_hardlink_or_copy(path, hardlink)

    # (re)create the classpath with all of the paths
    with safe_open(outpath, 'w') as outfile:
        outfile.write(':'.join(OrderedSet(hardlink_map.values())))

    return dict(hardlink_map)
python
{ "resource": "" }
q28102
IvyUtils.xml_report_path
train
def xml_report_path(cls, resolution_cache_dir, resolve_hash_name, conf):
    """The path to the xml report ivy creates after a retrieve.

    :API: public

    :param string resolution_cache_dir: The path of the ivy resolution cache dir used for
                                        resolves.
    :param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
                                     resolution.
    :param string conf: The ivy conf name (e.g. "default").
    :returns: The report path.
    :rtype: string
    """
    # Report files are named <org>-<resolve hash>-<conf>.xml inside the resolution cache dir.
    return os.path.join(resolution_cache_dir,
                        '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
                                              resolve_hash_name, conf))
python
{ "resource": "" }
q28103
IvyUtils.parse_xml_report
train
def parse_xml_report(cls, conf, path):
    """Parse the ivy xml report corresponding to the name passed to ivy.

    :API: public

    :param string conf: the ivy conf name (e.g. "default")
    :param string path: The path to the ivy report file.
    :returns: The info in the xml report.
    :rtype: :class:`IvyInfo`
    :raises: :class:`IvyResolveMappingError` if no report exists.
    """
    if not os.path.exists(path):
        raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))

    logger.debug("Parsing ivy report {}".format(path))
    ret = IvyInfo(conf)
    etree = ET.parse(path)
    doc = etree.getroot()
    # The report groups artifacts by module organisation/name, then by revision.
    for module in doc.findall('dependencies/module'):
        org = module.get('organisation')
        name = module.get('name')
        for revision in module.findall('revision'):
            rev = revision.get('name')
            callers = []
            # Record which modules pulled in this revision.
            for caller in revision.findall('caller'):
                callers.append(IvyModuleRef(caller.get('organisation'), caller.get('name'),
                                            caller.get('callerrev')))

            for artifact in revision.findall('artifacts/artifact'):
                classifier = artifact.get('extra-classifier')
                ext = artifact.get('ext')
                ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev,
                                              classifier=classifier, ext=ext)
                artifact_cache_path = artifact.get('location')
                ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, tuple(callers))
                ret.add_module(ivy_module)
    return ret
python
{ "resource": "" }
q28104
IvyUtils.generate_fetch_ivy
train
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name):
    """Generates an ivy xml with all jars marked as intransitive using the all conflict manager.

    :param jars: The jar dependencies to fetch.
    :param ivyxml: The path to write the generated ivy.xml to.
    :param confs: The ivy confs in play; any conf other than 'default' becomes an extra
                  configuration in the template.
    :param resolve_hash_name: Used as the generated module's name.
    """
    org = IvyUtils.INTERNAL_ORG_NAME
    name = resolve_hash_name

    extra_configurations = [conf for conf in confs if conf and conf != 'default']

    # Use org name _and_ rev so that we can have dependencies with different versions. This will
    # allow for batching fetching if we want to do that.
    jars_by_key = OrderedDict()
    for jar in jars:
        jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar)

    dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()]

    template_data = TemplateData(org=org,
                                 module=name,
                                 extra_configurations=extra_configurations,
                                 dependencies=dependencies)

    template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache')
    cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
python
{ "resource": "" }
q28105
IvyUtils.calculate_classpath
train
def calculate_classpath(cls, targets):
    """Creates a consistent classpath and list of excludes for the passed targets.

    It also modifies the JarDependency objects' excludes to contain all the jars excluded by
    provides.

    :param iterable targets: List of targets to collect JarDependencies and excludes from.

    :returns: A pair of a list of JarDependencies, and a set of excludes to apply globally.
    """
    jars = OrderedDict()
    global_excludes = set()
    provide_excludes = set()
    targets_processed = set()

    # Support the ivy force concept when we sanely can for internal dep conflicts.
    # TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
    # strategy generally.
    def add_jar(jar):
        # TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its
        # attachments (classified artifacts) at another. Ivy does not, allow this, the dependency
        # can carry only 1 rev and that hosts multiple artifacts for that rev. This conflict
        # resolution happens at the classifier level, allowing skew in a
        # multi-artifact/multi-classifier dependency. We only find out about the skew later in
        # `_generate_jar_template` below which will blow up with a conflict. Move this logic closer
        # together to get a more clear validate, then emit ivy.xml then resolve flow instead of the
        # spread-out validations happening here.
        # See: https://github.com/pantsbuild/pants/issues/2239
        coordinate = (jar.org, jar.name, jar.classifier)
        existing = jars.get(coordinate)
        jars[coordinate] = jar if not existing else cls._resolve_conflict(existing=existing,
                                                                          proposed=jar)

    def collect_jars(target):
        if isinstance(target, JarLibrary):
            for jar in target.jar_dependencies:
                add_jar(jar)

    def collect_excludes(target):
        target_excludes = target.payload.get_field_value('excludes')
        if target_excludes:
            global_excludes.update(target_excludes)

    def collect_provide_excludes(target):
        if not (isinstance(target, ExportableJvmLibrary) and target.provides):
            return
        logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format(
            target.provides.org, target.provides.name, target))
        provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name))

    def collect_elements(target):
        targets_processed.add(target)
        collect_jars(target)
        collect_excludes(target)
        collect_provide_excludes(target)

    # Walk each target's transitive closure exactly once.
    for target in targets:
        target.walk(collect_elements, predicate=lambda target: target not in targets_processed)

    # If a source dep is exported (ie, has a provides clause), it should always override
    # remote/binary versions of itself, ie "round trip" dependencies.
    # TODO: Move back to applying provides excludes as target-level excludes when they are no
    # longer global.
    if provide_excludes:
        additional_excludes = tuple(provide_excludes)
        new_jars = OrderedDict()
        for coordinate, jar in jars.items():
            new_jars[coordinate] = jar.copy(excludes=jar.excludes + additional_excludes)
        jars = new_jars

    return list(jars.values()), global_excludes
python
{ "resource": "" }
q28106
MirroredTargetOptionMixin.get_scalar_mirrored_target_option
train
def get_scalar_mirrored_target_option(self, option_name, target):
    """Resolve a scalar option value, preferring the target's field over the subsystem option.

    Looks up the mirrored-option declaration registered under `option_name` and asks it
    for the effective value for `target`.
    """
    declaration = self._mirrored_option_declarations[option_name]
    return declaration.get_mirrored_scalar_option_value(target)
python
{ "resource": "" }
q28107
Subsystem.scoped
train
def scoped(cls, optionable, removal_version=None, removal_hint=None):
    """Returns a dependency on this subsystem, scoped to `optionable`.

    :param optionable: The Optionable whose options scope this dependency is nested under.
    :param removal_version: An optional deprecation version for this scoped Subsystem dependency.
    :param removal_hint: An optional hint to accompany a deprecation removal_version.

    Return value is suitable for use in SubsystemClientMixin.subsystem_dependencies().
    """
    return SubsystemDependency(cls, optionable.options_scope, removal_version, removal_hint)
python
{ "resource": "" }
q28108
Subsystem.scoped_instance
train
def scoped_instance(cls, optionable):
    """Returns an instance of this subsystem for exclusive use by the given `optionable`.

    :API: public

    :param optionable: An optionable type or instance to scope this subsystem under.
    :type: :class:`pants.option.optionable.Optionable`
    :returns: The scoped subsystem instance.
    :rtype: :class:`pants.subsystem.subsystem.Subsystem`
    :raises TypeError: If `optionable` is neither an Optionable instance nor subclass.
    """
    # Accept either an Optionable subclass or an Optionable instance.
    if not isinstance(optionable, Optionable) and not issubclass(optionable, Optionable):
        raise TypeError('Can only scope an instance against an Optionable, given {} of type {}.'
                        .format(optionable, type(optionable)))
    return cls._instance_for_scope(cls.subscope(optionable.options_scope))
python
{ "resource": "" }
q28109
_ServiceState.await_paused
train
def await_paused(self, timeout=None):
    """Blocks until the service is in the Paused state, then returns True.

    If a timeout is specified, the method may return False to indicate a timeout: with no
    timeout it will always (eventually) return True.

    Raises if the service is not currently in the Pausing state.
    """
    # NOTE: a falsy timeout (None or 0) means "wait forever": no deadline is computed.
    deadline = time.time() + timeout if timeout else None
    with self._lock:
        # Wait until the service transitions out of Pausing.
        while self._state != self._PAUSED:
            if self._state != self._PAUSING:
                raise AssertionError('Cannot wait for {} to reach `{}` while it is in `{}`.'.format(
                    self, self._PAUSED, self._state))
            # Recompute the remaining time on every wakeup (condition waits can be spurious).
            timeout = deadline - time.time() if deadline else None
            if timeout and timeout <= 0:
                return False
            self._condition.wait(timeout=timeout)
        return True
python
{ "resource": "" }
q28110
_ServiceState.maybe_pause
train
def maybe_pause(self, timeout=None):
    """Called by the service to indicate that it is pausable.

    If the service calls this method while the state is `Pausing`, the state will transition
    to `Paused`, and the service will block here until it is marked `Running` or `Terminating`.

    If the state is not currently `Pausing`, and a timeout is not passed, this method returns
    immediately. If a timeout is passed, this method blocks up to that number of seconds to
    wait to transition to `Pausing`.
    """
    # NOTE: a falsy timeout (None or 0) means "do not wait": no deadline is computed.
    deadline = time.time() + timeout if timeout else None
    with self._lock:
        while self._state != self._PAUSING:
            # If we've been terminated, or the deadline has passed, return.
            timeout = deadline - time.time() if deadline else None
            if self._state == self._TERMINATING or not timeout or timeout <= 0:
                return
            # Otherwise, wait for the state to change.
            self._condition.wait(timeout=timeout)

        # Set Paused, and then wait until we are no longer Paused.
        self._set_state(self._PAUSED, self._PAUSING)
        while self._state == self._PAUSED:
            self._condition.wait()
python
{ "resource": "" }
q28111
_ServiceState.mark_pausing
train
def mark_pausing(self):
    """Requests that the service move to the Paused state, without waiting for it to do so.

    Raises if the service is not currently in the Running state.
    """
    with self._lock:
        # Transition Running -> Pausing; _set_state raises if the precondition doesn't hold.
        self._set_state(self._PAUSING, self._RUNNING)
python
{ "resource": "" }
q28112
_ServiceState.mark_running
train
def mark_running(self):
    """Moves the service to the Running state.

    Raises if the service is not currently in the Paused state.
    """
    with self._lock:
        # Transition Paused -> Running; _set_state raises if the precondition doesn't hold.
        self._set_state(self._RUNNING, self._PAUSED)
python
{ "resource": "" }
q28113
Struct.kwargs
train
def kwargs(self):
    """Return the kwargs for this Struct which the baseclass did not interpret.

    Fields like `extends`, `merges`, and `abstract` are consumed by
    SerializableFactory.create and Validatable.validate and are filtered out here.
    """
    internal = self._INTERNAL_FIELDS
    remaining = {}
    for key, value in self._kwargs.items():
        if key not in internal:
            remaining[key] = value
    return remaining
python
{ "resource": "" }
q28114
Struct.type_alias
train
def type_alias(self):
    """Return the type alias this target was constructed via.

    For a target read from a BUILD file, this will be target alias, like 'java_library'.
    For a target constructed in memory, this will be the simple class name, like 'JavaLibrary'.

    The end result is that the type alias should be the most natural way to refer to this
    target's type to the author of the target instance.

    :rtype: string
    """
    alias = self._kwargs.get(self._TYPE_ALIAS_FIELD, None)
    if alias is None:
        # No alias was recorded at construction time: fall back to the class name.
        return type(self).__name__
    return alias
python
{ "resource": "" }
q28115
compile_file
train
def compile_file(source, globals_=None):
    """Compile by saving to file and importing that.

    Compiling the AST/source code this way ensures that the source code is
    readable by e.g. `pdb` or `inspect`.

    Args:
      source: The code to compile, either as a string or as an AST.
      globals_: A dictionary of variables that should be available as globals in
          the compiled module. They will be monkey patched after importing the
          module.

    Returns:
      A module object containing the compiled source code.
    """
    if isinstance(source, gast.AST):
        source = quoting.to_source(source)

    # Write source to temporary file
    tempdir = tempfile.mkdtemp()
    uuid = str(uuid4().hex[:4])
    tmpname = os.path.join(tempdir, 'tangent_%s.py' % uuid)
    with open(tmpname, 'w') as f:
        f.write(source)

    # Load the temporary file as a module
    module_name = 'tangent_%s' % uuid
    if six.PY3:
        # importlib-based loading on Python 3.
        spec = util.spec_from_file_location(module_name, tmpname)
        m = util.module_from_spec(spec)
        spec.loader.exec_module(m)
    else:
        # `imp` is the legacy Python 2 loader.
        m = imp.load_source(module_name, tmpname)

    # Update the modules namespace
    if globals_:
        m.__dict__.update(globals_)
    return m
python
{ "resource": "" }
q28116
compile_function
train
def compile_function(node, globals_=None):
    """Convert an AST or string into a function with inspectable source.

    This function uses `compile_file` internally, but instead of returning the
    entire module it will return the function only.

    Args:
      node: A `FunctionDef` node or a `Module` node which contains at least one
          `FunctionDef` node. If a module contains multiple functions, a handle
          to the first one will be returned.
      globals_: See `compile_file`

    Returns:
      A handle to the compiled function.

    Raises:
      TypeError: If the input is not a string or AST.
      ValueError: If no function can be found.
    """
    if not isinstance(node, gast.AST):
        if not isinstance(node, six.string_types):
            raise TypeError
        node = gast.parse(node)
    if isinstance(node, gast.Module):
        # Take the first function definition in the module body.
        for succ in node.body:
            if isinstance(succ, gast.FunctionDef):
                name = succ.name
                break
        else:
            # for/else: no FunctionDef was found in the module.
            raise ValueError('no function found')
    elif isinstance(node, gast.FunctionDef):
        name = node.name
    else:
        raise TypeError
    module = compile_file(node, globals_)
    return getattr(module, name)
python
{ "resource": "" }
q28117
autodiff_ast
train
def autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose):
    """Perform AD on a single function and return the AST.

    Args:
      See `grad`.

    Returns:
      node: The AST of a module containing the adjoint and primal function
          definitions.
      required: A list of non-built in functions that this function called, and
          of which the primals and adjoints need to be made available in order
          for the returned function to run.
    """
    # Normalize the function body before differentiation: resolve calls,
    # desugar loop indexing, validate unsupported constructs, and convert to ANF.
    node = annotate.resolve_calls(func)
    node = desugar.explicit_loop_indexes(node)
    fence.validate(node, inspect.getsource(func))
    node = anf_.anf(node)
    if verbose >= 2:
        print('ANF')
        print(quoting.to_source(node))
    if mode == 'reverse':
        node, required, stack = reverse_ad.reverse_ad(node.body[0], wrt,
                                                      preserve_result, check_dims)
        if verbose >= 2:
            print('RAW')
            print(quoting.to_source(node))
        # Split motion keeps separate forward/backward functions; joint fuses them.
        if motion == 'split':
            node = reverse_ad.split(node, stack)
        else:
            node = reverse_ad.joint(node)
        if verbose >= 2:
            print('MOTION')
            print(quoting.to_source(node))
    elif mode == 'forward':
        node, required = forward_ad.forward_ad(node.body[0], wrt, preserve_result,
                                               check_dims)
    return node, required
python
{ "resource": "" }
q28118
autodiff_tree
train
def autodiff_tree(func, wrt, motion, mode, preserve_result, check_dims, verbose): """Perform AD on all functions in a call tree. This function walks the call tree and differentiates each function in it. It also ensures that the global namespaces that each function in the call tree was in are merged. The `tangent` and `numpy` packages are added to the namespace here, so that the gradient templates can assume that they are present. Args: See `grad`. Returns: final: A single module which contains the primals and adjoints of all the functions in the call tree. namespace: A merged dictionary with all the variables in the global namespaces of each function. The primals and adjoints need access to these in order to execute. """ # Imported here to avoid circular imports import tangent namespace = {'tangent': tangent, 'numpy': numpy} done = set() final = gast.Module(body=[]) namespace.update(six.get_function_globals(func)) node, required = autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose) final.body.extend(node.body) to_do = set(required) if motion == 'split' and mode == 'reverse': done.add((func, wrt)) to_do -= done while to_do: func, wrt = to_do.pop() namespace.update(six.get_function_globals(func)) node, required = autodiff_ast( func=func, wrt=wrt, motion='split', mode=mode, preserve_result=True, check_dims=False, verbose=verbose) final.body.extend(node.body) done.add((func, wrt)) to_do.update(required) to_do -= done return final, namespace
python
{ "resource": "" }
q28119
vjp
train
def vjp(func, wrt=(0,), optimized=True, check_dims=True, preserve_result=False,
        verbose=0):
    """Convenience function to produce vector-Jacobian products.

    See `autodiff` for function arguments.

    Uses reverse-mode joint-motion autodiff to produce the VJP.
    """
    # Thin wrapper over `autodiff`: fixes motion/mode and requires the caller to
    # supply the output-side derivative (the "vector" in the VJP).
    return autodiff(
        func,
        wrt=wrt,
        motion='joint',
        mode='reverse',
        optimized=optimized,
        preserve_result=preserve_result,
        input_derivative=INPUT_DERIVATIVE.Required,
        check_dims=check_dims,
        verbose=verbose)
python
{ "resource": "" }
q28120
autodiff
train
def autodiff(func, wrt=(0,), optimized=True, motion='joint', mode='reverse', preserve_result=False, check_dims=True, input_derivative=INPUT_DERIVATIVE.Required, verbose=0): """Build the vector-Jacobian or Jacobian-vector product of a function `func`. For a vector-Jacobian product (reverse-mode autodiff): This function proceeds by finding the primals and adjoints of all the functions in the call tree. For a Jacobian-vector product (forward-mode autodiff): We first find the primals and tangents of all functions in the call tree. It then wraps the top level function (i.e. the one passed as `func`) in a slightly more user-friendly interface. It then compiles the function and attaches to it the global namespace it needs to run. Args: func: The function to take the gradient of. wrt: A tuple of argument indices to differentiate with respect to. By default the derivative is taken with respect to the first argument. optimized: Whether to optimize the gradient function (`True` by default). motion: Either 'split' (separate functions for forward and backward pass) or 'joint' motion (a single combined function). Joint mode is the default. mode: Either 'forward' or 'reverse' mode. Forward mode is more efficient when the input dimensionality is lower than the output dimensionality, whereas it is the opposite for reverse mode. input_derivative: An enum indicating whether the user must supply an input derivative, and if not, what the default value is. See the possible values of INPUT_DERIVATIVE in this file. preserve_result: A boolean indicating whether or not the generated gradient function should also return the output of the original function. 
If False, the return signature of the input and output functions will be > val = func(*args) > df = grad(func,preserve_result=False) > gradval = df(*args) If True, > val = func(*args) > df = grad(func,preserve_result=True) > gradval, val = df(*args) Note that if taking gradients with respect to multiple arguments, the primal value will be appended to the return signature. Ex: > val = func(x,y) > df = grad(func,wrt=(0,1),preserve_result=True) > dx,dy,val = df(x,y) verbose: If 1 the source code of the generated functions will be output to stdout at various stages of the process for debugging purposes. If > 1, all intermediate code generation steps will print. Returns: df: A function that calculates a derivative (see file-level documentation above for the kinds of derivatives available) with respect to arguments specified in `wrt`, using forward or reverse mode according to `mode`. If using reverse mode, the gradient is calculated in either split or joint motion according to the value passed in `motion`. If `preserve_result` is True, the function will also return the original result of `func`. 
""" # If the function had the with insert_grad_of statements removed, retrieve them func = getattr(func, 'tangent', func) # Generate the derivative node, namespace = autodiff_tree(func, wrt, motion, mode, preserve_result, check_dims, verbose) if mode == 'reverse' and motion == 'joint': # Pull the stack definition and initial gradient into the function body # TODO: Use first FunctionDef instead of first element node.body[0] = _create_joint(node.body[0], func, wrt, input_derivative) if verbose >= 2: print('INLINED') print(quoting.to_source(node)) if mode == 'forward': node = _create_forward(node) if optimized: # Optimize the resulting functions node = optimization.optimize(node) node = comments.remove_repeated_comments(node) if verbose >= 1: print(quoting.to_source(node)) # Compile and return module = compile_.compile_file(node, namespace) if mode == 'forward' or motion == 'joint': return getattr(module, node.body[0].name) else: # Compiling the top-level function in split mode makes no sense, but we use # it for testing; hence we don't care about the source being readable forward = getattr(module, node.body[0].name) backward = getattr(module, node.body[1].name) # Imported here to avoid circular imports import tangent def df(*args, **kwargs): _stack = tangent.Stack() init_grad = kwargs.pop('init_grad', 1.0) forward(_stack, *args, **kwargs) dx = backward(_stack, init_grad, *args, **kwargs) if len(dx) == 1: dx, = dx return dx return df
python
{ "resource": "" }
q28121
_create_joint
train
def _create_joint(fwdbwd, func, wrt, input_derivative): """Create a user-friendly gradient function. By default, gradient functions expect the stack to be passed to them explicitly. This function modifies the function so that the stack doesn't need to be passed and gets initialized in the function body instead. For consistency, gradient functions always return a tuple, even if the gradient of only one input was required. We unpack the tuple if it is of length one. Args: fwdbwd: An AST. The function definition of the joint primal and adjoint. func: A function handle. The original function that was differentiated. wrt: A tuple of integers. The arguments with respect to which we differentiated. Returns: The function definition of the new function. """ # Correct return to be a non-tuple if there's only one element retval = fwdbwd.body[-1] if len(retval.value.elts) == 1: retval.value = retval.value.elts[0] # Make a stack init statement init_stack = quoting.quote('%s = tangent.Stack()' % fwdbwd.args.args[0].id) init_stack = comments.add_comment(init_stack, 'Initialize the tape') # Prepend the stack init to the top of the function fwdbwd.body = [init_stack] + fwdbwd.body # Replace the function arguments with the original ones grad_name = fwdbwd.args.args[1].id fwdbwd.args = quoting.parse_function(func).body[0].args # Give the function a nice name fwdbwd.name = naming.joint_name(func, wrt) # Allow the initial gradient to be passed as a keyword argument fwdbwd = ast_.append_args(fwdbwd, [grad_name]) if input_derivative == INPUT_DERIVATIVE.DefaultOne: fwdbwd.args.defaults.append(quoting.quote('1.0')) return fwdbwd
python
{ "resource": "" }
q28122
_create_forward
train
def _create_forward(out_node): """Create a user-friendly forward function. Ensures that a single value instead of a tuple is returned if the user asked for the gradient with respect to only one input. Args: out_node: The function definition AST. Returns: The function definition with potentially changed return statement. """ retval = out_node.body[0].body[-1] if len(retval.value.elts) == 1: retval.value = retval.value.elts[0] return out_node
python
{ "resource": "" }
q28123
tangent
train
def tangent(f):
    """A decorator which removes the `with insert_grad_of` statement.

    This allows the function to be called as usual.

    Args:
      f: A function

    Returns:
      A function with any `with insert_grad_of` context managers removed.
    """
    node = annotate.resolve_calls(f)
    # Strip the insert_grad_of context managers from the AST in place.
    RemoveWith().visit(node)
    wrapped = functools.wraps(f)(compile_.compile_function(node))
    # Keep a handle to the original so autodiff can recover the full definition.
    wrapped.tangent = f
    return wrapped
python
{ "resource": "" }
q28124
forward
train
def forward(node, analysis):
    """Perform a given analysis on all functions within an AST.

    Args:
      node: An AST to walk; every FunctionDef found in it is analyzed.
      analysis: A `Forward` analysis instance to run over each function's CFG.

    Returns:
      The (unmodified) input node, for chaining.

    Raises:
      TypeError: If `analysis` is not a Forward analysis.
    """
    if not isinstance(analysis, Forward):
        raise TypeError('not a valid forward analysis object')

    for succ in gast.walk(node):
        if isinstance(succ, gast.FunctionDef):
            # Build a fresh CFG per function and run the analysis from its entry.
            cfg_obj = CFG.build_cfg(succ)
            analysis.visit(cfg_obj.entry)
    return node
python
{ "resource": "" }
q28125
CFG.backlink
train
def backlink(node): """Given a CFG with outgoing links, create incoming links.""" seen = set() to_see = [node] while to_see: node = to_see.pop() seen.add(node) for succ in node.next: succ.prev.add(node) if succ not in seen: to_see.append(succ)
python
{ "resource": "" }
q28126
CFG.set_head
train
def set_head(self, node): """Link this node to the current leaves.""" for head in self.head: head.next.add(node) self.head[:] = [] self.head.append(node)
python
{ "resource": "" }
q28127
CFG.build_cfg
train
def build_cfg(cls, node):
    """Build a CFG for a function.

    Args:
      node: A function definition the body of which to analyze.

    Returns:
      A CFG object.

    Raises:
      TypeError: If the input is not a function definition.
    """
    if not isinstance(node, gast.FunctionDef):
        raise TypeError('input must be a function definition')
    cfg = cls()
    # The entry node carries the function's argument list.
    cfg.entry = Node(node.args)
    cfg.head = [cfg.entry]
    cfg.visit_statements(node.body)
    # A synthetic exit node collects every leaf of the graph.
    cfg.exit = Node(None)
    cfg.set_head(cfg.exit)
    # Derive `prev` links from the `next` links just built.
    cfg.backlink(cfg.entry)
    return cfg
python
{ "resource": "" }
q28128
optimize
train
def optimize(node):
    """Run the optimization passes over an AST.

    Applies dead code elimination, constant folding, and assignment propagation
    in sequence, each pass consuming the previous pass's output.

    Args:
      node: The AST to optimize.

    Returns:
      The optimized AST.
    """
    passes = (dead_code_elimination, constant_folding, assignment_propagation)
    for optimization_pass in passes:
        node = optimization_pass(node)
    return node
python
{ "resource": "" }
q28129
dead_code_elimination
train
def dead_code_elimination(node):
    """Perform a simple form of dead code elimination on a Python AST.

    This method performs reaching definitions analysis on all function
    definitions. It then looks for the definition of variables that are not used
    elsewhere and removes those definitions.

    This function takes into consideration push and pop statements; if a pop
    statement is removed, it will also try to remove the accompanying push
    statement.

    Note that this *requires dead code elimination to be performed on the primal
    and adjoint simultaneously*.

    Args:
      node: The AST to optimize.

    Returns:
      The optimized AST.
    """
    # Collect unused definitions, excluding function arguments and for-targets,
    # which cannot simply be deleted.
    to_remove = set(def_[1] for def_ in annotate.unused(node)
                    if not isinstance(def_[1], (gast.arguments, gast.For)))
    for n in list(to_remove):
        for succ in gast.walk(n):
            # When a pop is removed, its paired push must go too.
            if anno.getanno(succ, 'push', False):
                to_remove.add(anno.getanno(succ, 'push'))
    transformers.Remove(to_remove).visit(node)
    anno.clearanno(node)
    return node
python
{ "resource": "" }
q28130
read_counts
train
def read_counts(node):
    """Check how many times a variable definition was used.

    Args:
      node: An AST to analyze.

    Returns:
      A dictionary from assignment nodes to the number of times the assigned to
      variable was used.
    """
    # Reaching-definitions annotations must be present before counting reads.
    cfg.forward(node, cfg.ReachingDefinitions())
    rc = ReadCounts()
    rc.visit(node)
    return rc.n_read
python
{ "resource": "" }
q28131
assignment_propagation
train
def assignment_propagation(node):
  """Perform assignment propagation.

  Assignment propagation is not a compiler optimization as much as a
  readability optimization. If a variable name is used only once, it gets
  renamed when possible e.g. `y = x; z = y` will become `z = x`.

  Args:
    node: The AST to optimize.

  Returns:
    The optimized AST.
  """
  n_reads = read_counts(node)

  to_remove = []
  for succ in gast.walk(node):
    # We found an assignment of the form a = b
    # - Left-hand side is a Name, right-hand side is a Name.
    if (isinstance(succ, gast.Assign) and isinstance(succ.value, gast.Name) and
        len(succ.targets) == 1 and isinstance(succ.targets[0], gast.Name)):
      rhs_name = succ.value.id
      # We now find all the places that b was defined
      rhs_defs = [def_[1] for def_ in anno.getanno(succ, 'definitions_in')
                  if def_[0] == rhs_name]
      # If b was defined in only one place (not an argument), and wasn't used
      # anywhere else but in a == b, and was defined as b = x, then we can fold
      # the statements
      if (len(rhs_defs) == 1 and isinstance(rhs_defs[0], gast.Assign) and
          n_reads[rhs_defs[0]] == 1 and
          isinstance(rhs_defs[0].value, gast.Name) and
          isinstance(rhs_defs[0].targets[0], gast.Name)):
        # Mark rhs_def for deletion
        to_remove.append(rhs_defs[0])
        # Propagate the definition
        succ.value = rhs_defs[0].value

  # Remove the definitions we folded
  transformers.Remove(to_remove).visit(node)
  anno.clearanno(node)
  return node
python
{ "resource": "" }
q28132
matmul_adjoint_x
train
def matmul_adjoint_x(dz, x, y, transpose_a, transpose_b):
  """Gradient of `tf.matmul` with respect to its first argument `x`.

  Kept as a separate function for readability. The branch taken mirrors the
  transpose flags used in the forward matmul.

  Args:
    dz: Gradient with respect to the matmul output.
    x: The first matmul operand (unused; present for a uniform signature).
    y: The second matmul operand.
    transpose_a: Whether `x` was transposed in the forward pass.
    transpose_b: Whether `y` was transposed in the forward pass.

  Returns:
    The gradient with respect to `x`.
  """
  if transpose_a:
    # x entered the product transposed, so y multiplies from the left.
    if transpose_b:
      return tf.matmul(y, dz, transpose_a=True, transpose_b=True)
    return tf.matmul(y, dz, transpose_b=True)
  if transpose_b:
    return tf.matmul(dz, y)
  return tf.matmul(dz, y, transpose_b=True)
python
{ "resource": "" }
q28133
matmul_adjoint_y
train
def matmul_adjoint_y(dz, x, y, transpose_a, transpose_b):
  """Gradient of `tf.matmul` with respect to its second argument `y`.

  Kept as a separate function for readability. The branch taken mirrors the
  transpose flags used in the forward matmul.

  Args:
    dz: Gradient with respect to the matmul output.
    x: The first matmul operand.
    y: The second matmul operand (unused; present for a uniform signature).
    transpose_a: Whether `x` was transposed in the forward pass.
    transpose_b: Whether `y` was transposed in the forward pass.

  Returns:
    The gradient with respect to `y`.
  """
  if transpose_b:
    # y entered the product transposed, so dz multiplies from the left.
    if transpose_a:
      return tf.matmul(dz, x, transpose_a=True, transpose_b=True)
    return tf.matmul(dz, x, transpose_a=True)
  if transpose_a:
    return tf.matmul(x, dz)
  return tf.matmul(x, dz, transpose_a=True)
python
{ "resource": "" }
q28134
primal_name
train
def primal_name(func, wrt):
  """Build the name for the primal of a function.

  The name combines the function's own name with the names of the arguments
  being differentiated with respect to.

  Args:
    func: A plain Python function (not a builtin or method).
    wrt: An iterable of argument indices.

  Returns:
    The formatted primal name string.

  Raises:
    TypeError: If `func` is not a `types.FunctionType`.
  """
  if not isinstance(func, types.FunctionType):
    raise TypeError(func)
  varnames = six.get_function_code(func).co_varnames
  wrt_names = ''.join(varnames[i] for i in wrt)
  return PRIMAL_NAME.format(func.__name__, wrt_names)
python
{ "resource": "" }
q28135
Namer.build
train
def build(cls, node):
  """Construct a namer seeded with every name used in a function scope.

  Args:
    node: A `gast.FunctionDef` node whose names seed the namer.

  Returns:
    A new namer instance aware of all names already in use.

  Raises:
    ValueError: If `node` is not a function definition.
  """
  if not isinstance(node, gast.FunctionDef):
    raise ValueError
  instance = cls()
  instance.names.update(get_names(node))
  return instance
python
{ "resource": "" }
q28136
Namer.valid
train
def valid(self, name):
  """Strip a proposed variable name down to a valid identifier.

  Removes every character outside `[0-9a-zA-Z_]` and prefixes an underscore
  when the result would start with a digit.

  Note: Assumes variable names are ASCII, which isn't necessarily true in
  Python 3.

  Args:
    name: A proposed variable name.

  Returns:
    A valid version of the name.
  """
  cleaned = re.sub('[^0-9a-zA-Z_]', '', name)
  return '_' + cleaned if re.match('[0-9]', cleaned) else cleaned
python
{ "resource": "" }
q28137
Namer.trim
train
def trim(self, name):
  """Shorten an over-long name.

  When the name exceeds `self.MAX_LENGTH`, first try a temporary name derived
  from the assignment target; if that is still too long, fall back to a short
  random hex name not already in use.

  Args:
    name: A proposed variable name.

  Returns:
    A name no longer than the limit (unless the target-derived name fits).
  """
  if len(name) > self.MAX_LENGTH and self.target:
    name = self.TEMP_VAR.format(self._name(self.target))
  if len(name) > self.MAX_LENGTH:
    # Draw random 4-hex-digit names until one is unused.
    name = '_{:04x}'.format(random.randint(0, 16 ** 4 - 1))
    while name in self.names:
      name = '_{:04x}'.format(random.randint(0, 16 ** 4 - 1))
  return name
python
{ "resource": "" }
q28138
Namer.unique
train
def unique(self, name):
  """Make a variable name valid, short, and unique.

  The name is sanitized, trimmed, and then suffixed with an increasing
  number (starting at 2) until it no longer collides with an existing name.
  The chosen name is recorded so later calls cannot reuse it.

  Args:
    name: A proposed variable name.

  Returns:
    A unique version of the name.
  """
  candidate = self.trim(self.valid(name))
  unique_name = candidate
  suffix = 2
  while unique_name in self.names:
    unique_name = '{}{}'.format(candidate, suffix)
    suffix += 1
  self.names.add(unique_name)
  return unique_name
python
{ "resource": "" }
q28139
array_size
train
def array_size(x, axis):
  """Calculate the size of `x` along the given `axis` dimensions only.

  Args:
    x: An array with a `shape` attribute.
    axis: An iterable of axis indices, or None for all axes.

  Returns:
    The product of the selected dimension sizes, floored at 1 (the product
    over an empty axis tuple is 1).
  """
  if axis is None:
    selected = x.shape
  else:
    selected = tuple(x.shape[a] for a in axis)
  return max(numpy.prod(selected), 1)
python
{ "resource": "" }
q28140
create_unbroadcast_axis
train
def create_unbroadcast_axis(shape, broadcast_shape):
  """Creates the reduction axis for unbroadcasting.

  Args:
    shape: A list. The shape after the broadcast operation.
    broadcast_shape: A list. The original shape the array being unbroadcast
        had.

  Returns:
    A tuple of negative axis indices along which the array needs to be
    reduced: every trailing dimension that was added or expanded by the
    broadcast.
  """
  axes = []
  for i in range(len(broadcast_shape)):
    # A dimension must be reduced if it did not exist in `shape` at all,
    # or if broadcasting expanded it (original size > target size).
    if i >= len(shape) or broadcast_shape[-(1 + i)] > shape[-(1 + i)]:
      axes.append(-(1 + i))
  return tuple(axes)
python
{ "resource": "" }
q28141
unreduce_array
train
def unreduce_array(array, shape, axis, keepdims):
  """Reverse summing over a dimension, NumPy implementation.

  Args:
    array: The array that was reduced.
    shape: The original shape of the array before reduction.
    axis: The axis or axes that were summed.
    keepdims: Whether these axes were kept as singleton axes.

  Returns:
    An array with axes broadcast to match the shape of the original array.
  """
  # NumPy uses a special default value for keepdims, which is equivalent to
  # False.
  dims_dropped = not keepdims or keepdims is numpy._NoValue  # pylint: disable=protected-access
  if axis is not None and dims_dropped:
    axes = (axis,) if isinstance(axis, int) else axis
    # Reinsert the reduced axes as singletons so broadcasting lines up.
    for ax in sorted(axes):
      array = numpy.expand_dims(array, ax)
  return numpy.broadcast_to(array, shape)
python
{ "resource": "" }
q28142
astype
train
def astype(array, y):
  """A functional form of the `astype` method.

  Args:
    array: The array or number to cast.
    y: An array or number, as the input, whose type should be that of array.

  Returns:
    An array or number with the same dtype as `y`.
  """
  # Autograd wraps values in Node objects; unwrap to reach the raw value.
  if isinstance(y, autograd.core.Node):
    y = y.value
  return array.astype(numpy.array(y).dtype)
python
{ "resource": "" }
q28143
init_grad
train
def init_grad(obj, allow_lazy_initializer=False):
  """Initialize the gradient for an object.

  Args:
    obj: The object to initialize the gradient for, can be either a number,
        array, tuple, list, or dictionary.
    allow_lazy_initializer: Whether to allow using the ZeroGradient wrapper,
        for efficiency.

  Returns:
    An object of the same type, shape, etc. but with all numeric values set to
    zero. If the type is unknown, a zero is returned.
  """
  if obj is None:
    # TODO: fixes.py appears to pass None value and expect 0.0 back. Bug?
    return 0.0

  # The initializer table maps each supported type to its zero-constructor
  # and a flag saying whether a lazy ZeroGradient wrapper may stand in.
  initializer, supports_lazy_initializer = grad_initializers[type(obj)]
  if supports_lazy_initializer:
    if isinstance(obj, ZeroGradient):
      if allow_lazy_initializer:
        # Zero-of-zero is still zero: just wrap the same prototype again.
        return ZeroGradient(obj.like)
      else:
        # TODO: Not sure this should normally be hit. In forward-over-reverse?
        return obj.instantiate()
    else:
      if allow_lazy_initializer:
        return ZeroGradient(obj)
  else:
    # Types without lazy support must never receive a ZeroGradient.
    assert not isinstance(obj, ZeroGradient)
  return initializer(obj)
python
{ "resource": "" }
q28144
register_add_grad
train
def register_add_grad(left_type, right_type, add_grad_function):
  """Register a new gradient adder supporting the given types.

  Gradient adders are used to add (in the sense of arithmetic addition)
  intermediate adjoint and tangent variables.

  TODO: Link to the document explaining the overall terminology and mechanics.

  Args:
    left_type: A Python type object. The data type of the left operand
        supported by the adder.
    right_type: A Python type object. The data type of the right operand
        supported by the adder.
    add_grad_function: A binary function that takes two arguments, left and
        right, of the types left_type and right_type respectively, and returns
        their sum. For example, the gradient adder for Numpy objects is
        np.add.

  Raises:
    ValueError: If the given type pair was already registered.
  """
  type_pair = (left_type, right_type)
  if type_pair in grad_adders:
    raise ValueError('Types %s already mapped to %s' %
                     (type_pair, grad_adders[type_pair]))
  grad_adders[type_pair] = add_grad_function
python
{ "resource": "" }
q28145
add_grad
train
def add_grad(left, right):
  """Recursively add the gradient of two objects.

  Args:
    left: The left value to add. Can be either an array, a number, list or
        dictionary.
    right: The right value. Must be of the same type (recursively) as the
        left.

  Returns:
    The sum of the two gradients, which will be of the same type.
  """
  # We assume that initial gradients are always identity WRT add_grad.
  # We also assume that only init_grad could have created None values.
  assert left is not None and right is not None
  # A lazy zero is the additive identity: return the other operand as-is.
  if type(left) is ZeroGradient:
    return right
  if type(right) is ZeroGradient:
    return left
  return grad_adders[type(left), type(right)](left, right)
python
{ "resource": "" }
q28146
register_shape_checker
train
def register_shape_checker(left_type, right_type, shape_checker_function):
  """Register a new shape checking function supporting given types.

  Shape checkers are primarily used to make sure that the seed derivatives
  passed into generated autodiff functions match their corresponding primal
  values.

  Args:
    left_type: A Python type object. The data type of the left operand
        supported by the checker.
    right_type: A Python type object. The data type of the right operand
        supported by the checker.
    shape_checker_function: A binary function that takes two arguments, left
        and right, of the types left_type and right_type respectively, and
        returns a boolean indicating whether or not they match.

  Raises:
    ValueError: If the given type pair was already registered.
  """
  type_pair = (left_type, right_type)
  if type_pair in shape_checkers:
    raise ValueError('Types %s already mapped to %s' %
                     (type_pair, shape_checkers[type_pair]))
  shape_checkers[type_pair] = shape_checker_function
python
{ "resource": "" }
q28147
shapes_match
train
def shapes_match(a, b):
  """Recursively check if shapes of object `a` and `b` match.

  Will walk lists, tuples and dicts.

  Args:
    a: object of type (numpy.ndarray, tf.Tensor, list, tuple, dict) to check
        for matching shapes against `b`.
    b: object to check for matching shape against `a`.

  Returns:
    A boolean indicating whether the shapes of `a` and `b` match.
  """
  both_sequences = (isinstance(a, (tuple, list)) and
                    isinstance(b, (tuple, list)))
  if both_sequences:
    if len(a) != len(b):
      return False
    return all(shapes_match(ia, ib) for ia, ib in zip(a, b))
  if isinstance(a, dict) and isinstance(b, dict):
    if len(a) != len(b):
      return False
    # NOTE(review): items are paired in iteration order, so two dicts with
    # the same keys inserted in different orders report a mismatch — confirm
    # callers always build these dicts in the same order.
    for (ak, av), (bk, bv) in zip(a.items(), b.items()):
      if ak != bk or not shapes_match(av, bv):
        return False
    return True
  # Leaf values are dispatched to a registered per-type-pair checker.
  shape_checker = shape_checkers[(type(a), type(b))]
  return shape_checker(a, b)
python
{ "resource": "" }
q28148
pop_stack
train
def pop_stack(stack, op_id):
  """Proxy of pop, where we know we're popping a stack off of a stack.

  We know that we don't need to differentiate through this. See pop() for
  more.

  Args:
    stack: The stack to pop from.
    op_id: A unique variable that is also passed into the matching push.
        Allows optimization passes to track pairs of pushes and pops.

  Returns:
    The last value.
  """
  if not __debug__:
    return stack.pop()
  # In debug mode entries are (value, op_id) pairs; verify pairing.
  pushed_stack, pushed_op_id = stack.pop()
  assert pushed_op_id == op_id, 'Wanted %s, got %s' % (op_id, pushed_op_id)
  return pushed_stack
python
{ "resource": "" }
q28149
push_stack
train
def push_stack(stack, substack, op_id):
  """Proxy of push, where we know we're pushing a stack onto a stack.

  Used when differentiating call trees, where sub-functions get their own
  stack. See push() for more.

  Args:
    stack: The stack object, which must support appending values.
    substack: The stack to append.
    op_id: A unique variable that is also passed into the corresponding pop.
        Allows optimization passes to track pairs of pushes and pops.

  Raises:
    ValueError: If a non-stack value for `substack` is passed.
  """
  if substack is not None and not isinstance(substack, Stack):
    raise ValueError(
        'Substack should be type tangent.Stack or None, instead found %s' %
        type(substack))
  # Debug builds record the op_id so the matching pop can verify pairing.
  entry = (substack, op_id) if __debug__ else substack
  stack.append(entry)
python
{ "resource": "" }
q28150
grad_dot
train
def grad_dot(dy, x1, x2):
  """Gradient of NumPy dot product w.r.t. to the left hand side.

  Args:
    dy: The gradient with respect to the output.
    x1: The left hand side of the `numpy.dot` function.
    x2: The right hand side.

  Returns:
    The gradient with respect to `x1` i.e. `x2.dot(dy.T)` with all the
    broadcasting involved.
  """
  # Promote the vector cases to 2-D so a single matrix path handles them.
  if numpy.ndim(x1) == 1:
    dy = numpy.atleast_2d(dy)
  elif numpy.ndim(x2) == 1:
    dy = numpy.transpose(numpy.atleast_2d(dy))
    x2 = numpy.transpose(numpy.atleast_2d(x2))
  # Collapse any leading (broadcast) dimensions of x2 by summing, then
  # transpose so it can right-multiply dy.
  batch_axes = tuple(numpy.arange(numpy.ndim(x2) - 2))
  x2_t = numpy.transpose(numpy.atleast_2d(numpy.sum(x2, axis=batch_axes)))
  # Sum dy over the axes that correspond to x2's collapsed batch dimensions.
  dy_axes = tuple(-numpy.arange(numpy.ndim(x2) - 2) - 2)
  dy_x2 = numpy.sum(dy, axis=dy_axes)
  return numpy.reshape(numpy.dot(dy_x2, x2_t), numpy.shape(x1))
python
{ "resource": "" }
q28151
trace_grad
train
def trace_grad(fn, args):
  """Trace a function, and return a VJP and the function's output.

  Args:
    fn: The function to trace with TensorFlow eager execution.
    args: A sequence of arguments to call `fn` with.

  Returns:
    A tuple `(result, vjp)` of the traced function's output and a
    vector-Jacobian product function produced by `make_vjp`.
  """
  # Imported lazily so TensorFlow is only required when tracing is used.
  from tensorflow.python.eager.backprop import make_vjp
  result, vjp = make_vjp(fn)(*args)
  return result, vjp
python
{ "resource": "" }
q28152
get_module_functions
train
def get_module_functions(modules):
  """Collect the functions, builtins and ufuncs defined in some modules.

  Args:
    modules: A list of Python modules to scan.

  Returns:
    module_fns: A set of all attributes of the given modules that are plain
    functions, builtin functions, or NumPy ufuncs.
  """
  function_types = (types.BuiltinFunctionType, types.FunctionType, numpy.ufunc)
  collected = set()
  for module in modules:
    for attr_name in dir(module):
      candidate = getattr(module, attr_name)
      if isinstance(candidate, function_types):
        collected.add(candidate)
  return collected
python
{ "resource": "" }
q28153
validate
train
def validate(node, source):
  """Validate an AST against the subset of Python that Tangent supports.

  Args:
    node: The AST to validate.
    source: The source code the AST was parsed from (used for error
        reporting).

  Returns:
    The unchanged node, if validation passed.
  """
  # TODO: leaving strict checking off to support insert_grad_of
  fence = LanguageFence(source, strict=False)
  fence.visit(node)
  return node
python
{ "resource": "" }
q28154
get_name
train
def get_name(node):
  """Get the name of a variable.

  Args:
    node: A `Name`, `Subscript` or `Attribute` node.

  Returns:
    The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.

  Raises:
    TypeError: If the node is not one of the supported kinds.
  """
  if isinstance(node, gast.Name):
    return node.id
  if isinstance(node, (gast.Subscript, gast.Attribute)):
    # Recurse into the object being subscripted / attributed.
    return get_name(node.value)
  raise TypeError
python
{ "resource": "" }
q28155
get_updated
train
def get_updated(node):
  """Return the variable names created or mutated by this statement.

  This function considers assign statements, augmented assign statements, and
  the targets of for loops, as well as function arguments. For example,
  `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and `y`,
  `for i in range(x)` will return `i`, etc.

  Args:
    node: An AST node.

  Returns:
    A set of variable names (strings) of all the variables created or
    mutated.
  """
  if isinstance(node, gast.Assign):
    return set.union(*(_get_target(target) for target in node.targets))
  if isinstance(node, (gast.For, gast.AugAssign)):
    return _get_target(node.target)
  if isinstance(node, gast.arguments):
    names = set(arg.id for arg in node.args + node.kwonlyargs)
    # Star-args and kwargs are optional and may be absent.
    for star_arg in (node.vararg, node.kwarg):
      if star_arg:
        names.add(star_arg.id)
    return names
  return set()
python
{ "resource": "" }
q28156
copy_node
train
def copy_node(node):
  """Deep-copy a node (or list of nodes) while keeping annotations intact.

  Args:
    node: A `gast.AST` node, or a list of them.

  Returns:
    A deep copy whose annotation dictionary is a shallow copy of the
    original's, so annotations survive the duplication.
  """
  if not isinstance(node, gast.AST):
    # Lists of statements are copied element-wise.
    return [copy_node(n) for n in node]
  duplicate = copy.deepcopy(node)
  annotations = getattr(node, anno.ANNOTATION_FIELD, {}).copy()
  setattr(duplicate, anno.ANNOTATION_FIELD, annotations)
  return duplicate
python
{ "resource": "" }
q28157
is_insert_grad_of_statement
train
def is_insert_grad_of_statement(node):
  """Check whether a context manager calls `insert_grad_of`.

  Args:
    node: The context manager node.

  Returns:
    Whether or not this node contains `insert_grad_of` calls.

  Raises:
    ValueError: If the `insert_grad_of` calls are mixed with other calls.
  """
  tangent_calls = [
      anno.getanno(item.context_expr, 'func', None) is utils.insert_grad_of
      for item in node.items]
  if all(tangent_calls):
    return True
  # A partial mix of insert_grad_of and other context managers is invalid.
  if any(tangent_calls):
    raise ValueError
  return False
python
{ "resource": "" }
q28158
add_comment
train
def add_comment(node, text, location='above'):
  """Add a comment to the given node.

  If the `SourceWithCommentGenerator` class is used these comments will be
  output as part of the source code.

  Note that a node can only contain one comment. Subsequent calls to
  `add_comment` will override the existing comment.

  Args:
    node: The AST node whose containing statement will be commented.
    text: A comment string.
    location: Where the comment should appear. Valid values are 'above',
        'below' and 'right'.

  Returns:
    The node with the comment stored as an annotation.
  """
  anno.setanno(node, 'comment', dict(location=location, text=text), safe=False)
  return node
python
{ "resource": "" }
q28159
remove_repeated_comments
train
def remove_repeated_comments(node):
  """Remove comments that repeat themselves.

  Multiple statements might be annotated with the same comment. This way if
  one of the statements is deleted during optimization passes, the comment
  won't be lost. This pass removes sequences of identical comments, leaving
  only the first one.

  Args:
    node: An AST.

  Returns:
    An AST where comments are not repeated in sequence.
  """
  previous_text = None
  for child in gast.walk(node):
    if not anno.hasanno(child, 'comment'):
      continue
    comment = anno.getanno(child, 'comment')
    if comment['text'] == previous_text:
      # Same text as the previous comment in walk order: drop the duplicate.
      anno.delanno(child, 'comment')
    previous_text = comment['text']
  return node
python
{ "resource": "" }
q28160
create_grad
train
def create_grad(node, namer, tangent=False):
  """Given a variable, create a variable for the gradient.

  Args:
    node: A node to create a gradient for, can be a normal variable (`x`) or
        a subscript (`x[i]`).
    namer: The namer object which will determine the name to use for the
        gradient.
    tangent: Whether a tangent (instead of adjoint) is created.

  Returns:
    node: A node representing the gradient with the correct name e.g. the
        gradient of `x[i]` is `dx[i]`.

        Note that this returns an invalid node, with the `ctx` attribute
        missing. It is assumed that this attribute is filled in later.

        Node has an `adjoint_var` annotation referring to the node it is an
        adjoint of.
  """
  if not isinstance(node, (gast.Subscript, gast.Name, gast.Str)):
    raise TypeError

  # Temporaries shadow another variable; the gradient belongs to the
  # shadowed variable, so recurse into it.
  if anno.hasanno(node, 'temp_var'):
    return create_grad(anno.getanno(node, 'temp_var'), namer, tangent)

  def _name_grad(node):
    # Base case: build the gradient Name node (`x` -> `dx`) and link it
    # back to its primal via the `adjoint_var` annotation.
    if not isinstance(node, gast.Name):
      raise TypeError
    varname = node.id
    name = namer.grad(varname, tangent)
    grad_node = gast.Name(
        id=name, ctx=None, annotation=None)
    anno.setanno(grad_node, 'adjoint_var', node)
    return grad_node
  if isinstance(node, gast.Subscript):
    # Gradient of `x[i]` is `dx[i]`: take the gradient of the value and
    # re-apply the same subscript.
    grad_node = create_grad(node.value, namer, tangent=tangent)
    grad_node.ctx = gast.Load()
    return gast.Subscript(value=grad_node, slice=node.slice, ctx=None)
  elif isinstance(node, gast.Str):
    # A string naming a variable: return the gradient's name as a string.
    grad_node = create_grad(
        gast.Name(id=node.s, ctx=None, annotation=None), namer,
        tangent=tangent)
    return gast.Str(grad_node.id)
  else:
    return _name_grad(node)
python
{ "resource": "" }
q28161
create_temp_grad
train
def create_temp_grad(node, namer, tangent=False):
  """Create a variable to store partial gradients.

  Args:
    node: See `create_grad`.
    namer: See `create_grad`.
    tangent: See `create_grad`.

  Returns:
    node: See `create_grad`. Returns a node representing the partial
        gradient. Note that this is always a simple variable e.g. the
        temporary partial of `x[i]` can be something like `_dxi`.

        Nodes are given an annotation `temp_adjoint_var`.
  """
  if not isinstance(node, (gast.Subscript, gast.Name)):
    raise TypeError

  def build_name(source):
    return gast.Name(
        id=namer.temp_grad(source.id, tangent), annotation=None, ctx=None)

  # For subscripts the temporary is named after the subscripted variable.
  source = node.value if isinstance(node, gast.Subscript) else node
  temp_node = build_name(source)
  anno.setanno(temp_node, 'temp_adjoint_var', node)
  return temp_node
python
{ "resource": "" }
q28162
create_temp
train
def create_temp(node, namer):
  """Create a temporary variable.

  Args:
    node: Create a temporary variable to store this variable in.
    namer: A naming object that guarantees the names are unique.

  Returns:
    node: See `create_grad`. Returns a temporary variable, which is always a
        simple variable annotated with `temp_var`.
  """
  if isinstance(node, (gast.Attribute, gast.Subscript)):
    # Attributes and subscripts are named after their base object.
    base_name = node.value.id
  elif isinstance(node, gast.Name):
    base_name = node.id
  else:
    raise TypeError
  temp_node = gast.Name(id=namer.temp(base_name), annotation=None, ctx=None)
  anno.setanno(temp_node, 'temp_var', node)
  return temp_node
python
{ "resource": "" }
q28163
forward_ad
train
def forward_ad(node, wrt, preserve_result=False, check_dims=True):
  """Perform forward-mode AD on an AST.

  This function analyses the AST to determine which variables are active and
  proceeds by taking the naive derivative. Before returning the primal and
  adjoint it annotates push and pop statements as such.

  Args:
    node: A `FunctionDef` AST node.
    wrt: A tuple of argument indices with respect to which we take the
        derivative.
    preserve_result: A boolean indicating whether the original
        non-differentiated function value should be returned
    check_dims: A boolean indicating whether the provided derivatives should
        have the same shape as their corresponding arguments.

  Returns:
    mod: A `Module` node containing the naive primal and adjoint of the
        function which can be fed to the `split` and `joint` functions.
    required: A list of tuples of functions and argument indices. These
        functions were called by the function but did not have an adjoint.
  """
  if not isinstance(node, gast.FunctionDef):
    raise TypeError

  # Activity analysis: mark which statements depend on the wrt arguments.
  cfg_obj = cfg.CFG.build_cfg(node)
  cfg.Active(range(len(node.args.args))).visit(cfg_obj.entry)

  # Build forward mode function
  fad = ForwardAD(wrt, preserve_result, check_dims)
  node = fad.visit(node)

  # Annotate stacks
  node = annotate.find_stacks(node)

  # Clean up naive forward-mode fcode
  node = gast.Module([node])
  anno.clearanno(node)

  return node, fad.required
python
{ "resource": "" }
q28164
to_source
train
def to_source(node, indentation=' ' * 4):
  """Return source code of a given AST, rendering comment annotations.

  Args:
    node: An AST node (`gast` nodes are converted to `ast` first).
    indentation: The indentation string used in the generated source.

  Returns:
    The generated source code as a string, without leading whitespace.
  """
  if isinstance(node, gast.AST):
    # astor works on standard ast nodes, not gast ones.
    node = gast.gast_to_ast(node)
  generator = SourceWithCommentGenerator(indentation, False,
                                         astor.string_repr.pretty_string)
  generator.visit(node)
  generator.result.append('\n')
  return astor.source_repr.pretty_source(generator.result).lstrip()
python
{ "resource": "" }
q28165
parse_function
train
def parse_function(fn):
  """Get the source of a function and return its AST.

  Args:
    fn: A Python function whose source code is accessible.

  Returns:
    The AST of the function's source, as produced by `parse_string`.

  Raises:
    ValueError: If the function's source cannot be retrieved (e.g. it was
        defined interactively or is backed by a C extension module).
  """
  try:
    return parse_string(inspect.getsource(fn))
  except (IOError, OSError) as e:
    raise ValueError(
        'Cannot differentiate function: %s. Tangent must be able to access the '
        'source code of the function. Functions defined in a Python '
        'interpreter and functions backed by C extension modules do not '
        'have accessible source code.' % e)
python
{ "resource": "" }
q28166
quote
train
def quote(src_string, return_expr=False):
  """Go from source code to AST nodes.

  This function returns a tree without enclosing `Module` or `Expr` nodes.

  Args:
    src_string: The source code to parse.
    return_expr: Whether or not to return a containing expression. This can
        be set to `True` if the result is to be part of a series of
        statements.

  Returns:
    An AST of the given source code.
  """
  module_node = parse_string(src_string)
  statements = module_node.body
  if len(statements) != 1:
    # Multiple statements: hand back the whole module.
    return module_node
  only = statements[0]
  if isinstance(only, gast.Expr) and not return_expr:
    # Unwrap the expression statement down to the bare expression.
    return only.value
  return only
python
{ "resource": "" }
q28167
get_push_pop
train
def get_push_pop():
  """Create pop and push nodes that are linked.

  Returns:
    A push and pop node which have `push_func` and `pop_func` annotations
    respectively, identifying them as such. They also have a `pop` and `push`
    annotation respectively, which links the push node to the pop node and
    vice versa, along with a fresh op id shared by the pair.
  """
  push, pop = copy.deepcopy(PUSH), copy.deepcopy(POP)
  # Cross-link the pair so optimization passes can treat them as a unit.
  anno.setanno(push, 'pop', pop)
  anno.setanno(push, 'gen_push', True)
  anno.setanno(pop, 'push', push)
  return push, pop, _generate_op_id()
python
{ "resource": "" }
q28168
get_push_pop_stack
train
def get_push_pop_stack():
  """Create pop and push nodes for substacks that are linked.

  Returns:
    A push and pop node which have `push_func` and `pop_func` annotations
    respectively, identifying them as such. They also have a `pop` and `push`
    annotation respectively, which links the push node to the pop node and
    vice versa, along with a fresh op id shared by the pair.
  """
  push, pop = copy.deepcopy(PUSH_STACK), copy.deepcopy(POP_STACK)
  # Cross-link the pair so optimization passes can treat them as a unit.
  anno.setanno(push, 'pop', pop)
  anno.setanno(push, 'gen_push', True)
  anno.setanno(pop, 'push', push)
  return push, pop, _generate_op_id()
python
{ "resource": "" }
q28169
reverse_ad
train
def reverse_ad(node, wrt, preserve_result, check_dims):
  """Perform reverse-mode AD on an AST.

  This function analyses the AST to determine which variables are active and
  proceeds by taking the naive derivative. Before returning the primal and
  adjoint it annotates push and pop statements as such.

  Args:
    node: A `FunctionDef` AST node.
    wrt: A tuple of argument indices with respect to which we take the
        derivative.
    preserve_result: A boolean indicating whether the generated derivative
        function should also return the original return value.
    check_dims: A boolean indicating whether the seed derivatives should have
        their dimensions checked to match their primal counterpart.

  Returns:
    mod: A `Module` node containing the naive primal and adjoint of the
        function which can be fed to the `split` and `joint` functions.
    required: A list of tuples of functions and argument indices. These
        functions were called by the function but did not have an adjoint.
    stack: The stack node used to carry state between primal and adjoint.
  """
  if not isinstance(node, gast.FunctionDef):
    raise TypeError
  # Activity analysis: only statements touching wrt-dependent variables
  # need adjoints.
  cfg.forward(node, cfg.Active(wrt))
  ad = ReverseAD(wrt, preserve_result, check_dims)
  pri, adj = ad.visit(node)
  mod = gast.Module(body=[pri, adj])
  mod = annotate.find_stacks(mod)
  return mod, ad.required, ad.stack
python
{ "resource": "" }
q28170
store_state
train
def store_state(node, reaching, defined, stack):
  """Push the final state of the primal onto the stack for the adjoint.

  Python's scoping rules make it possible for variables to not be defined in
  certain blocks based on the control flow path taken at runtime. In order to
  make sure we don't try to push non-existing variables onto the stack, we
  defined these variables explicitly (by assigning `None` to them) at the
  beginning of the function.

  All the variables that reach the return statement are pushed onto the
  stack, and in the adjoint they are popped off in reverse order.

  Args:
    node: A module with the primal and adjoint function definitions as
        returned by `reverse_ad`.
    reaching: The variable definitions that reach the end of the primal.
    defined: The variables defined at the end of the primal.
    stack: The stack node to use for storing and restoring state.

  Returns:
    node: A node with the requisite pushes and pops added to make sure that
        state is transferred between primal and adjoint split motion calls.
  """
  # Arguments are always in scope, so they never need storing.
  defs = [def_ for def_ in reaching
          if not isinstance(def_[1], gast.arguments)]
  if not len(defs):
    return node
  reaching, original_defs = zip(*defs)

  # Explicitly define variables that might or might not be in scope at the end
  assignments = []
  for id_ in set(reaching) - defined:
    assignments.append(quoting.quote('{} = None'.format(id_)))

  # Store variables at the end of the function and restore them
  store = []
  load = []
  for id_, def_ in zip(reaching, original_defs):
    # If the original definition of a value that we need to store
    # was an initialization as a stack, then we should be using `push_stack`
    # to store its state, and `pop_stack` to restore it. This allows
    # us to avoid doing any `add_grad` calls on the stack, which result
    # in type errors in unoptimized mode (they are usually elided
    # after calling `dead_code_elimination`).
    if isinstance(
        def_,
        gast.Assign) and 'tangent.Stack()' in quoting.unquote(def_.value):
      push, pop, op_id = get_push_pop_stack()
    else:
      push, pop, op_id = get_push_pop()
    store.append(
        template.replace(
            'push(_stack, val, op_id)',
            push=push,
            val=id_,
            _stack=stack,
            op_id=op_id))
    load.append(
        template.replace(
            'val = pop(_stack, op_id)',
            pop=pop,
            val=id_,
            _stack=stack,
            op_id=op_id))

  # Insert the stores just before the primal's return, and the loads (in
  # reverse order) at the top of the adjoint.
  body, return_ = node.body[0].body[:-1], node.body[0].body[-1]
  node.body[0].body = assignments + body + store + [return_]
  node.body[1].body = load[::-1] + node.body[1].body
  return node
python
{ "resource": "" }
q28171
split
train
def split(node, stack):
  """Carry over the state from the primal to the adjoint.

  Args:
    node: A module with the primal and adjoint function definitions as
        returned by `reverse_ad`.
    stack: The stack node to use for storing and restoring state.

  Returns:
    func: A `Module` node with two function definitions containing the primal
        and adjoint respectively.
  """
  node, defined, reaching = _fix(node)
  # Transfer end-of-primal state to the adjoint through the stack.
  node = store_state(node, reaching, defined, stack)
  anno.clearanno(node)
  return node
python
{ "resource": "" }
q28172
joint
train
def joint(node):
  """Merge the bodies of primal and adjoint into a single function.

  Args:
    node: A module with the primal and adjoint function definitions as
        returned by `reverse_ad`.

  Returns:
    func: A `Module` node with a single function definition containing the
        combined primal and adjoint.
  """
  node, _, _ = _fix(node)
  # Concatenate the primal body (minus its return) with the adjoint body.
  combined_body = node.body[0].body[:-1] + node.body[1].body
  merged = gast.FunctionDef(
      name=node.body[0].name,
      args=node.body[1].args,
      body=combined_body,
      decorator_list=[],
      returns=None)
  func = gast.Module(body=[merged])
  anno.clearanno(func)
  return func
python
{ "resource": "" }
q28173
_fix
train
def _fix(node): """Fix the naive construction of the adjont. See `fixes.py` for details. This function also returns the result of reaching definitions analysis so that `split` mode can use this to carry over the state from primal to adjoint. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. Returns: node: A module with the primal and adjoint function with additional variable definitions and such added so that pushes onto the stack and gradient accumulations are all valid. defined: The variables defined at the end of the primal. reaching: The variable definitions that reach the end of the primal. """ # Do reaching definitions analysis on primal and adjoint pri_cfg = cfg.CFG.build_cfg(node.body[0]) defined = cfg.Defined() defined.visit(pri_cfg.entry) reaching = cfg.ReachingDefinitions() reaching.visit(pri_cfg.entry) cfg.forward(node.body[1], cfg.Defined()) cfg.forward(node.body[1], cfg.ReachingDefinitions()) # Remove pushes of variables that were never defined fixes.CleanStack().visit(node) fixes.FixStack().visit(node.body[0]) # Change accumulation into definition if possible fixes.CleanGrad().visit(node.body[1]) # Define gradients that might or might not be defined fixes.FixGrad().visit(node.body[1]) return node, defined.exit, reaching.exit
python
{ "resource": "" }
q28174
ReverseAD.is_active
train
def is_active(self, node):
  """Checks whether a statement is active.

  An assignment is active when its right hand side contains active
  variables.

  Args:
    node: an instance of gast.Assign

  Returns:
    Whether the statement is active.
  """
  # Special case: pop statements are always processed, even when the
  # popped value is otherwise inactive.
  if (isinstance(node.value, gast.Call) and
      anno.getanno(node.value, 'func', False) == utils.pop):
    return True
  return any(
      isinstance(child, gast.Name) and isinstance(child.ctx, gast.Load) and
      child.id in self.active_variables
      for child in gast.walk(node.value))
python
{ "resource": "" }
q28175
ReverseAD.visit_statements
train
def visit_statements(self, nodes):
    """Generate the adjoint of a series of statements."""
    primals = []
    adjoints = collections.deque()
    for statement in nodes:
        primal, adjoint = self.visit(statement)
        # Normalize to lists; visitors may return single nodes.
        primal = primal if isinstance(primal, list) else [primal]
        adjoint = adjoint if isinstance(adjoint, list) else [adjoint]
        # `None` entries mark nodes slated for removal, so drop them.
        primals.extend(item for item in primal if item)
        # The adjoints of successive statements go in reverse order, but
        # the statements within one adjoint keep their relative order.
        adjoints.extendleft(item for item in reversed(adjoint) if item)
    return primals, list(adjoints)
python
{ "resource": "" }
q28176
ReverseAD.primal_and_adjoint_for_tracing
train
def primal_and_adjoint_for_tracing(self, node):
    """Build the primal and adjoint of a traceable function.

    Args:
      node: ast.Call node of a function we wish to trace, instead of
          transform

    Returns:
      primal: new ast.Assign node to replace the original primal call
      adjoint: new ast.Assign node using the VJP generated in primal to
          calculate the adjoint.
    """
    primal_template = grads.primals[tracing.Traceable]
    adjoint_template = grads.adjoints[tracing.Traceable]

    # Prep
    to_pack = node.args
    target = ast_.copy_node(self.orig_target)
    # A fresh, uniquely named function to hold the VJP produced by tracing.
    vjp = quoting.quote(self.namer.unique('%s_grad' % node.func.id))
    tmp = create.create_temp(quoting.quote('tmp'), self.namer)
    # Keyword arguments are not supported for traced calls.
    assert len(node.keywords) == 0

    # Full replacement of primal
    # TODO: do we need to set 'pri_call' on this?
    primal = template.replace(
        primal_template,
        namer=self.namer,
        result=target,
        fn=node.func,
        tmp=tmp,
        vjp=vjp,
        # The positional arguments are packed into a single tuple.
        args=gast.Tuple(elts=to_pack, ctx=gast.Load()))

    # Building adjoint using the vjp generated with the primal
    # One gradient temporary per packed argument, stored into by the VJP.
    dto_pack = gast.Tuple(
        elts=[create.create_temp_grad(arg, self.namer) for arg in to_pack],
        ctx=gast.Store())

    adjoint = template.replace(
        adjoint_template,
        namer=self.namer,
        result=target,
        vjp=vjp,
        dargs=dto_pack)

    return primal, adjoint
python
{ "resource": "" }
q28177
TreeTransformer.prepend
train
def prepend(self, node):
    """Prepend a statement to the current statement.

    Note that multiple calls to prepend will result in the last statement
    to be prepended to end up at the top.

    Args:
      node: The statement to prepend.

    Raises:
      ValueError: If the given node is not a statement.
    """
    if isinstance(node, grammar.STATEMENTS):
        self.to_prepend[-1].appendleft(node)
    else:
        raise ValueError
python
{ "resource": "" }
q28178
TreeTransformer.append
train
def append(self, node):
    """Append a statement to the current statement.

    Note that multiple calls to append will result in the last statement
    to be appended to end up at the bottom.

    Args:
      node: The statement to append.

    Raises:
      ValueError: If the given node is not a statement.
    """
    if isinstance(node, grammar.STATEMENTS):
        self.to_append[-1].append(node)
    else:
        raise ValueError
python
{ "resource": "" }
q28179
TreeTransformer.insert_top
train
def insert_top(self, node):
    """Insert statements at the top of the function body.

    Note that multiple calls to `insert_top` will result in the statements
    being prepended in that order; this is different behavior from
    `prepend`.

    Args:
      node: The statement to prepend.

    Raises:
      ValueError: If the given node is not a statement.
    """
    if isinstance(node, grammar.STATEMENTS):
        self.to_insert_top.append(node)
    else:
        raise ValueError
python
{ "resource": "" }
q28180
TreeTransformer.prepend_block
train
def prepend_block(self, node, reverse=False):
    """Prepend a statement to the current block.

    Args:
      node: The statement to prepend.
      reverse: When called multiple times, this flag determines whether the
          statement should be prepended or appended to the already inserted
          statements.

    Raises:
      ValueError: If the given node is not a statement.
    """
    if not isinstance(node, grammar.STATEMENTS):
        raise ValueError
    block = self.to_prepend_block[-1]
    # `reverse` pushes in front of earlier insertions; otherwise after.
    insert = block.appendleft if reverse else block.append
    insert(node)
python
{ "resource": "" }
q28181
TreeTransformer.append_block
train
def append_block(self, node, reverse=False):
    """Append a statement to the current block.

    Args:
      node: The statement to append.
      reverse: When called multiple times, this flag determines whether the
          statement should be prepended or appended to the already inserted
          statements.

    Raises:
      ValueError: If the given node is not a statement.
    """
    if not isinstance(node, grammar.STATEMENTS):
        raise ValueError
    block = self.to_append_block[-1]
    # `reverse` pushes in front of earlier insertions; otherwise after.
    insert = block.appendleft if reverse else block.append
    insert(node)
python
{ "resource": "" }
q28182
TreeTransformer.visit_statements
train
def visit_statements(self, nodes):
    """Visit a series of nodes in a node body.

    This function is factored out so that it can be called recursively on
    statements that are appended or prepended. This allows e.g. a nested
    expression to prepend a statement, and that statement can prepend a
    statement again, etc.

    Args:
      nodes: A list of statements.

    Returns:
      A list of transformed statements.
    """
    for node in nodes:
        if isinstance(node, gast.AST):
            # Fresh queues for statements that visiting this node will
            # request to be prepended/appended around it.
            self.to_prepend.append(deque())
            self.to_append.append(deque())
            node = self.visit(node)
            # Recurse into the prepended statements first so they land
            # before the node itself (and may themselves prepend more).
            self.visit_statements(self.to_prepend.pop())
            if isinstance(node, gast.AST):
                self.to_insert[-1].append(node)
            elif node:
                # A visitor may return a list of replacement statements.
                self.to_insert[-1].extend(node)
            # A falsy return (e.g. None) drops the node entirely.
            self.visit_statements(self.to_append.pop())
        else:
            # Non-AST entries are passed through untouched.
            self.to_insert[-1].append(node)
    return self.to_insert[-1]
python
{ "resource": "" }
q28183
resolve_calls
train
def resolve_calls(func):
    """Parse a function into an AST with function calls resolved.

    Calls are resolved using the global and local namespace of `func`, so
    procedural parameters (functions passed as arguments) won't be
    resolved, and neither will functions defined inside the body of `func`
    (they are not in the local namespace of the outer function). The
    function definition itself is also annotated, so that it can be
    matched to calls to it in other functions.

    Args:
      func: The function whose calls are being resolved.

    Returns:
      An AST where each `Call` node has a `func` annotation with the
      function handle that the call resolves to.

    Raises:
      AttributeError: When a function used on the RHS of an assignment
          cannot be resolved (because it was passed as an argument or was
          defined in the body of the function).
    """
    tree = quoting.parse_function(func)
    ResolveCalls(func).visit(tree)
    return tree
python
{ "resource": "" }
q28184
find_stacks
train
def find_stacks(node, strict=False):
    """Find pushes and pops to the stack and annotate them as such.

    Args:
      node: An AST node that might contain stack pushes and pops.
      strict: Whether to stringently test that each push and pop are
          matched. This is not always possible when taking higher-order
          derivatives of code generated in split-motion.

    Returns:
      The node passed in, with pushes and pops annotated in AST nodes.
    """
    # Pass 1: collect the IDs of all stack operations.
    finder = FindStackOps()
    finder.visit(node)
    # Pass 2: annotate the push and pop nodes using the collected pairs.
    AnnotateStacks(finder.push_pop_pairs, strict).visit(node)
    return node
python
{ "resource": "" }
q28185
unused
train
def unused(node):
    """Find unused definitions that can be removed.

    Runs reaching-definitions analysis followed by a walk over the AST to
    find all variable definitions that are not used later on.

    Args:
      node: The AST of e.g. a function body in which to find unused
          variable definitions.

    Returns:
      A set of definitions in the form of `(variable_name, node)` pairs
      which are unused in this AST.
    """
    cfg.forward(node, cfg.ReachingDefinitions())
    finder = Unused()
    finder.visit(node)
    return finder.unused
python
{ "resource": "" }
q28186
Unused.unused
train
def unused(self): """Calculate which AST nodes are unused. Note that we have to take special care in the case of x,y = f(z) where x is used later, but y is not.""" unused = self.definitions - self.used # Filter (variable_name,node) pairs that should be removed, because # node is used elsewhere used_nodes = set([u[1] for u in self.used]) unused = set([u for u in unused if u[1] not in used_nodes]) return unused
python
{ "resource": "" }
q28187
package_config
train
def package_config():
    """Use pkg-config to get library build parameters and tesseract version."""
    # Fail fast if the installed tesseract is older than the minimum supported.
    p = subprocess.Popen(['pkg-config', '--exists', '--atleast-version={}'.format(_TESSERACT_MIN_VERSION),
                          '--print-errors', 'tesseract'], stderr=subprocess.PIPE)
    _, error = p.communicate()
    if p.returncode != 0:
        raise Exception(error)
    # Collect linker/compiler flags for tesseract ...
    p = subprocess.Popen(['pkg-config', '--libs', '--cflags', 'tesseract'], stdout=subprocess.PIPE)
    output, _ = p.communicate()
    flags = _read_string(output).strip().split()
    # ... and for leptonica.
    p = subprocess.Popen(['pkg-config', '--libs', '--cflags', 'lept'], stdout=subprocess.PIPE)
    output, _ = p.communicate()
    flags2 = _read_string(output).strip().split()
    # Map pkg-config flag prefixes to distutils Extension keyword names.
    options = {'-L': 'library_dirs', '-I': 'include_dirs', '-l': 'libraries'}
    config = {}
    import itertools
    for f in itertools.chain(flags, flags2):
        try:
            opt = options[f[:2]]
        except KeyError:
            # Ignore flags we don't know how to translate (e.g. -D, -pthread).
            continue
        val = f[2:]
        if opt == 'include_dirs' and psplit(val)[1].strip(os.sep) in ('leptonica', 'tesseract'):
            # Use the parent include dir so sources can include <tesseract/...>.
            val = dirname(val)
        # Sets de-duplicate repeated flags from the two pkg-config calls.
        config.setdefault(opt, set()).add(val)
    config = {k: list(v) for k, v in config.items()}
    # Ask pkg-config for the exact tesseract version for compile-time checks.
    p = subprocess.Popen(['pkg-config', '--modversion', 'tesseract'], stdout=subprocess.PIPE)
    version, _ = p.communicate()
    version = _read_string(version).strip()
    _LOGGER.info("Supporting tesseract v{}".format(version))
    config['cython_compile_time_env'] = {'TESSERACT_VERSION': version_to_int(version)}
    _LOGGER.info("Configs from pkg-config: {}".format(config))
    return config
python
{ "resource": "" }
q28188
get_tesseract_version
train
def get_tesseract_version():
    """Try to extract the version from the tesseract executable, otherwise
    default to the minimum supported version.

    Returns:
      A build-config dict with 'libraries' plus a 'cython_compile_time_env'
      entry holding TESSERACT_VERSION as an integer.
    """
    config = {'libraries': ['tesseract', 'lept']}
    try:
        p = subprocess.Popen(['tesseract', '-v'], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        stdout_version, version = p.communicate()
        version = _read_string(version).strip()
        if version == '':
            # Newer tesseract prints the version banner on stdout instead.
            version = _read_string(stdout_version).strip()
        version_match = re.search(r'^tesseract ((?:\d+\.)+\d+).*', version, re.M)
        if version_match:
            version = version_match.group(1)
        else:
            # Logger.warn is a deprecated alias; use warning() instead.
            _LOGGER.warning('Failed to extract tesseract version number from: {}'.format(version))
            version = _TESSERACT_MIN_VERSION
    except OSError as e:
        # tesseract binary not found / not executable: fall back to default.
        _LOGGER.warning('Failed to extract tesseract version from executable: {}'.format(e))
        version = _TESSERACT_MIN_VERSION
    _LOGGER.info("Supporting tesseract v{}".format(version))
    version = version_to_int(version)
    config['cython_compile_time_env'] = {'TESSERACT_VERSION': version}
    _LOGGER.info("Building with configs: {}".format(config))
    return config
python
{ "resource": "" }
q28189
get_build_args
train
def get_build_args():
    """Return proper build parameters.

    Tries pkg-config first; if pkg-config is missing or cannot find the
    tesseract/leptonica libraries, falls back to probing the tesseract
    executable for its version.
    """
    try:
        build_args = package_config()
    except Exception as e:
        if isinstance(e, OSError):
            # ENOENT means pkg-config itself is absent; stay quiet then.
            if e.errno != errno.ENOENT:
                # Logger.warn is a deprecated alias; use warning() instead.
                _LOGGER.warning('Failed to run pkg-config: {}'.format(e))
        else:
            _LOGGER.warning('pkg-config failed to find tesseract/lept libraries: {}'.format(e))
        build_args = get_tesseract_version()
    if build_args['cython_compile_time_env']['TESSERACT_VERSION'] >= 0x3050200:
        # tesseract >= 3.05.02 requires C++11 compiler support.
        _LOGGER.debug('tesseract >= 03.05.02 requires c++11 compiler support')
        build_args['extra_compile_args'] = ['-std=c++11', '-DUSE_STD_NAMESPACE']
    _LOGGER.debug('build parameters: {}'.format(build_args))
    return build_args
python
{ "resource": "" }
q28190
MultiqcModule.parse_star_report
train
def parse_star_report (self, raw_data):
    """ Parse the final STAR log file.

    Args:
      raw_data: The full text of a STAR Log.final.out file.

    Returns:
      A dict of parsed metrics (floats), with unmapped read counts
      estimated from the reported percentages, or None if nothing parsed.
    """
    # One regex per metric; group(1) captures the numeric value.
    regexes = {
        'total_reads':                  r"Number of input reads \|\s+(\d+)",
        'avg_input_read_length':        r"Average input read length \|\s+([\d\.]+)",
        'uniquely_mapped':              r"Uniquely mapped reads number \|\s+(\d+)",
        'uniquely_mapped_percent':      r"Uniquely mapped reads % \|\s+([\d\.]+)",
        'avg_mapped_read_length':       r"Average mapped length \|\s+([\d\.]+)",
        'num_splices':                  r"Number of splices: Total \|\s+(\d+)",
        'num_annotated_splices':        r"Number of splices: Annotated \(sjdb\) \|\s+(\d+)",
        'num_GTAG_splices':             r"Number of splices: GT/AG \|\s+(\d+)",
        'num_GCAG_splices':             r"Number of splices: GC/AG \|\s+(\d+)",
        'num_ATAC_splices':             r"Number of splices: AT/AC \|\s+(\d+)",
        'num_noncanonical_splices':     r"Number of splices: Non-canonical \|\s+(\d+)",
        'mismatch_rate':                r"Mismatch rate per base, % \|\s+([\d\.]+)",
        'deletion_rate':                r"Deletion rate per base \|\s+([\d\.]+)",
        'deletion_length':              r"Deletion average length \|\s+([\d\.]+)",
        'insertion_rate':               r"Insertion rate per base \|\s+([\d\.]+)",
        'insertion_length':             r"Insertion average length \|\s+([\d\.]+)",
        'multimapped':                  r"Number of reads mapped to multiple loci \|\s+(\d+)",
        'multimapped_percent':          r"% of reads mapped to multiple loci \|\s+([\d\.]+)",
        'multimapped_toomany':          r"Number of reads mapped to too many loci \|\s+(\d+)",
        'multimapped_toomany_percent':  r"% of reads mapped to too many loci \|\s+([\d\.]+)",
        'unmapped_mismatches_percent':  r"% of reads unmapped: too many mismatches \|\s+([\d\.]+)",
        'unmapped_tooshort_percent':    r"% of reads unmapped: too short \|\s+([\d\.]+)",
        'unmapped_other_percent':       r"% of reads unmapped: other \|\s+([\d\.]+)",
    }
    parsed_data = {}
    for k, r in regexes.items():
        r_search = re.search(r, raw_data, re.MULTILINE)
        if r_search:
            parsed_data[k] = float(r_search.group(1))
    # Figure out the numbers for unmapped as for some reason only the percentages are given
    try:
        total_mapped = parsed_data['uniquely_mapped'] + parsed_data['multimapped'] + parsed_data['multimapped_toomany']
        unmapped_count = parsed_data['total_reads'] - total_mapped
        total_unmapped_percent = parsed_data['unmapped_mismatches_percent'] + parsed_data['unmapped_tooshort_percent'] + parsed_data['unmapped_other_percent']
        try:
            # Apportion the unmapped count across the three categories by
            # their relative percentages.
            parsed_data['unmapped_mismatches'] = int(round(unmapped_count * (parsed_data['unmapped_mismatches_percent'] / total_unmapped_percent), 0))
            parsed_data['unmapped_tooshort'] = int(round(unmapped_count * (parsed_data['unmapped_tooshort_percent'] / total_unmapped_percent), 0))
            parsed_data['unmapped_other'] = int(round(unmapped_count * (parsed_data['unmapped_other_percent'] / total_unmapped_percent), 0))
        except ZeroDivisionError:
            # All unmapped percentages were zero.
            parsed_data['unmapped_mismatches'] = 0
            parsed_data['unmapped_tooshort'] = 0
            parsed_data['unmapped_other'] = 0
    except KeyError:
        # One of the required metrics was missing; skip the estimation.
        pass
    if len(parsed_data) == 0:
        return None
    return parsed_data
python
{ "resource": "" }
q28191
MultiqcModule.parse_star_genecount_report
train
def parse_star_genecount_report(self, f):
    """ Parse a STAR ReadsPerGene gene counts output file.

    Args:
      f: MultiQC file dict; f['f'] iterates over lines, f['fn'] is the
          file name (used only in the parse-failure warning).

    Returns:
      A dict with 'unstranded', 'first_strand' and 'second_strand'
      sub-dicts (one per numeric column), each holding the special N_*
      counter rows plus an aggregated 'N_genes' read total, or None if no
      gene rows could be parsed.
    """
    # Three numeric columns: unstranded, stranded/first-strand, stranded/second-strand
    keys = [ 'N_unmapped', 'N_multimapping', 'N_noFeature', 'N_ambiguous' ]
    unstranded = { 'N_genes': 0 }
    first_strand = { 'N_genes': 0 }
    second_strand = { 'N_genes': 0 }
    num_errors = 0
    num_genes = 0
    for l in f['f']:
        s = l.split("\t")
        try:
            for i in [1, 2, 3]:
                s[i] = float(s[i])
            if s[0] in keys:
                unstranded[s[0]] = s[1]
                first_strand[s[0]] = s[2]
                second_strand[s[0]] = s[3]
            else:
                unstranded['N_genes'] += s[1]
                first_strand['N_genes'] += s[2]
                second_strand['N_genes'] += s[3]
                num_genes += 1
        except (IndexError, ValueError):
            # Tolerate a few errors in case there is something random added
            # at the top of the file. ValueError covers non-numeric fields
            # (e.g. a header line), which the original IndexError-only
            # handler crashed on.
            num_errors += 1
            if num_errors > 10 and num_genes == 0:
                log.warning("Error parsing {}".format(f['fn']))
                return None
    if num_genes > 0:
        return { 'unstranded': unstranded, 'first_strand': first_strand, 'second_strand': second_strand }
    else:
        return None
python
{ "resource": "" }
q28192
MultiqcModule.star_stats_table
train
def star_stats_table(self):
    """ Take the parsed stats from the STAR report and add them to the
    basic stats table at the top of the report """
    headers = OrderedDict([
        ('uniquely_mapped_percent', {
            'title': '% Aligned',
            'description': '% Uniquely mapped reads',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'YlGn',
        }),
        ('uniquely_mapped', {
            'title': '{} Aligned'.format(config.read_count_prefix),
            'description': 'Uniquely mapped reads ({})'.format(config.read_count_desc),
            'min': 0,
            'scale': 'PuRd',
            # Scale raw counts into the configured display unit.
            'modify': lambda x: x * config.read_count_multiplier,
            'shared_key': 'read_count',
        }),
    ])
    self.general_stats_addcols(self.star_data, headers)
python
{ "resource": "" }
q28193
MultiqcModule.star_genecount_chart
train
def star_genecount_chart (self):
    """ Make a plot for the ReadsPerGene output.

    Plots three datasets (unstranded, first-strand, second-strand), each
    sharing the same category configuration.
    """
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['N_genes'] =        { 'color': '#2f7ed8', 'name': 'Overlapping Genes' }
    keys['N_noFeature'] =    { 'color': '#0d233a', 'name': 'No Feature' }
    keys['N_ambiguous'] =    { 'color': '#492970', 'name': 'Ambiguous Features' }
    keys['N_multimapping'] = { 'color': '#f28f43', 'name': 'Multimapping' }
    keys['N_unmapped'] =     { 'color': '#7f0000', 'name': 'Unmapped' }
    # Config for the plot
    pconfig = {
        'id': 'star_gene_counts',
        'title': 'STAR: Gene Counts',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
        'data_labels': ['Unstranded','Same Stranded','Reverse Stranded']
    }
    datasets = [
        self.star_genecounts_unstranded,
        self.star_genecounts_first_strand,
        self.star_genecounts_second_strand
    ]
    # One cats entry per dataset: the original passed four copies of
    # `keys` for only three datasets / data_labels.
    return bargraph.plot(datasets, [keys] * len(datasets), pconfig)
python
{ "resource": "" }
q28194
MultiqcModule.mirtrace_length_plot
train
def mirtrace_length_plot(self):
    """ Generate the miRTrace Read Length Distribution line graph.

    Returns:
      A linegraph plot of read counts per read length, or None if no
      sample had usable data.
    """
    data = dict()
    for s_name in self.length_data:
        try:
            data[s_name] = {int(d): int(self.length_data[s_name][d]) for d in self.length_data[s_name]}
        except (KeyError, ValueError):
            # Skip samples whose lengths/counts cannot be parsed as ints;
            # int() raises ValueError, which the original KeyError-only
            # handler would not have caught.
            pass
    if len(data) == 0:
        log.debug('No valid data for read length distribution')
        return None
    config = {
        'id': 'mirtrace_length_plot',
        'title': 'miRTrace: Read Length Distribution',
        'ylab': 'Read Count',
        # Fixed typo in the user-facing axis label ('Lenth' -> 'Length').
        'xlab': 'Read Length (bp)',
        'ymin': 0,
        'xmin': 0,
        'xDecimals': False,
        'tt_label': '<b>Read Length (bp) {point.x}</b>: {point.y} Read Count',
        'xPlotBands': [
            {'from': 40, 'to': 50, 'color': '#ffebd1'},
            {'from': 26, 'to': 40, 'color': '#e2f5ff'},
            {'from': 18, 'to': 26, 'color': '#e5fce0'},
            {'from': 0, 'to': 18, 'color': '#ffffe2'},
        ]
    }
    return linegraph.plot(data, config)
python
{ "resource": "" }
q28195
MultiqcModule.mirtrace_rna_categories
train
def mirtrace_rna_categories(self):
    """ Generate the miRTrace RNA Categories bar graph."""
    # Category key, bar color and display name, in plotting order.
    categories = [
        ('reads_mirna',    '#33a02c', 'miRNA'),
        ('reads_rrna',     '#ff7f00', 'rRNA'),
        ('reads_trna',     '#1f78b4', 'tRNA'),
        ('reads_artifact', '#fb9a99', 'Artifact'),
        ('reads_unknown',  '#d9d9d9', 'Unknown'),
    ]
    keys = OrderedDict()
    for key, color, name in categories:
        keys[key] = {'color': color, 'name': name}
    # Config for the plot
    config = {
        'id': 'mirtrace_rna_categories_plot',
        'title': 'miRTrace: RNA Categories',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    return bargraph.plot(self.summary_data, keys, config)
python
{ "resource": "" }
q28196
MultiqcModule.mirtrace_contamination_check
train
def mirtrace_contamination_check(self):
    """ Generate the miRTrace Contamination Check bar graph."""
    # A library of 24 colors, cycled if there are more clades than colors.
    color_lib = ['rgb(166,206,227)', 'rgb(31,120,180)', 'rgb(178,223,138)',
                 'rgb(51,160,44)', 'rgb(251,154,153)', 'rgb(227,26,28)',
                 'rgb(253,191,111)', 'rgb(255,127,0)', 'rgb(202,178,214)',
                 'rgb(106,61,154)', 'rgb(255,255,153)', 'rgb(177,89,40)',
                 'rgb(141,211,199)', 'rgb(255,255,179)', 'rgb(190,186,218)',
                 'rgb(251,128,114)', 'rgb(128,177,211)', 'rgb(253,180,98)',
                 'rgb(179,222,105)', 'rgb(252,205,229)', 'rgb(217,217,217)',
                 'rgb(188,128,189)', 'rgb(204,235,197)', 'rgb(255,237,111)']
    # Clades are taken from the first sample; one bar category per clade.
    first_sample = list(self.contamination_data.keys())[0]
    keys = OrderedDict()
    for idx, clade in enumerate(self.contamination_data[first_sample]):
        keys[clade] = {
            'color': color_lib[idx % len(color_lib)],
            'name': clade
        }
    # Config for the plot
    config = {
        'cpswitch_c_active': False,
        'id': 'mirtrace_contamination_check_plot',
        'title': 'miRTrace: Contamination Check',
        'ylab': '# miRNA detected',
        'cpswitch_counts_label': 'Number of detected miRNA'
    }
    return bargraph.plot(self.contamination_data, keys, config)
python
{ "resource": "" }
q28197
MultiqcModule.mirtrace_complexity_plot
train
def mirtrace_complexity_plot(self):
    """ Generate the miRTrace miRNA Complexity Plot"""
    data = dict()
    for s_name in self.complexity_data:
        try:
            # NOTE(review): unlike mirtrace_length_plot, the key/value pair
            # is inverted here (stored value becomes the x key), so the
            # x-axis is the number of sequencing reads and the y-axis the
            # distinct miRNA count. Presumably self.complexity_data maps
            # distinct-miRNA counts to read counts -- confirm against the
            # parser that fills it.
            data[s_name] = {int(self.complexity_data[s_name][d]) : int(d) for d in self.complexity_data[s_name]}
        except KeyError:
            # Skip samples with unusable data.
            pass
    if len(data) == 0:
        log.debug('No valid data for miRNA complexity')
        return None
    config = {
        'id': 'mirtrace_complexity_plot',
        'title': 'miRTrace: miRNA Complexity Plot',
        'ylab': 'Distinct miRNA Count',
        'xlab': 'Number of Sequencing Reads',
        'ymin': 0,
        'xmin': 1,
        'xDecimals': False,
        'tt_label': '<b>Number of Sequencing Reads {point.x}</b>: {point.y} Distinct miRNA Count',
    }
    return linegraph.plot(data, config)
python
{ "resource": "" }
q28198
StatsReportMixin.bcftools_stats_genstats_headers
train
def bcftools_stats_genstats_headers(self):
    """ Build the header configuration for the key bcftools stats columns
    shown in the General Stats table. """
    # (key, column title, description, number format, hidden-by-default)
    specs = [
        ('number_of_records', 'Vars', 'Variations total', '{:,.0f}', False),
        ('variations_hom', 'Hom', 'Variations homozygous', '{:,.0f}', False),
        ('variations_het', 'Het', 'Variations heterozygous', '{:,.0f}', False),
        ('number_of_SNPs', 'SNP', 'Variation SNPs', '{:,.0f}', False),
        ('number_of_indels', 'Indel', 'Variation Insertions/Deletions', '{:,.0f}', False),
        ('tstv', 'Ts/Tv', 'Variant SNP transition / transversion ratio', '{:,.2f}', False),
        ('number_of_MNPs', 'MNP', 'Variation multinucleotide polymorphisms', '{:,.0f}', True),
    ]
    stats_headers = OrderedDict()
    for key, title, description, fmt, hidden in specs:
        entry = {
            'title': title,
            'description': description,
            'min': 0,
            'format': fmt,
        }
        if hidden:
            entry['hidden'] = True
        stats_headers[key] = entry
    return stats_headers
python
{ "resource": "" }
q28199
MultiqcModule.parse_hicup_logs
train
def parse_hicup_logs(self, f):
    """ Parse a HiCUP summary report.

    Populates self.hicup_data keyed by cleaned sample name. Returns None
    for files that are not tab-separated HiCUP summaries (wrong extension
    or first column header not 'File').
    """
    if not f['fn'].endswith('.txt'):
        return None
    header = []
    lines = f['f'].splitlines()
    for l in lines:
        s = l.split("\t")
        if len(header) == 0:
            # First row must be the header, starting with 'File'.
            if s[0] != 'File':
                return None
            header = s[1:]
        else:
            s_name = self.clean_s_name(s[0], f['root'])
            # Strip the 'HiCUP_output/' directory prefix if present.
            # The old code used str.lstrip('HiCUP_output/'), which strips
            # a *character set* rather than a prefix and could mangle
            # sample names starting with any of those characters.
            if s_name.startswith('HiCUP_output/'):
                s_name = s_name[len('HiCUP_output/'):]
            parsed_data = {}
            for idx, num in enumerate(s[1:]):
                try:
                    parsed_data[header[idx]] = float(num)
                except ValueError:
                    # Non-numeric columns are kept as strings.
                    parsed_data[header[idx]] = num
            parsed_data['Duplicate_Read_Pairs'] = parsed_data['Valid_Pairs'] - parsed_data['Deduplication_Read_Pairs_Uniques']
            if s_name in self.hicup_data:
                log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
            self.add_data_source(f, s_name)
            self.hicup_data[s_name] = parsed_data
python
{ "resource": "" }