sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def from_dict(cls, dictionary):
    """Build a Berksfile instance from a dictionary of requirements.

    Recognized keys are 'source', 'cookbook' and 'metadata'; any other
    keys are ignored. Statements are grouped in a stable order
    (sources, then cookbooks, then bare 'metadata') and joined into a
    Berksfile body which is handed to ``cls.from_string``.
    """
    source_stmts = set()
    cookbook_stmts = set()
    misc_stmts = set()
    for key, val in dictionary.items():
        if key == 'source':
            source_stmts.update("source '%s'" % src for src in val)
        elif key == 'cookbook':
            cookbook_stmts.update(
                cls.cookbook_statement(name, meta)
                for name, meta in val.items())
        elif key == 'metadata':
            misc_stmts.add('metadata')
    # emit the groups in a fixed order: sources, cookbooks, misc
    chunks = []
    for group in (source_stmts, cookbook_stmts, misc_stmts):
        if group:
            chunks.append('\n' + '\n'.join(group))
    return cls.from_string(''.join(chunks))
Create a Berksfile instance from a dict.
entailment
def cookbook_statement(cookbook_name, metadata=None):
    """Return a valid Ruby 'cookbook' statement for the Berksfile.

    :param cookbook_name: Name of the cookbook.
    :param metadata: Optional dict of cookbook options. A 'constraint'
        key is rendered as a bare quoted version string; every other
        option is rendered as a ``key: 'value'`` pair.
    :raises TypeError: If metadata is given but is not a dict.
    """
    line = "cookbook '%s'" % cookbook_name
    if metadata:
        if not isinstance(metadata, dict):
            raise TypeError("Berksfile dependency hash for %s "
                            "should be a dict of options, not %s." %
                            (cookbook_name, metadata))
        # work on a copy so pop() below does not mutate the caller's dict
        metadata = metadata.copy()
        # 'constraint' is special-cased: rendered without a key name
        if 'constraint' in metadata:
            line += ", '%s'" % metadata.pop('constraint')
        for opt, spec in metadata.items():
            line += ", %s: '%s'" % (opt, spec)
    return line
Return a valid Ruby 'cookbook' statement for the Berksfile.
entailment
def merge(self, other):
    """Add requirements from 'other' Berksfile into this one.

    Cookbook and source statements present in 'other' but missing from
    this Berksfile are written out, then the merged dict is returned.

    :param other: A Berksfile instance to merge from.
    :raises TypeError: If 'other' is not a Berksfile.
    """
    if not isinstance(other, Berksfile):
        # fix: the original passed type(other) as a spurious second
        # TypeError argument instead of %-formatting it into the message
        raise TypeError("Berksfile to merge should be a 'Berksfile' "
                        "instance, not %s." % type(other))
    current = self.to_dict()
    new = other.to_dict()
    # compare and gather cookbook dependencies we don't already have
    berks_writelines = ['%s\n' % self.cookbook_statement(cbn, meta)
                        for cbn, meta in new.get('cookbook', {}).items()
                        if cbn not in current.get('cookbook', {})]
    # compare and gather 'source' requirements we don't already have
    berks_writelines.extend(["source '%s'\n" % src
                             for src in new.get('source', [])
                             if src not in current.get('source', [])])
    self.write_statements(berks_writelines)
    return self.to_dict()
Add requirements from 'other' Berksfile into this one.
entailment
def manifest(self):
    """The manifest definition of the stencilset as a dict.

    Loaded lazily from ``self.manifest_path`` (a JSON file) on first
    access, then cached on the instance.
    """
    cached = self._manifest
    if not cached:
        with open(self.manifest_path) as handle:
            cached = json.load(handle)
        self._manifest = cached
    return cached
The manifest definition of the stencilset as a dict.
entailment
def stencils(self):
    """List of stencils declared in the manifest (cached)."""
    cached = self._stencils
    if not cached:
        cached = self.manifest['stencils']
        self._stencils = cached
    return cached
List of stencils.
entailment
def get_stencil(self, stencil_name, **options):
    """Return a stencil definition dict for the given stencil name.

    Deep-copies the manifest, overlays the named stencil's overrides,
    fills in option defaults (caller-supplied **options win), and
    expands '<NAME>' placeholders in file paths using the 'name'
    option.

    :raises ValueError: If the stencil is not declared in the manifest,
        or a '<NAME>' placeholder is used without a 'name' option.
    """
    if stencil_name not in self.manifest.get('stencils', {}):
        raise ValueError("Stencil '%s' not declared in StencilSet "
                         "manifest." % stencil_name)
    stencil = copy.deepcopy(self.manifest)
    every_stencil = stencil.pop('stencils')
    stencil.pop('default_stencil', None)
    utils.deepupdate(stencil, every_stencil[stencil_name])
    # merge options, preferring **options (probably user-supplied)
    for opt, data in stencil.get('options', {}).items():
        options.setdefault(opt, data.get('default', ''))
    stencil['options'] = options
    name = stencil['options'].get('name')
    # iterate over a snapshot since we mutate stencil['files'] below
    for path, template in stencil['files'].copy().items():
        if '<NAME>' not in path:
            continue
        # the 'name' option may legitimately be absent; only an actual
        # placeholder use makes it mandatory
        if not name:
            raise ValueError("Stencil does not include a name option")
        stencil['files'].pop(path)
        stencil['files'][path.replace('<NAME>', name)] = template
    return stencil
Return a Stencil instance given a stencil name.
entailment
def _determine_selected_stencil(stencil_set, stencil_definition): """Determine appropriate stencil name for stencil definition. Given a fastfood.json stencil definition with a stencil set, figure out what the name of the stencil within the set should be, or use the default """ if 'stencil' not in stencil_definition: selected_stencil_name = stencil_set.manifest.get('default_stencil') else: selected_stencil_name = stencil_definition.get('stencil') if not selected_stencil_name: raise ValueError("No stencil name, within stencil set %s, specified." % stencil_definition['name']) return selected_stencil_name
Determine appropriate stencil name for stencil definition. Given a fastfood.json stencil definition with a stencil set, figure out what the name of the stencil within the set should be, or use the default
entailment
def _build_template_map(cookbook, cookbook_name, stencil): """Build a map of variables for this generated cookbook and stencil. Get template variables from stencil option values, adding the default ones like cookbook and cookbook year. """ template_map = { 'cookbook': {"name": cookbook_name}, 'options': stencil['options'] } # Cookbooks may not yet have metadata, so we pass an empty dict if so try: template_map['cookbook'] = cookbook.metadata.to_dict().copy() except ValueError: # ValueError may be returned if this cookbook does not yet have any # metadata.rb written by a stencil. This is okay, as everyone should # be using the base stencil first, and then we'll try to call # cookbook.metadata again in this method later down. pass template_map['cookbook']['year'] = datetime.datetime.now().year return template_map
Build a map of variables for this generated cookbook and stencil. Get template variables from stencil option values, adding the default ones like cookbook and cookbook year.
entailment
def _render_binaries(files, written_files): """Write binary contents from filetable into files. Using filetable for the input files, and the list of files, render all the templates into actual files on disk, forcing to overwrite the file as appropriate, and using the given open mode for the file. """ for source_path, target_path in files.items(): needdir = os.path.dirname(target_path) assert needdir, "Target should have valid parent dir" try: os.makedirs(needdir) except OSError as err: if err.errno != errno.EEXIST: raise if os.path.isfile(target_path): if target_path in written_files: LOG.warning("Previous stencil has already written file %s.", target_path) else: print("Skipping existing file %s" % target_path) LOG.info("Skipping existing file %s", target_path) continue print("Writing rendered file %s" % target_path) LOG.info("Writing rendered file %s", target_path) shutil.copy(source_path, target_path) if os.path.exists(target_path): written_files.append(target_path)
Write binary contents from filetable into files. Using filetable for the input files, and the list of files, render all the templates into actual files on disk, forcing to overwrite the file as appropriate, and using the given open mode for the file.
entailment
def _render_templates(files, filetable, written_files, force, open_mode='w'): """Write template contents from filetable into files. Using filetable for the rendered templates, and the list of files, render all the templates into actual files on disk, forcing to overwrite the file as appropriate, and using the given open mode for the file. """ for tpl_path, content in filetable: target_path = files[tpl_path] needdir = os.path.dirname(target_path) assert needdir, "Target should have valid parent dir" try: os.makedirs(needdir) except OSError as err: if err.errno != errno.EEXIST: raise if os.path.isfile(target_path): if force: LOG.warning("Forcing overwrite of existing file %s.", target_path) elif target_path in written_files: LOG.warning("Previous stencil has already written file %s.", target_path) else: print("Skipping existing file %s" % target_path) LOG.info("Skipping existing file %s", target_path) continue with open(target_path, open_mode) as newfile: print("Writing rendered file %s" % target_path) LOG.info("Writing rendered file %s", target_path) newfile.write(content) written_files.append(target_path)
Write template contents from filetable into files. Using filetable for the rendered templates, and the list of files, render all the templates into actual files on disk, forcing to overwrite the file as appropriate, and using the given open mode for the file.
entailment
def build_cookbook(build_config, templatepack_path, cookbooks_home,
                   force=False):
    """Build a cookbook from a fastfood.json file.

    Can build on an existing cookbook, otherwise this will create a new
    cookbook for you based on your templatepack.

    :param build_config: Path to the fastfood.json config file.
    :param templatepack_path: Path to the template pack.
    :param cookbooks_home: Directory cookbooks live in.
    :param force: Overwrite existing files when True.
    :returns: Tuple of (written_files, cookbook).
    """
    # avoid shadowing the file handle with the parsed config
    with open(build_config) as cfg_file:
        cfg = json.load(cfg_file)
    cookbook_name = cfg['name']
    template_pack = pack.TemplatePack(templatepack_path)
    written_files = []
    cookbook = create_new_cookbook(cookbook_name, cookbooks_home)
    # fix: initialize so an empty 'stencils' list no longer raises
    # UnboundLocalError on the return statement
    updated_cookbook = cookbook
    for stencil_definition in cfg['stencils']:
        selected_stencil_set_name = stencil_definition.get('stencil_set')
        stencil_set = template_pack.load_stencil_set(
            selected_stencil_set_name)
        selected_stencil_name = _determine_selected_stencil(
            stencil_set, stencil_definition)
        stencil = stencil_set.get_stencil(selected_stencil_name,
                                          **stencil_definition)
        updated_cookbook = process_stencil(
            cookbook,
            cookbook_name,  # in case no metadata.rb yet
            template_pack,
            force,
            stencil_set,
            stencil,
            written_files)
    return written_files, updated_cookbook
Build a cookbook from a fastfood.json file. Can build on an existing cookbook, otherwise this will create a new cookbook for you based on your templatepack.
entailment
def process_stencil(cookbook, cookbook_name, template_pack, force_argument,
                    stencil_set, stencil, written_files):
    """Process the requested stencil, writing any missing files.

    The stencil should be one of the template pack's stencils. Renders
    its templates and partials, copies its binaries, and merges its
    metadata.rb and Berksfile dependencies into the cookbook.

    :returns: The updated cookbook.
    """
    # force can be passed on the command line or forced in the
    # stencil's own options
    force = force_argument or stencil['options'].get('force', False)

    def _path_table(section):
        # Normalize the section to a dict and build
        # {template/binary path under the stencil set: target path
        #  under the cookbook}
        entries = stencil.get(section) or {}
        stencil[section] = entries
        return {
            os.path.join(stencil_set.path, src):
                os.path.join(cookbook.path, dst)
            for dst, src in entries.items()
        }

    files = _path_table('files')
    partials = _path_table('partials')
    binaries = _path_table('binaries')

    template_map = _build_template_map(cookbook, cookbook_name, stencil)
    filetable = templating.render_templates(*files.keys(), **template_map)
    _render_templates(files, filetable, written_files, force)
    parttable = templating.render_templates(*partials.keys(), **template_map)
    _render_templates(partials, parttable, written_files, force,
                      open_mode='a')
    # no templating needed for binaries, just pass off to the copy method
    _render_binaries(binaries, written_files)

    # merge metadata.rb dependencies
    stencil_metadata = book.MetadataRb.from_dict(
        {'depends': stencil.get('dependencies', {})})
    cookbook.metadata.merge(stencil_metadata)
    # merge Berksfile dependencies
    stencil_berks = book.Berksfile.from_dict(
        {'cookbook': stencil.get('berks_dependencies', {})})
    cookbook.berksfile.merge(stencil_berks)
    return cookbook
Process the stencil requested, writing any missing files as needed. The stencil named 'stencilset_name' should be one of templatepack's stencils.
entailment
def create_new_cookbook(cookbook_name, cookbooks_home):
    """Create a new cookbook directory and return a CookBook for it.

    :param cookbook_name: Name of the new cookbook.
    :param cookbooks_home: Target dir for the new cookbook.
    :raises ValueError: If cookbooks_home does not exist.
    """
    cookbooks_home = utils.normalize_path(cookbooks_home)
    if not os.path.exists(cookbooks_home):
        raise ValueError("Target cookbook dir %s does not exist."
                         % os.path.relpath(cookbooks_home))
    cookbook_path = os.path.join(cookbooks_home, cookbook_name)
    LOG.debug("Creating dir -> %s", cookbook_path)
    try:
        os.makedirs(cookbook_path)
    except OSError as err:
        # an existing cookbook dir is fine; anything else is fatal
        if err.errno != errno.EEXIST:
            raise
        LOG.info("Skipping existing directory %s", cookbook_path)
    return book.CookBook(cookbook_path)
Create a new cookbook. :param cookbook_name: Name of the new cookbook. :param cookbooks_home: Target dir for new cookbook.
entailment
def ruby_lines(text):
    """Tidy up lines from a file, honoring '#' comments.

    Does not honor ruby block comments (yet).

    :param text: A string or a list of lines.
    :returns: List of stripped, non-empty, non-comment lines.
    :raises TypeError: If text is neither a string nor a list.
    """
    # fix: 'basestring' only exists on Python 2; fall back to str so
    # this also runs on Python 3
    try:
        string_types = basestring  # noqa -- Python 2
    except NameError:
        string_types = str  # Python 3
    if isinstance(text, string_types):
        text = text.splitlines()
    elif not isinstance(text, list):
        raise TypeError("text should be a list or a string, not %s"
                        % type(text))
    stripped = (line.strip() for line in text)
    return [line for line in stripped
            if line and not line.startswith('#')]
Tidy up lines from a file, honor # comments. Does not honor ruby block comments (yet).
entailment
def deepupdate(original, update, levels=5):
    """Update 'original' from dict/iterable 'update', recursing deeper.

    Like dict.update, but recurses into nested dicts up to 'levels'
    deep. A standard dict.update is levels=0.

    :raises TypeError: When a nested dict in 'original' would be
        clobbered by a non-dict value.
    """
    if not isinstance(update, dict):
        update = dict(update)
    if levels <= 0:
        original.update(update)
        return
    for key, val in update.items():
        existing = original.get(key)
        if isinstance(existing, dict):
            # might need a force=True to override this
            if not isinstance(val, dict):
                raise TypeError("Trying to update dict %s with "
                                "non-dict %s" % (existing, val))
            deepupdate(existing, val, levels=levels - 1)
        else:
            original.update({key: val})
Update, like dict.update, but deeper. Update 'original' from dict/iterable 'update'. I.e., it recurses on dicts 'levels' times if necessary. A standard dict.update is levels=0
entailment
def write_statements(self, statements):
    """Insert statements into the file next to similar statements.

    Each statement is inserted just after the last existing line that
    begins with the same first word; statements with no such anchor
    are appended at the end of the file.

    Ex: inserting "good 'dog'" into a file containing
    "good 'cow'" / "nice 'man'" places it right after "good 'cow'".
    """
    self.seek(0)
    existing = self.readlines()
    updated = copy.copy(existing)
    # ignore blank entries and write in sorted order
    pending = sorted(stmt for stmt in statements if stmt)
    # the first word of each statement determines its insertion anchor
    first_words = {stmt.split(None, 1)[0] for stmt in pending}
    anchors = {}
    for line in reversed(existing):
        if not first_words:
            break
        if not line:
            continue
        for word in first_words.copy():
            if line.startswith(word):
                anchors[word] = existing.index(line) + 1
                first_words.remove(word)
    for stmt in pending:
        print("writing to %s : %s" % (self, stmt))
        word = stmt.split(None, 1)[0]
        # insert next to a similar statement OR at the end of the file
        updated.insert(anchors.get(word, len(updated)), stmt)
    if updated != existing:
        self.seek(0)
        self.writelines(updated)
        self.flush()
Insert the statements into the file neatly. Ex: statements = ["good 'dog'", "good 'cat'", "bad 'rat'", "fat 'emu'"] # stream.txt ... animals = FileWrapper(open('stream.txt')) good 'cow' nice 'man' bad 'news' animals.write_statements(statements) # stream.txt good 'cow' good 'dog' good 'cat' nice 'man' bad 'news' bad 'rat' fat 'emu'
entailment
def _fastfood_build(args):
    """Run on `fastfood build`: build the cookbook and report results."""
    written_files, cookbook = food.build_cookbook(
        args.config_file, args.template_pack,
        args.cookbooks, args.force)
    count = len(written_files)
    if count > 0:
        print("%s: %s files written" % (cookbook, count))
    else:
        print("%s up to date" % cookbook)
    return written_files, cookbook
Run on `fastfood build`.
entailment
def _fastfood_list(args):
    """Run on `fastfood list`: print stencil sets or a set's stencils."""
    template_pack = pack.TemplatePack(args.template_pack)
    if not args.stencil_set:
        print('Available Stencil Sets:')
        for name, vals in template_pack.stencil_sets.items():
            print("  %12s - %12s" % (name, vals['help']))
        return
    stencil_set = template_pack.load_stencil_set(args.stencil_set)
    print("Available Stencils for %s:" % args.stencil_set)
    for stencil in stencil_set.stencils:
        print("  %s" % stencil)
Run on `fastfood list`.
entailment
def _fastfood_show(args):
    """Run on `fastfood show`: print a stencil set's stencils/options."""
    template_pack = pack.TemplatePack(args.template_pack)
    if not args.stencil_set:
        return
    stencil_set = template_pack.load_stencil_set(args.stencil_set)
    print("Stencil Set %s:" % args.stencil_set)
    print('  Stencils:')
    for stencil in stencil_set.stencils:
        print("    %s" % stencil)
    print('  Options:')
    for opt, vals in stencil_set.manifest['options'].items():
        print("    %s - %s" % (opt, vals['help']))
Run on `fastfood show`.
entailment
def _release_info():
    """Check latest fastfood release info from PyPI.

    :returns: Parsed JSON metadata for the 'fastfood' project.
    """
    # fix: the legacy plain-HTTP pypi.python.org endpoint is gone;
    # PyPI's JSON API lives at https://pypi.org
    pypi_url = 'https://pypi.org/pypi/fastfood/json'
    headers = {
        'Accept': 'application/json',
    }
    request = urllib.Request(pypi_url, headers=headers)
    response = urllib.urlopen(request).read().decode('utf_8')
    return json.loads(response)
Check latest fastfood release info from PyPI.
entailment
def getenv(option_name, default=None):
    """Look up an option from the environment in the FASTFOOD namespace.

    The environment variable consulted is ``<NAMESPACE>_<OPTION_NAME>``
    upper-cased, e.g. ``FASTFOOD_COOKBOOKS``.
    """
    variable = "%s_%s" % (NAMESPACE.upper(), option_name.upper())
    return os.environ.get(variable, default)
Return the option from the environment in the FASTFOOD namespace.
entailment
def main(argv=None):
    """fastfood command line interface."""
    # pylint: disable=missing-docstring
    import argparse
    import traceback

    class HelpfulParser(argparse.ArgumentParser):
        # augments argparse's error with a hint on how to get help
        def error(self, message, print_help=False):
            if 'too few arguments' in message:
                sys.argv.insert(0, os.path.basename(sys.argv.pop(0)))
                message = ("%s. Try getting help with `%s -h`"
                           % (message, " ".join(sys.argv)))
            if print_help:
                self.print_help()
            sys.stderr.write('\nerror: %s\n' % message)
            sys.exit(2)

    parser = HelpfulParser(
        prog=NAMESPACE,
        description=__doc__.splitlines()[0],
        epilog="\n".join(__doc__.splitlines()[1:]),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    version_string = 'version %s' % fastfood.__version__
    parser.description = '%s ( %s )' % (parser.description, version_string)

    # version_group = subparsers.add_group()
    version_group = parser.add_argument_group(
        title='version info',
        description='Use these arguments to get version info.')
    vers_arg = version_group.add_argument(
        '-V', '--version', action='version',
        help="Return the current fastfood version.",
        version='%s %s' % (parser.prog, version_string))

    class LatestVersionAction(vers_arg.__class__):
        # queries PyPI and exits with the latest release info
        def __call__(self, prsr, *args, **kw):
            info = _release_info()
            vers = info['info']['version']
            release = info['releases'][vers][0]
            # NOTE(review): assumes `datetime` here is the class
            # (from datetime import datetime); confirm module imports
            uploaded = datetime.strptime(
                release['upload_time'], '%Y-%m-%dT%H:%M:%S')
            sym = EXCLAIM if vers != fastfood.__version__ else CHECK
            message = u"{} fastfood version {} uploaded {}\n"
            message = message.format(sym, vers, uploaded.ctime())
            prsr.exit(message=message)

    version_group.add_argument(
        '-L', '--latest', action=LatestVersionAction,
        help="Lookup the latest relase from PyPI.")

    verbose = parser.add_mutually_exclusive_group()
    verbose.add_argument('-v', dest='loglevel', action='store_const',
                         const=logging.INFO,
                         help="Set log-level to INFO.")
    verbose.add_argument('-vv', dest='loglevel', action='store_const',
                         const=logging.DEBUG,
                         help="Set log-level to DEBUG.")
    parser.set_defaults(loglevel=logging.WARNING)

    home = os.getenv('HOME') or os.path.expanduser('~') or os.getcwd()
    parser.add_argument(
        '--template-pack', help='template pack location',
        default=getenv('template_pack', os.path.join(home, '.fastfood')))
    parser.add_argument(
        '--cookbooks', help='cookbooks directory',
        default=getenv('cookbooks', os.path.join(home, 'cookbooks')))

    subparsers = parser.add_subparsers(
        dest='_subparsers', title='fastfood commands',
        description='operations...', help='...')

    # `fastfood list`
    list_parser = subparsers.add_parser(
        'list', help='List available stencils',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    list_parser.add_argument('stencil_set', nargs='?',
                             help="Stencil set to list stencils from")
    list_parser.set_defaults(func=_fastfood_list)

    # `fastfood show <stencil_set>`
    show_parser = subparsers.add_parser(
        'show', help='Show stencil set information',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    show_parser.add_argument('stencil_set',
                             help="Stencil set to list stencils from")
    show_parser.set_defaults(func=_fastfood_show)

    # `fastfood build`
    build_parser = subparsers.add_parser(
        'build', help='Create or update a cookbook using a config',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    build_parser.add_argument('config_file', help="JSON config file")
    build_parser.add_argument('--force', '-f', action='store_true',
                              default=False,
                              help="Overwrite existing files.")
    build_parser.set_defaults(func=_fastfood_build)

    setattr(_LOCAL, 'argparser', parser)
    if not argv:
        argv = None
    args = parser.parse_args(args=argv)
    if hasattr(args, 'options'):
        args.options = {k: v for k, v in args.options}
    logging.basicConfig(level=args.loglevel)
    try:
        args.func(args)
    except exc.FastfoodError as err:
        title = exc.get_friendly_title(err)
        print('%s %s: %s' % (RED_X, title, str(err)), file=sys.stderr)
        sys.stderr.flush()
        sys.exit(1)
    except Exception as err:
        print('%s Unexpected error. Please report this traceback.'
              % INTERROBANG, file=sys.stderr)
        traceback.print_exc()
        # todo: tracack in -v or -vv mode?
        sys.stderr.flush()
        sys.exit(1)
    except KeyboardInterrupt:
        sys.exit("\nStahp")
fastfood command line interface.
entailment
def get_friendly_title(err):
    """Turn class, instance, or name (str) into an eyeball-friendly title.

    E.g. FastfoodStencilSetNotListed --> 'Stencil Set Not Listed'
    """
    # fix: 'basestring' only exists on Python 2; fall back to str so
    # this also runs on Python 3
    try:
        string_types = basestring  # noqa -- Python 2
    except NameError:
        string_types = str  # Python 3
    if isinstance(err, string_types):
        string = err
    else:
        try:
            string = err.__name__
        except AttributeError:
            string = err.__class__.__name__
    split = _SPLITCASE_RE.findall(string)
    if not split:
        split.append(string)
    # drop the leading 'Fastfood' namespace word from the title
    if len(split) > 1 and split[0] == 'Fastfood':
        split.pop(0)
    return " ".join(split)
Turn class, instance, or name (str) into an eyeball-friendly title. E.g. FastfoodStencilSetNotListed --> 'Stencil Set Not Listed'
entailment
def _validate(self, key, cls=None): """Verify the manifest schema.""" if key not in self.manifest: raise ValueError("Manifest %s requires '%s'." % (self.manifest_path, key)) if cls: if not isinstance(self.manifest[key], cls): raise TypeError("Manifest value '%s' should be %s, not %s" % (key, cls, type(self.manifest[key])))
Verify the manifest schema.
entailment
def stencil_sets(self):
    """List of stencil sets declared in the manifest (cached)."""
    cached = self._stencil_sets
    if not cached:
        cached = self.manifest['stencil_sets']
        self._stencil_sets = cached
    return cached
List of stencil sets.
entailment
def load_stencil_set(self, stencilset_name):
    """Return (and cache) the named Stencil Set of this template pack.

    :raises exc.FastfoodStencilSetNotListed: If the name is not listed
        under 'stencil_sets' in the manifest.
    """
    if stencilset_name not in self._stencil_sets:
        if stencilset_name not in self.manifest['stencil_sets'].keys():
            raise exc.FastfoodStencilSetNotListed(
                "Stencil set '%s' not listed in %s under stencil_sets."
                % (stencilset_name, self.manifest_path))
        stencil_path = os.path.join(self.path, 'stencils', stencilset_name)
        self._stencil_sets[stencilset_name] = (
            stencil_module.StencilSet(stencil_path))
    return self._stencil_sets[stencilset_name]
Return the Stencil Set from this template pack.
entailment
def qstring(option):
    """Custom quoting method for jinja.

    Node-attribute and Chef-constant expressions are left unquoted;
    everything else is wrapped in single quotes.
    """
    is_node_attr = re.match(NODE_ATTR_RE, option) is not None
    is_chef_const = re.match(CHEF_CONST_RE, option) is not None
    if is_node_attr or is_chef_const:
        return option
    return "'%s'" % option
Custom quoting method for jinja.
entailment
def render_templates_generator(*files, **template_map):
    """Render jinja templates according to template_map.

    Yields (path, rendered_text) for each template file.

    :raises ValueError: If a template file does not exist.
    """
    for path in files:
        if not os.path.isfile(path):
            raise ValueError("Template file %s not found"
                             % os.path.relpath(path))
        try:
            with codecs.open(path, encoding='utf-8') as handle:
                template = JINJA_ENV.from_string(handle.read())
        except jinja2.TemplateSyntaxError as err:
            # re-raise with the offending file's name in the message
            msg = ("Error rendering jinja2 template for file %s "
                   "on line %s. Error: %s"
                   % (path, err.lineno, err.message))
            raise type(err)(
                msg, err.lineno, filename=os.path.basename(path))
        result = template.render(**template_map)
        # rendered output is expected to end with a newline
        if not result.endswith('\n'):
            result += '\n'
        yield path, result
Render jinja templates according to template_map. Yields (path, result)
entailment
def generate(self, model, outfolder):
    """Generate artifacts for the given model.

    Attributes:
        model: Model for which to generate code.
        outfolder: Folder where code files are created.
    """
    _logger.info('Generating code to {!r}.'.format(outfolder))
    # each task runs once per model element it selects
    for task in self.tasks:
        for element in task.filtered_elements(model):
            task.run(element, outfolder)
Generate artifacts for given model. Attributes: model: Model for which to generate code. outfolder: Folder where code files are created.
entailment
def run(self, element, outfolder):
    """Apply this generation task to a single model element.

    Resolves the element's output path (relative paths are rooted in
    outfolder), creates the parent folder, and generates the file.
    """
    filepath = self.relative_path_for_element(element)
    if outfolder and not os.path.isabs(filepath):
        filepath = os.path.join(outfolder, filepath)
    _logger.debug('{!r} --> {!r}'.format(element, filepath))
    self.ensure_folder(filepath)
    self.generate_file(element, filepath)
Apply this task to model element.
entailment
def create_template_context(self, element, **kwargs):
    """Build the code generation context for the current element.

    Starts from the element plus keyword extras and layers the task's
    global context (if any) on top.
    """
    ctx = dict(element=element, **kwargs)
    overrides = self.global_context
    if overrides:
        ctx.update(**overrides)
    return ctx
Code generation context, specific to template and current element.
entailment
def _validated(self, value):
    """Coerce value to a bson.ObjectId or fail with 'invalid_object_id'.

    None and existing ObjectId values pass through unchanged; anything
    else is handed to the ObjectId constructor.
    """
    # None and already-valid ObjectIds are returned as-is
    if value is None or isinstance(value, bson.ObjectId):
        return value
    try:
        return bson.ObjectId(value)
    except (ValueError, AttributeError):
        self.fail('invalid_object_id')
Format the value or raise a :exc:`ValidationError` if an error occurs.
entailment
def create_environment(self, **kwargs):
    """Return a new Jinja environment.

    Derived classes may override this method to pass additional
    parameters or to change the template loader type.
    """
    loader = jinja2.FileSystemLoader(self.templates_path)
    return jinja2.Environment(loader=loader, **kwargs)
Return a new Jinja environment. Derived classes may override method to pass additional parameters or to change the template loader type.
entailment
def pairs(a):
    """Return an array of pairs of adjacent elements in a.

    >>> pairs([1, 2, 3, 4])
    array([[1, 2],
           [2, 3],
           [3, 4]])
    """
    arr = np.asarray(a)
    # zero-copy sliding window: each row reuses the element strides,
    # so consecutive rows overlap by one element
    return as_strided(arr, shape=(arr.size - 1, 2), strides=arr.strides * 2)
Return array of pairs of adjacent elements in a. >>> pairs([1, 2, 3, 4]) array([[1, 2], [2, 3], [3, 4]])
entailment
def transcript_sort_key(transcript):
    """Key function used to sort transcripts.

    Protein and nucleotide sequence lengths are negated so the longest
    sequences come first while the name still sorts ascending (placing
    TP53-001 before TP53-002) -- something a simple reverse=True could
    not achieve.
    """
    protein_len = len(transcript.protein_sequence)
    nucleotide_len = len(transcript.sequence)
    return (-protein_len, -nucleotide_len, transcript.name)
Key function used to sort transcripts. Taking the negative of protein sequence length and nucleotide sequence length so that the transcripts with longest sequences come first in the list. This couldn't be accomplished with `reverse=True` since we're also sorting by transcript name (which places TP53-001 before TP53-002).
entailment
def best_transcript(transcripts):
    """Pick the best transcript from a non-empty set of coding transcripts.

    Chooses the one with the longest protein sequence; ties are broken
    by transcript sequence length (including UTRs) and then by
    transcript name (so TP53-001 comes before TP53-202).

    :raises ValueError: If transcripts is empty. (The original used a
        bare assert, which is stripped under `python -O`.)
    """
    if not transcripts:
        raise ValueError("Expected at least one transcript")
    # min() with the sort key is O(n) and avoids sorting the whole
    # collection just to take the first element
    return min(transcripts, key=transcript_sort_key)
Given a set of coding transcripts, choose the one with the longest protein sequence and in cases of ties use the following tie-breaking criteria: - transcript sequence (including UTRs) - transcript name (so TP53-001 should come before TP53-202)
entailment
def predict_epitopes_from_args(args):
    """Return an epitope collection from the given commandline arguments.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed commandline arguments for Topiary
    """
    # resolve the inputs up front (each helper may read files)
    mhc_model = mhc_binding_predictor_from_args(args)
    variants = variant_collection_from_args(args)
    gene_expression_dict = rna_gene_expression_dict_from_args(args)
    transcript_expression_dict = (
        rna_transcript_expression_dict_from_args(args))
    predictor = TopiaryPredictor(
        mhc_model=mhc_model,
        padding_around_mutation=args.padding_around_mutation,
        ic50_cutoff=args.ic50_cutoff,
        percentile_cutoff=args.percentile_cutoff,
        min_transcript_expression=args.rna_min_transcript_expression,
        min_gene_expression=args.rna_min_gene_expression,
        only_novel_epitopes=args.only_novel_epitopes,
        raise_on_error=not args.skip_variant_errors)
    return predictor.predict_from_variants(
        variants=variants,
        transcript_expression_dict=transcript_expression_dict,
        gene_expression_dict=gene_expression_dict)
Returns an epitope collection from the given commandline arguments. Parameters ---------- args : argparse.Namespace Parsed commandline arguments for Topiary
entailment
def get_random_word(dictionary, min_word_length=3, max_word_length=8):
    """Return a random word from the dictionary within the length bounds.

    :param dictionary: Sequence of candidate words.
    :param min_word_length: Minimum acceptable word length (inclusive).
    :param max_word_length: Maximum acceptable word length (inclusive).
    :raises ValueError: If no word satisfies the length bounds. (The
        original retried forever in that case, hanging the caller.)
    """
    # filter once instead of sampling and retrying in a potentially
    # infinite loop
    candidates = [word for word in dictionary
                  if min_word_length <= len(word) <= max_word_length]
    if not candidates:
        raise ValueError(
            "No word in the dictionary has a length between %d and %d"
            % (min_word_length, max_word_length))
    return choice(candidates)
Returns a random word from the dictionary
entailment
def pw(min_word_length=3, max_word_length=8, max_int_value=1000,
       number_of_elements=4, no_special_characters=False):
    """Generate a password of title-cased words plus one random integer.

    Elements are joined with random separators; the integer's position
    within the elements is chosen at random.
    """
    # choose where the integer goes among the elements
    int_position = set_int_position(number_of_elements)
    dictionary = load_dictionary()
    parts = []
    for index in range(number_of_elements):
        if index == int_position:
            parts.append(str(get_random_int(max_int_value)))
        else:
            word = get_random_word(dictionary, min_word_length,
                                   max_word_length)
            parts.append(word.title())
        # a separator follows every element except the last
        if index != number_of_elements - 1:
            parts.append(get_random_separator(no_special_characters))
    return ''.join(parts)
Generate a password
entailment
def main():
    """Command line entry point: parse options and print one password."""
    # options are stored module-wide, as in the original
    global args
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--min_word_length", type=int,
                        help="Minimum length for each word", default=3)
    parser.add_argument("-x", "--max_word_length", type=int,
                        help="Maximum length for each word", default=8)
    parser.add_argument("-i", "--max_int_value", type=int,
                        help="Maximum value for the integer", default=1000)
    parser.add_argument("-e", "--number_of_elements", type=int,
                        help="Number of elements in the password "
                             "(ie. 4 = 3 words + 1 integer)",
                        default=4)
    parser.add_argument("-s", "--no_special_characters",
                        action='store_true',
                        help="Do not use special characters")
    args = parser.parse_args()
    # generate and print a password with the requested settings
    print(pw(min_word_length=args.min_word_length,
             max_word_length=args.max_word_length,
             max_int_value=args.max_int_value,
             number_of_elements=args.number_of_elements,
             no_special_characters=args.no_special_characters))
Main method
entailment
def protein_subsequences_around_mutations(effects, padding_around_mutation):
    """
    Extract, for each effect with a predicted mutant protein sequence, the
    subsequence surrounding the mutation (with the given padding on each
    side).

    Returns two dicts keyed by effect: the subsequences, and the offset at
    which each subsequence starts within the full mutant protein.
    """
    subsequences = {}
    start_offsets = {}
    for effect in effects:
        full_sequence = effect.mutant_protein_sequence
        # Silent or unpredictable effects have no mutant protein sequence
        if not full_sequence:
            continue
        start = max(
            0, effect.aa_mutation_start_offset - padding_around_mutation)
        # Trim at the first stop codon ('*'), which can appear in the
        # reference sequence of some pseudogenes
        stop_index = full_sequence.find("*")
        if stop_index < 0:
            stop_index = len(full_sequence)
        end = min(
            stop_index,
            effect.aa_mutation_end_offset + padding_around_mutation)
        subsequences[effect] = full_sequence[start:end]
        start_offsets[effect] = start
    return subsequences, start_offsets
From each effect get a mutant protein sequence and pull out a subsequence around the mutation (based on the given padding). Returns a dictionary of subsequences and a dictionary of subsequence start offsets.
entailment
def check_padding_around_mutation(given_padding, epitope_lengths):
    """
    Determine how many non-mutated residues to include around a mutation.

    When no padding is given, fall back to the minimum needed to build
    candidate epitopes of the requested lengths; otherwise validate that the
    supplied padding is an integer large enough for those lengths.
    """
    min_required_padding = max(epitope_lengths) - 1
    if not given_padding:
        return min_required_padding
    require_integer(given_padding, "Padding around mutation")
    if given_padding < min_required_padding:
        raise ValueError(
            "Padding around mutation %d cannot be less than %d "
            "for epitope lengths %s" % (
                given_padding,
                min_required_padding,
                epitope_lengths))
    return given_padding
If user doesn't provide any padding around the mutation we need to at least include enough of the surrounding non-mutated residues to construct candidate epitopes of the specified lengths.
entailment
def peptide_mutation_interval(
        peptide_start_in_protein,
        peptide_length,
        mutation_start_in_protein,
        mutation_end_in_protein):
    """
    Return the half-open [start, end) interval of mutated residues within a
    peptide, given the mutation's interval in the full protein sequence.

    Parameters
    ----------
    peptide_start_in_protein : int
        0-based position of the peptide's first residue within the protein

    peptide_length : int

    mutation_start_in_protein : int
        0-based position of the first mutated residue (for a deletion, the
        position where the first deleted residue had been)

    mutation_end_in_protein : int
        Position of the last mutated residue in the mutant protein (for a
        deletion, equal to mutation_start_in_protein)
    """
    if peptide_start_in_protein > mutation_end_in_protein:
        raise ValueError("Peptide starts after mutation")
    if peptide_start_in_protein + peptide_length < mutation_start_in_protein:
        raise ValueError("Peptide ends before mutation")

    def clamp(offset):
        # Restrict a peptide-relative offset to [0, peptide_length]
        return min(peptide_length, max(0, offset))

    return (clamp(mutation_start_in_protein - peptide_start_in_protein),
            clamp(mutation_end_in_protein - peptide_start_in_protein))
Half-open interval of mutated residues in the peptide, determined from the mutation interval in the original protein sequence. Parameters ---------- peptide_start_in_protein : int Position of the first peptide residue within the protein (starting from 0) peptide_length : int mutation_start_in_protein : int Position of the first mutated residue starting from 0. In the case of a deletion, the position where the first residue had been. mutation_end_in_protein : int Position of the last mutated residue in the mutant protein. In the case of a deletion, this is equal to the mutation_start_in_protein. )
entailment
def from_dict(cls, indicator):
    """
    Build an |Indicator| from its dictionary representation.

    :param indicator: The dictionary.
    :return: The indicator object.
    """
    raw_tags = indicator.get('tags')
    # Deserialize nested tag dicts into Tag objects when present
    tags = None if raw_tags is None else [Tag.from_dict(t) for t in raw_tags]
    return Indicator(value=indicator.get('value'),
                     type=indicator.get('indicatorType'),
                     priority_level=indicator.get('priorityLevel'),
                     correlation_count=indicator.get('correlationCount'),
                     whitelisted=indicator.get('whitelisted'),
                     weight=indicator.get('weight'),
                     reason=indicator.get('reason'),
                     first_seen=indicator.get('firstSeen'),
                     last_seen=indicator.get('lastSeen'),
                     sightings=indicator.get('sightings'),
                     source=indicator.get('source'),
                     notes=indicator.get('notes'),
                     tags=tags,
                     enclave_ids=indicator.get('enclaveIds'))
Create an indicator object from a dictionary. :param indicator: The dictionary. :return: The indicator object.
entailment
def to_dict(self, remove_nones=False):
    """
    Creates a dictionary representation of the indicator.

    :param remove_nones: Whether ``None`` values should be filtered out of the
        dictionary.  Defaults to ``False``.
    :return: A dictionary representation of the indicator.
    """
    if remove_nones:
        return super().to_dict(remove_nones=True)

    tags = None
    if self.tags is not None:
        tags = [tag.to_dict(remove_nones=remove_nones) for tag in self.tags]

    return {
        'value': self.value,
        'indicatorType': self.type,
        'priorityLevel': self.priority_level,
        'correlationCount': self.correlation_count,
        'whitelisted': self.whitelisted,
        'weight': self.weight,
        'reason': self.reason,
        'firstSeen': self.first_seen,
        'lastSeen': self.last_seen,
        # 'sightings' was previously omitted here even though from_dict
        # reads it, so round-trips silently dropped the value.
        'sightings': self.sightings,
        'source': self.source,
        'notes': self.notes,
        'tags': tags,
        'enclaveIds': self.enclave_ids
    }
Creates a dictionary representation of the indicator. :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``. :return: A dictionary representation of the indicator.
entailment
def _keep_alert(alert):
    """
    Return True when the alert should be kept, i.e. it is NOT a FireEye
    test event (a 'fetestevent' virus distinguisher).
    """
    if 'distinguishers' not in alert:
        return True
    try:
        if 'virus' in alert['distinguishers']:
            return alert['distinguishers']['virus'] != 'fetestevent'
        return True
    except TypeError:
        # Non-container 'distinguishers' values are treated as keepable,
        # matching the original implementation's TypeError handling.
        return True


def filter_false_positive(df, process_time):
    """
    Filter FireEye (FE) alerts, dropping FE test events and alerts closed
    as 'False Positive'.

    Skipped alerts are written to ``tracking_fetest_<process_time>.txt``
    for auditing.

    :param df: dict with an 'alerts' key holding the list of FE alert dicts
    :param process_time: timestamp string used to name the tracking file
    :return: list of alerts that passed the filters
    """
    result = []
    track = []
    for alert in df['alerts']:
        if 'closedState' in alert:
            if alert['closedState'] == 'False Positive':
                track.append(alert)  # closed as false positive
            elif _keep_alert(alert):
                result.append(alert)
            else:
                track.append(alert)  # FE test event
        elif 'distinguishers' in alert:
            if _keep_alert(alert):
                result.append(alert)
            else:
                track.append(alert)  # FE test event
        # NOTE(review): alerts with neither 'closedState' nor
        # 'distinguishers' are silently dropped here, matching the original
        # behavior -- confirm this is intended.
    # 'with' guarantees the tracking file is closed (the original leaked
    # the file handle).
    with open('tracking_fetest_' + process_time + '.txt', 'w') as trackfile:
        for number, alert in enumerate(track, start=1):
            trackfile.write("\n\n**** {:d}: Display ID {} ****\n\n{}".format(
                number, alert['displayId'], alert))
    return result
method that takes in FireEye (FE) alerts and filters FE-tests and False Positives :param process_time: :param df: :return:
entailment
def filter_webapp_attack(df, process_time):
    """
    Drop FireEye alerts whose message flags a generic web-app attack
    ('METHODOLOGY - WEB APP ATTACK', e.g. BASH SHELLSHOCK noise).

    Skipped alerts are written to ``tracking_webAppAttack_<process_time>.txt``
    for auditing.

    :param df: iterable of FE alert dicts (each with 'message' and 'displayId')
    :param process_time: timestamp string used to name the tracking file
    :return: list of alerts that passed the filter
    """
    result = []
    track = []
    for alert in df:
        if 'METHODOLOGY - WEB APP ATTACK' in alert['message']:
            track.append(alert)
        else:
            result.append(alert)
    # 'with' guarantees the tracking file is closed (the original leaked
    # the file handle).
    with open('tracking_webAppAttack_' + process_time + '.txt', 'w') as trackfile:
        for number, alert in enumerate(track, start=1):
            trackfile.write("\n\n**** {:d}: Display ID {} ****\n\n{}".format(
                number, alert['displayId'], alert))
    return result
A function that filters out the BASH SHELLSHOCK alert data obtained from FireEye :param df: a DataFrame object :param process_time: :return:
entailment
def process_alert(file_name):
    """
    Strip the leading wrapper object from a raw FireEye alert file so the
    remainder can be parsed as JSON.

    Drops everything through the first '}' plus the following character
    (assumed to be the separating comma) and re-opens the object with '{'.

    :param file_name: path of the raw FE alert file
    :return: the transformed string, ready for JSON parsing
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(file_name, 'r') as raw_file:
        raw = raw_file.read()
    first_close = raw.find("}")
    # NOTE(review): if no '}' is present, find() returns -1 and the slice
    # starts at index 1 -- matching the original behavior.
    return "{" + raw[first_close + 2:]
A function that removes the alerts property from the FireEye alert and transform the data into a JSON ready format :param file_name: :return:
entailment
def apply_filter(
        filter_fn,
        collection,
        result_fn=None,
        filter_name="",
        collection_name=""):
    """
    Apply a predicate to a collection, log how many elements were dropped,
    and rebuild the result via result_fn (or the collection's own class).
    """
    kept = [element for element in collection if filter_fn(element)]
    name = collection_name or collection.__class__.__name__
    logging.info(
        "%s filtering removed %d/%d entries of %s",
        filter_name,
        len(collection) - len(kept),
        len(collection),
        name)
    if result_fn:
        return result_fn(kept)
    return collection.__class__(kept)
Apply filter to effect collection and print number of dropped elements Parameters ----------
entailment
def filter_silent_and_noncoding_effects(effects):
    """
    Drop effects that do not change the protein, keeping only nonsilent
    coding mutations.

    Parameters
    ----------
    effects : varcode.EffectCollection
    """
    def is_nonsilent(effect):
        return isinstance(effect, NonsilentCodingMutation)

    return apply_filter(
        filter_fn=is_nonsilent,
        collection=effects,
        result_fn=effects.clone_with_new_elements,
        filter_name="Silent mutation")
Keep only variant effects which result in modified proteins. Parameters ---------- effects : varcode.EffectCollection
entailment
def apply_variant_expression_filters(
        variants,
        gene_expression_dict,
        gene_expression_threshold,
        transcript_expression_dict,
        transcript_expression_threshold):
    """
    Drop variants whose genes (and/or transcripts) fall below the given
    expression thresholds.  Each filter is only applied when its expression
    dict is provided.

    Parameters
    ----------
    variants : varcode.VariantCollection

    gene_expression_dict : dict

    gene_expression_threshold : float

    transcript_expression_dict : dict

    transcript_expression_threshold : float
    """
    if gene_expression_dict:
        def gene_expressed(variant):
            # Keep the variant if ANY of its genes meets the threshold
            return any(
                gene_expression_dict.get(gene_id, 0.0) >=
                gene_expression_threshold
                for gene_id in variant.gene_ids)
        variants = apply_filter(
            gene_expressed,
            variants,
            result_fn=variants.clone_with_new_elements,
            filter_name="Variant gene expression (min=%0.4f)" % (
                gene_expression_threshold))
    if transcript_expression_dict:
        def transcript_expressed(variant):
            # Keep the variant if ANY of its transcripts meets the threshold
            return any(
                transcript_expression_dict.get(transcript_id, 0.0) >=
                transcript_expression_threshold
                for transcript_id in variant.transcript_ids)
        variants = apply_filter(
            transcript_expressed,
            variants,
            result_fn=variants.clone_with_new_elements,
            filter_name=(
                "Variant transcript expression (min=%0.4f)" % (
                    transcript_expression_threshold,)))
    return variants
Filter a collection of variants by gene and transcript expression thresholds Parameters ---------- variants : varcode.VariantCollection gene_expression_dict : dict gene_expression_threshold : float transcript_expression_dict : dict transcript_expression_threshold : float
entailment
def apply_effect_expression_filters(
        effects,
        gene_expression_dict,
        gene_expression_threshold,
        transcript_expression_dict,
        transcript_expression_threshold):
    """
    Drop effects whose gene (and/or transcript) expression falls below the
    given thresholds.  Each filter is only applied when its expression dict
    is provided.

    Parameters
    ----------
    effects : varcode.EffectCollection

    gene_expression_dict : dict

    gene_expression_threshold : float

    transcript_expression_dict : dict

    transcript_expression_threshold : float
    """
    if gene_expression_dict:
        def gene_expressed(effect):
            return (
                gene_expression_dict.get(effect.gene_id, 0.0) >=
                gene_expression_threshold)
        effects = apply_filter(
            gene_expressed,
            effects,
            result_fn=effects.clone_with_new_elements,
            filter_name="Effect gene expression (min = %0.4f)" % (
                gene_expression_threshold))
    if transcript_expression_dict:
        def transcript_expressed(effect):
            return (
                transcript_expression_dict.get(effect.transcript_id, 0.0) >=
                transcript_expression_threshold)
        effects = apply_filter(
            transcript_expressed,
            effects,
            result_fn=effects.clone_with_new_elements,
            filter_name=(
                "Effect transcript expression (min=%0.4f)" % (
                    transcript_expression_threshold,)))
    return effects
Filter collection of varcode effects by given gene and transcript expression thresholds. Parameters ---------- effects : varcode.EffectCollection gene_expression_dict : dict gene_expression_threshold : float transcript_expression_dict : dict transcript_expression_threshold : float
entailment
def submit_indicators(self, indicators, enclave_ids=None, tags=None):
    """
    Submit indicators directly to a set of enclaves.

    Each |Indicator| requires ``value``; the optional metadata fields are
    ``first_seen``, ``last_seen``, ``sightings``, ``notes``, ``source``,
    and ``tags`` (per-indicator tags take precedence over submission-level
    tags).  Tags are identified by ``name`` and ``enclaveId``, both of
    which must be specified; passing the GUID of an existing tag is not
    allowed.

    See https://docs.trustar.co/api/v13/indicators/submit_indicators.html

    :param list(Indicator) indicators: the |Indicator| objects to submit.
    :param list(string) enclave_ids: enclave IDs to submit to (defaults to
        the client's configured enclaves).
    :param list(Tag) tags: |Tag| objects applied to ALL indicators in the
        submission.
    """
    enclave_ids = self.enclave_ids if enclave_ids is None else enclave_ids
    tag_dicts = None if tags is None else [tag.to_dict() for tag in tags]
    payload = {
        "enclaveIds": enclave_ids,
        "content": [indicator.to_dict() for indicator in indicators],
        "tags": tag_dicts
    }
    self._client.post("indicators", data=json.dumps(payload))
Submit indicators directly. The indicator field ``value`` is required; all other metadata fields are optional: ``firstSeen``, ``lastSeen``, ``sightings``, ``notes``, and ``source``. The submission must specify enclaves for the indicators to be submitted to, and can optionally specify tags to assign to all the indicators in the submission, and/or include individual tags in each Indicator (which will take precedence over the submission tags). The tags can be existing or new, and are identified by ``name`` and ``enclaveId``. (Passing the GUID of an existing tag is not allowed. ``name`` and ``enclaveId`` must be specified for each tag.) This function uses the API endpoint discussed here: https://docs.trustar.co/api/v13/indicators/submit_indicators.html Note that |Indicator| class attribute names are often slightly different from the API endpoint's parameters. (EX: The |Indicator| class's ``first_seen`` attribute corresponds to the API endpoint's ``firstSeen`` parameter.) :param list(Indicator) indicators: a list of |Indicator| objects. Indicator's ``value`` is required, all other attributes can be Null. These |Indicator| attributes can be modified / updated using this function: ``value``, ``first_seen``, ``last_seen``, ``sightings``, ``source``, ``notes``, and ``tags``. No other |Indicator| attributes can be modified in TruSTAR by using this function. :param list(string) enclave_ids: a list of enclave IDs. :param list(string) tags: a list of |Tag| objects that will be applied to ALL indicators in the submission.
entailment
def get_indicators(self,
                   from_time=None,
                   to_time=None,
                   enclave_ids=None,
                   included_tag_ids=None,
                   excluded_tag_ids=None,
                   start_page=0,
                   page_size=None):
    """
    Yield indicators matching the given filters one at a time, paging
    through |get_indicators_page| behind the scenes.

    Each yielded |Indicator| has only its ``value`` and ``type`` attributes
    populated; every other attribute is ``None``.

    :param int from_time: start of time window in milliseconds since epoch
        (defaults to 7 days ago).
    :param int to_time: end of time window in milliseconds since epoch
        (defaults to current time).
    :param list(string) enclave_ids: enclave IDs to get indicators from.
    :param list(string) included_tag_ids: only indicators containing ALL of
        these tag GUIDs will be returned.
    :param list(string) excluded_tag_ids: only indicators containing NONE
        of these tag GUIDs will be returned.
    :param int start_page: the page number to start from.
    :param int page_size: indicators per underlying API call; a larger
        value (up to 1000) means fewer API calls.
    :return: a generator of |Indicator| objects.
    """
    page_generator = self._get_indicators_page_generator(
        from_time=from_time,
        to_time=to_time,
        enclave_ids=enclave_ids,
        included_tag_ids=included_tag_ids,
        excluded_tag_ids=excluded_tag_ids,
        page_number=start_page,
        page_size=page_size)
    return Page.get_generator(page_generator=page_generator)
Creates a generator from the |get_indicators_page| method that returns each successive indicator as an |Indicator| object containing values for the 'value' and 'type' attributes only; all other |Indicator| object attributes will contain Null values. :param int from_time: start of time window in milliseconds since epoch (defaults to 7 days ago). :param int to_time: end of time window in milliseconds since epoch (defaults to current time). :param list(string) enclave_ids: a list of enclave IDs from which to get indicators from. :param list(string) included_tag_ids: only indicators containing ALL of these tag GUIDs will be returned. :param list(string) excluded_tag_ids: only indicators containing NONE of these tags GUIDs be returned. :param int start_page: see 'page_size' explanation. :param int page_size: Passing the integer 1000 as the argument to this parameter should result in your script making fewer API calls because it returns the largest quantity of indicators with each API call. An API call has to be made to fetch each |Page|. :return: A generator of |Indicator| objects containing values for the "value" and "type" attributes only. All other attributes of the |Indicator| object will contain Null values.
entailment
def _get_indicators_page_generator(self,
                                   from_time=None,
                                   to_time=None,
                                   page_number=0,
                                   page_size=None,
                                   enclave_ids=None,
                                   included_tag_ids=None,
                                   excluded_tag_ids=None):
    """
    Build a generator yielding successive |Page| objects of |Indicator|
    results from |get_indicators_page|.

    :param int from_time: start of time window in milliseconds since epoch
        (defaults to 7 days ago)
    :param int to_time: end of time window in milliseconds since epoch
        (defaults to current time)
    :param int page_number: the starting page number
    :param int page_size: the page size
    :param list(string) enclave_ids: a list of enclave IDs to filter by
    :param list(string) included_tag_ids: only indicators containing ALL of
        these tags will be returned
    :param list(string) excluded_tag_ids: only indicators containing NONE
        of these tags will be returned
    :return: a generator of |Page| objects
    """
    fetch_page = functools.partial(
        self.get_indicators_page,
        from_time=from_time,
        to_time=to_time,
        page_number=page_number,
        page_size=page_size,
        enclave_ids=enclave_ids,
        included_tag_ids=included_tag_ids,
        excluded_tag_ids=excluded_tag_ids)
    return Page.get_page_generator(fetch_page, page_number, page_size)
Creates a generator from the |get_indicators_page| method that returns each successive page. :param int from_time: start of time window in milliseconds since epoch (defaults to 7 days ago) :param int to_time: end of time window in milliseconds since epoch (defaults to current time) :param int page_number: the page number :param int page_size: the page size :param list(string) enclave_ids: a list of enclave IDs to filter by :param list(string) included_tag_ids: only indicators containing ALL of these tags will be returned :param list(string) excluded_tag_ids: only indicators containing NONE of these tags will be returned :return: a |Page| of |Indicator| objects
entailment
def get_indicators_page(self,
                        from_time=None,
                        to_time=None,
                        page_number=None,
                        page_size=None,
                        enclave_ids=None,
                        included_tag_ids=None,
                        excluded_tag_ids=None):
    """
    Fetch a single |Page| of indicators matching the provided filters.

    :param int from_time: start of time window in milliseconds since epoch
        (defaults to 7 days ago)
    :param int to_time: end of time window in milliseconds since epoch
        (defaults to current time)
    :param int page_number: the page number
    :param int page_size: the page size
    :param list(string) enclave_ids: a list of enclave IDs to filter by
    :param list(string) included_tag_ids: only indicators containing ALL of
        these tags will be returned
    :param list(string) excluded_tag_ids: only indicators containing NONE
        of these tags will be returned
    :return: a |Page| of |Indicator| objects
    """
    query_params = {
        'from': from_time,
        'to': to_time,
        'pageSize': page_size,
        'pageNumber': page_number,
        'enclaveIds': enclave_ids,
        'tagIds': included_tag_ids,
        'excludedTagIds': excluded_tag_ids
    }
    response = self._client.get("indicators", params=query_params)
    return Page.from_dict(response.json(), content_type=Indicator)
Get a page of indicators matching the provided filters. :param int from_time: start of time window in milliseconds since epoch (defaults to 7 days ago) :param int to_time: end of time window in milliseconds since epoch (defaults to current time) :param int page_number: the page number :param int page_size: the page size :param list(string) enclave_ids: a list of enclave IDs to filter by :param list(string) included_tag_ids: only indicators containing ALL of these tags will be returned :param list(string) excluded_tag_ids: only indicators containing NONE of these tags will be returned :return: a |Page| of indicators
entailment
def search_indicators(self,
                      search_term=None,
                      enclave_ids=None,
                      from_time=None,
                      to_time=None,
                      indicator_types=None,
                      tags=None,
                      excluded_tags=None):
    """
    Yield indicators matching a search term one at a time, paging through
    |search_indicators_page| behind the scenes.

    :param str search_term: the term to search for; if empty, no search
        term is applied, otherwise it must be at least 3 characters.
    :param list(str) enclave_ids: restrict indicators to specific enclaves
        (defaults to all of the user's enclaves).
    :param int from_time: start of time window in milliseconds since epoch
        (optional)
    :param int to_time: end of time window in milliseconds since epoch
        (optional)
    :param list(str) indicator_types: indicator types to filter by
        (optional)
    :param list(str) tags: only indicators containing ALL of these tag
        names will be returned (optional).
    :param list(str) excluded_tags: indicators containing ANY of these
        tags will be excluded from the results.
    :return: a generator of |Indicator| objects.
    """
    page_generator = self._search_indicators_page_generator(
        search_term,
        enclave_ids,
        from_time,
        to_time,
        indicator_types,
        tags,
        excluded_tags)
    return Page.get_generator(page_generator=page_generator)
Uses the |search_indicators_page| method to create a generator that returns each successive indicator. :param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must be at least 3 characters. :param list(str) enclave_ids: list of enclave ids used to restrict indicators to specific enclaves (optional - by default indicators from all of user's enclaves are returned) :param int from_time: start of time window in milliseconds since epoch (optional) :param int to_time: end of time window in milliseconds since epoch (optional) :param list(str) indicator_types: a list of indicator types to filter by (optional) :param list(str) tags: Name (or list of names) of tag(s) to filter indicators by. Only indicators containing ALL of these tags will be returned. (optional) :param list(str) excluded_tags: Indicators containing ANY of these tags will be excluded from the results. :return: The generator.
entailment
def _search_indicators_page_generator(self,
                                      search_term=None,
                                      enclave_ids=None,
                                      from_time=None,
                                      to_time=None,
                                      indicator_types=None,
                                      tags=None,
                                      excluded_tags=None,
                                      start_page=0,
                                      page_size=None):
    """
    Build a generator yielding successive |Page| objects of search results
    from |search_indicators_page|.

    :param str search_term: the term to search for; if empty, no search
        term is applied, otherwise it must be at least 3 characters.
    :param list(str) enclave_ids: restrict indicators to specific enclaves
        (defaults to all of the user's enclaves).
    :param int from_time: start of time window in milliseconds since epoch
        (optional)
    :param int to_time: end of time window in milliseconds since epoch
        (optional)
    :param list(str) indicator_types: indicator types to filter by
        (optional)
    :param list(str) tags: only indicators containing ALL of these tag
        names will be returned (optional).
    :param list(str) excluded_tags: indicators containing ANY of these
        tags will be excluded from the results.
    :param int start_page: the page to start on.
    :param int page_size: the size of each page.
    :return: a generator of |Page| objects.
    """
    fetch_page = functools.partial(
        self.search_indicators_page,
        search_term,
        enclave_ids,
        from_time,
        to_time,
        indicator_types,
        tags,
        excluded_tags)
    return Page.get_page_generator(fetch_page, start_page, page_size)
Creates a generator from the |search_indicators_page| method that returns each successive page. :param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must be at least 3 characters. :param list(str) enclave_ids: list of enclave ids used to restrict indicators to specific enclaves (optional - by default indicators from all of user's enclaves are returned) :param int from_time: start of time window in milliseconds since epoch (optional) :param int to_time: end of time window in milliseconds since epoch (optional) :param list(str) indicator_types: a list of indicator types to filter by (optional) :param list(str) tags: Name (or list of names) of tag(s) to filter indicators by. Only indicators containing ALL of these tags will be returned. (optional) :param list(str) excluded_tags: Indicators containing ANY of these tags will be excluded from the results. :param int start_page: The page to start on. :param page_size: The size of each page. :return: The generator.
entailment
def search_indicators_page(self,
                           search_term=None,
                           enclave_ids=None,
                           from_time=None,
                           to_time=None,
                           indicator_types=None,
                           tags=None,
                           excluded_tags=None,
                           page_size=None,
                           page_number=None):
    """
    Fetch a single |Page| of indicators matching a search term.

    :param str search_term: the term to search for; if empty, no search
        term is applied, otherwise it must be at least 3 characters.
    :param list(str) enclave_ids: restrict to indicators found in reports
        in specific enclaves (defaults to all of the user's enclaves).
    :param int from_time: start of time window in milliseconds since epoch
        (optional)
    :param int to_time: end of time window in milliseconds since epoch
        (optional)
    :param list(str) indicator_types: indicator types to filter by
        (optional)
    :param list(str) tags: only indicators containing ALL of these tag
        names will be returned (optional).
    :param list(str) excluded_tags: indicators containing ANY of these
        tags will be excluded from the results.
    :param int page_number: the page number to get.
    :param int page_size: the size of the page to be returned.
    :return: a |Page| of |Indicator| objects.
    """
    query_params = {
        'enclaveIds': enclave_ids,
        'from': from_time,
        'to': to_time,
        'entityTypes': indicator_types,
        'tags': tags,
        'excludedTags': excluded_tags,
        'pageSize': page_size,
        'pageNumber': page_number
    }
    # The search term travels in the request body; all filters are query params
    response = self._client.post(
        "indicators/search",
        params=query_params,
        data=json.dumps({'searchTerm': search_term}))
    return Page.from_dict(response.json(), content_type=Indicator)
Search for indicators containing a search term. :param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must be at least 3 characters. :param list(str) enclave_ids: list of enclave ids used to restrict to indicators found in reports in specific enclaves (optional - by default reports from all of the user's enclaves are used) :param int from_time: start of time window in milliseconds since epoch (optional) :param int to_time: end of time window in milliseconds since epoch (optional) :param list(str) indicator_types: a list of indicator types to filter by (optional) :param list(str) tags: Name (or list of names) of tag(s) to filter indicators by. Only indicators containing ALL of these tags will be returned. (optional) :param list(str) excluded_tags: Indicators containing ANY of these tags will be excluded from the results. :param int page_number: the page number to get. :param int page_size: the size of the page to be returned. :return: a |Page| of |Indicator| objects.
entailment
def get_related_indicators(self, indicators=None, enclave_ids=None):
    """
    Yield indicators related to the given indicator values one at a time,
    paging through |get_related_indicators_page| behind the scenes.

    :param list(string) indicators: list of indicator values to search for
    :param list(string) enclave_ids: list of GUIDs of enclaves to search in
    :return: a generator of |Indicator| objects.
    """
    page_generator = self._get_related_indicators_page_generator(
        indicators, enclave_ids)
    return Page.get_generator(page_generator=page_generator)
Uses the |get_related_indicators_page| method to create a generator that returns each successive report. :param list(string) indicators: list of indicator values to search for :param list(string) enclave_ids: list of GUIDs of enclaves to search in :return: The generator.
entailment
def get_indicator_metadata(self, value):
    """
    Look up metadata for a single indicator value (value, indicatorType,
    noteCount, sightings, lastSeen, enclaveIds, and tags), determined from
    the enclaves the requesting user has READ access to.

    :param value: an indicator value to query.
    :return: A dict with three fields: 'indicator' (an |Indicator|
        object), 'tags' (a list of |Tag| objects), and 'enclaveIds' (a
        list of enclave IDs that the indicator was found in); or ``None``
        when no metadata exists.

    .. warning:: This method is deprecated.  Please use
        |get_indicators_metadata| instead.
    """
    results = self.get_indicators_metadata([Indicator(value=value)])
    if not results:
        return None
    first = results[0]
    return {
        'indicator': first,
        'tags': first.tags,
        'enclaveIds': first.enclave_ids
    }
Provide metadata associated with a single indicators, including value, indicatorType, noteCount, sightings, lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has READ access to. :param value: an indicator value to query. :return: A dict containing three fields: 'indicator' (an |Indicator| object), 'tags' (a list of |Tag| objects), and 'enclaveIds' (a list of enclave IDs that the indicator was found in). .. warning:: This method is deprecated. Please use |get_indicators_metadata| instead.
entailment
def get_indicators_metadata(self, indicators): """ Provide metadata associated with an list of indicators, including value, indicatorType, noteCount, sightings, lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has READ access to. :param indicators: a list of |Indicator| objects to query. Values are required, types are optional. Types might be required to distinguish in a case where one indicator value has been associated with multiple types based on different contexts. :return: A list of |Indicator| objects. The following attributes of the objects will be returned: correlation_count, last_seen, sightings, notes, tags, enclave_ids. All other attributes of the Indicator objects will have Null values. """ data = [{ 'value': i.value, 'indicatorType': i.type } for i in indicators] resp = self._client.post("indicators/metadata", data=json.dumps(data)) return [Indicator.from_dict(x) for x in resp.json()]
Provide metadata associated with an list of indicators, including value, indicatorType, noteCount, sightings, lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has READ access to. :param indicators: a list of |Indicator| objects to query. Values are required, types are optional. Types might be required to distinguish in a case where one indicator value has been associated with multiple types based on different contexts. :return: A list of |Indicator| objects. The following attributes of the objects will be returned: correlation_count, last_seen, sightings, notes, tags, enclave_ids. All other attributes of the Indicator objects will have Null values.
entailment
def get_indicator_details(self, indicators, enclave_ids=None): """ NOTE: This method uses an API endpoint that is intended for internal use only, and is not officially supported. Provide a list of indicator values and obtain details for all of them, including indicator_type, priority_level, correlation_count, and whether they have been whitelisted. Note that the values provided must match indicator values in Station exactly. If the exact value of an indicator is not known, it should be obtained either through the search endpoint first. :param indicators: A list of indicator values of any type. :param enclave_ids: Only find details for indicators in these enclaves. :return: a list of |Indicator| objects with all fields (except possibly ``reason``) filled out """ # if the indicators parameter is a string, make it a singleton if isinstance(indicators, string_types): indicators = [indicators] params = { 'enclaveIds': enclave_ids, 'indicatorValues': indicators } resp = self._client.get("indicators/details", params=params) return [Indicator.from_dict(indicator) for indicator in resp.json()]
NOTE: This method uses an API endpoint that is intended for internal use only, and is not officially supported. Provide a list of indicator values and obtain details for all of them, including indicator_type, priority_level, correlation_count, and whether they have been whitelisted. Note that the values provided must match indicator values in Station exactly. If the exact value of an indicator is not known, it should be obtained either through the search endpoint first. :param indicators: A list of indicator values of any type. :param enclave_ids: Only find details for indicators in these enclaves. :return: a list of |Indicator| objects with all fields (except possibly ``reason``) filled out
entailment
def add_terms_to_whitelist(self, terms): """ Add a list of terms to the user's company's whitelist. :param terms: The list of terms to whitelist. :return: The list of extracted |Indicator| objects that were whitelisted. """ resp = self._client.post("whitelist", json=terms) return [Indicator.from_dict(indicator) for indicator in resp.json()]
Add a list of terms to the user's company's whitelist. :param terms: The list of terms to whitelist. :return: The list of extracted |Indicator| objects that were whitelisted.
entailment
def delete_indicator_from_whitelist(self, indicator): """ Delete an indicator from the user's company's whitelist. :param indicator: An |Indicator| object, representing the indicator to delete. """ params = indicator.to_dict() self._client.delete("whitelist", params=params)
Delete an indicator from the user's company's whitelist. :param indicator: An |Indicator| object, representing the indicator to delete.
entailment
def get_community_trends(self, indicator_type=None, days_back=None): """ Find indicators that are trending in the community. :param indicator_type: A type of indicator to filter by. If ``None``, will get all types of indicators except for MALWARE and CVEs (this convention is for parity with the corresponding view on the Dashboard). :param days_back: The number of days back to search. Any integer between 1 and 30 is allowed. :return: A list of |Indicator| objects. """ params = { 'type': indicator_type, 'daysBack': days_back } resp = self._client.get("indicators/community-trending", params=params) body = resp.json() # parse items in response as indicators return [Indicator.from_dict(indicator) for indicator in body]
Find indicators that are trending in the community. :param indicator_type: A type of indicator to filter by. If ``None``, will get all types of indicators except for MALWARE and CVEs (this convention is for parity with the corresponding view on the Dashboard). :param days_back: The number of days back to search. Any integer between 1 and 30 is allowed. :return: A list of |Indicator| objects.
entailment
def get_whitelist_page(self, page_number=None, page_size=None): """ Gets a paginated list of indicators that the user's company has whitelisted. :param int page_number: the page number to get. :param int page_size: the size of the page to be returned. :return: A |Page| of |Indicator| objects. """ params = { 'pageNumber': page_number, 'pageSize': page_size } resp = self._client.get("whitelist", params=params) return Page.from_dict(resp.json(), content_type=Indicator)
Gets a paginated list of indicators that the user's company has whitelisted. :param int page_number: the page number to get. :param int page_size: the size of the page to be returned. :return: A |Page| of |Indicator| objects.
entailment
def get_related_indicators_page(self, indicators=None, enclave_ids=None, page_size=None, page_number=None): """ Finds all reports that contain any of the given indicators and returns correlated indicators from those reports. :param indicators: list of indicator values to search for :param enclave_ids: list of IDs of enclaves to search in :param page_size: number of results per page :param page_number: page to start returning results on :return: A |Page| of |Report| objects. """ params = { 'indicators': indicators, 'enclaveIds': enclave_ids, 'pageNumber': page_number, 'pageSize': page_size } resp = self._client.get("indicators/related", params=params) return Page.from_dict(resp.json(), content_type=Indicator)
Finds all reports that contain any of the given indicators and returns correlated indicators from those reports. :param indicators: list of indicator values to search for :param enclave_ids: list of IDs of enclaves to search in :param page_size: number of results per page :param page_number: page to start returning results on :return: A |Page| of |Report| objects.
entailment
def _get_indicators_for_report_page_generator(self, report_id, start_page=0, page_size=None): """ Creates a generator from the |get_indicators_for_report_page| method that returns each successive page. :param str report_id: The ID of the report to get indicators for. :param int start_page: The page to start on. :param int page_size: The size of each page. :return: The generator. """ get_page = functools.partial(self.get_indicators_for_report_page, report_id=report_id) return Page.get_page_generator(get_page, start_page, page_size)
Creates a generator from the |get_indicators_for_report_page| method that returns each successive page. :param str report_id: The ID of the report to get indicators for. :param int start_page: The page to start on. :param int page_size: The size of each page. :return: The generator.
entailment
def _get_related_indicators_page_generator(self, indicators=None, enclave_ids=None, start_page=0, page_size=None): """ Creates a generator from the |get_related_indicators_page| method that returns each successive page. :param indicators: list of indicator values to search for :param enclave_ids: list of IDs of enclaves to search in :param start_page: The page to start on. :param page_size: The size of each page. :return: The generator. """ get_page = functools.partial(self.get_related_indicators_page, indicators, enclave_ids) return Page.get_page_generator(get_page, start_page, page_size)
Creates a generator from the |get_related_indicators_page| method that returns each successive page. :param indicators: list of indicator values to search for :param enclave_ids: list of IDs of enclaves to search in :param start_page: The page to start on. :param page_size: The size of each page. :return: The generator.
entailment
def _get_whitelist_page_generator(self, start_page=0, page_size=None): """ Creates a generator from the |get_whitelist_page| method that returns each successive page. :param int start_page: The page to start on. :param int page_size: The size of each page. :return: The generator. """ return Page.get_page_generator(self.get_whitelist_page, start_page, page_size)
Creates a generator from the |get_whitelist_page| method that returns each successive page. :param int start_page: The page to start on. :param int page_size: The size of each page. :return: The generator.
entailment
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0): """Display the system as points. :param float size: the size of the points. """ if colorlist is None: colorlist = [get_atom_color(t) for t in self.topology['atom_types']] if highlight is not None: if isinstance(highlight, int): colorlist[highlight] = 0xff0000 if isinstance(highlight, (list, np.ndarray)): for i in highlight: colorlist[i] = 0xff0000 sizes = [size] * len(self.topology['atom_types']) points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'), 'colors': colorlist, 'sizes': sizes, 'opacity': opacity}) # Update closure def update(self=self, points=points): self.update_representation(points, {'coordinates': self.coordinates.astype('float32')}) self.update_callbacks.append(update) self.autozoom(self.coordinates)
Display the system as points. :param float size: the size of the points.
entailment
def labels(self, text=None, coordinates=None, colorlist=None, sizes=None, fonts=None, opacity=1.0): '''Display atomic labels for the system''' if coordinates is None: coordinates=self.coordinates l=len(coordinates) if text is None: if len(self.topology.get('atom_types'))==l: text=[self.topology['atom_types'][i]+str(i+1) for i in range(l)] else: text=[str(i+1) for i in range(l)] text_representation = self.add_representation('text', {'coordinates': coordinates, 'text': text, 'colors': colorlist, 'sizes': sizes, 'fonts': fonts, 'opacity': opacity}) def update(self=self, text_representation=text_representation): self.update_representation(text_representation, {'coordinates': coordinates}) self.update_callbacks.append(update)
Display atomic labels for the system
entailment
def remove_labels(self): '''Remove all atomic labels from the system''' for rep_id in self.representations.keys(): if self.representations[rep_id]['rep_type']=='text' and rep_id not in self._axes_reps: self.remove_representation(rep_id)
Remove all atomic labels from the system
entailment
def toggle_axes(self, parameters = None): '''Toggle axes [x,y,z] on and off for the current representation Parameters: dictionary of parameters to control axes: position/p: origin of axes length/l: length of axis offset/o: offset to place axis labels axis_colors/ac: axis colors text_colors/tc: label colors radii/r: axis radii text/t: label text sizes/s: label sizes fonts/f: label fonts''' if len(self._axes_reps)>0: for rep_id in self._axes_reps: self.remove_representation(rep_id) self._axes_reps = [] else: if not isinstance(parameters,dict): parameters={} def defaults(pdict,keys,default,length=3,instance=(int,float)): '''Helper function to generate default values and handle errors''' for k in keys: val=pdict.get(k) if val!=None: break if val==None: val=default elif isinstance(val,instance) and length>1: val = [val]*length elif isinstance(val,(list,np.generic,np.ndarray)) and length>1: if not all([isinstance(v,instance) for v in val]): raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance)) elif not isinstance(val,instance): raise RuntimeError("Invalid type {t} for parameter {p}. 
Use {i}.".format(t=type(val),p=val,i=instance)) return val p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0)) l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1) o = defaults(parameters,['offsets','offset','o'],l*1.05,1) ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex)) tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex)) r = defaults(parameters,['radii','radius','r'],[0.005]*3,3) t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str) s = defaults(parameters,['sizes','size','s'],[32]*3,3) f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str) starts=np.array([p,p,p],float) ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float) axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float) a_rep=self.add_representation('cylinders',{"startCoords":starts, "endCoords":ends, "colors":ac, "radii":r}) t_rep=self.add_representation('text',{"coordinates":axis_labels_coords, "text":t, "colors":tc, "sizes":s, "fonts":f}) self._axes_reps = [a_rep, t_rep]
Toggle axes [x,y,z] on and off for the current representation Parameters: dictionary of parameters to control axes: position/p: origin of axes length/l: length of axis offset/o: offset to place axis labels axis_colors/ac: axis colors text_colors/tc: label colors radii/r: axis radii text/t: label text sizes/s: label sizes fonts/f: label fonts
entailment
def lines(self): '''Display the system bonds as lines. ''' if "bonds" not in self.topology: return bond_start, bond_end = zip(*self.topology['bonds']) bond_start = np.array(bond_start) bond_end = np.array(bond_end) color_array = np.array([get_atom_color(t) for t in self.topology['atom_types']]) lines = self.add_representation('lines', {'startCoords': self.coordinates[bond_start], 'endCoords': self.coordinates[bond_end], 'startColors': color_array[bond_start].tolist(), 'endColors': color_array[bond_end].tolist()}) def update(self=self, lines=lines): bond_start, bond_end = zip(*self.topology['bonds']) bond_start = np.array(bond_start) bond_end = np.array(bond_end) self.update_representation(lines, {'startCoords': self.coordinates[bond_start], 'endCoords': self.coordinates[bond_end]}) self.update_callbacks.append(update) self.autozoom(self.coordinates)
Display the system bonds as lines.
entailment
def wireframe(self, pointsize=0.2, opacity=1.0): '''Display atoms as points of size *pointsize* and bonds as lines.''' self.points(pointsize, opacity=opacity) self.lines()
Display atoms as points of size *pointsize* and bonds as lines.
entailment
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02, colorlist=None, opacity=1.0): """Display the system using a ball and stick representation. """ # Add the spheres if colorlist is None: colorlist = [get_atom_color(t) for t in self.topology['atom_types']] sizes = [ball_radius] * len(self.topology['atom_types']) spheres = self.add_representation('spheres', {'coordinates': self.coordinates.astype('float32'), 'colors': colorlist, 'radii': sizes, 'opacity': opacity}) def update(self=self, spheres=spheres): self.update_representation(spheres, {'coordinates': self.coordinates.astype('float32')}) self.update_callbacks.append(update) # Add the cylinders if 'bonds' in self.topology and self.topology['bonds'] is not None: start_idx, end_idx = zip(*self.topology['bonds']) # Added this so bonds don't go through atoms when opacity<1.0 new_start_coords = [] new_end_coords = [] for bond_ind, bond in enumerate(self.topology['bonds']): trim_amt = (ball_radius**2 - stick_radius**2)**0.5 if ball_radius>stick_radius else 0 start_coord = self.coordinates[bond[0]] end_coord = self.coordinates[bond[1]] vec = (end_coord-start_coord)/np.linalg.norm(end_coord-start_coord) new_start_coords.append(start_coord+vec*trim_amt) new_end_coords.append(end_coord-vec*trim_amt) cylinders = self.add_representation('cylinders', {'startCoords': np.array(new_start_coords,dtype='float32'), 'endCoords': np.array(new_end_coords,dtype='float32'), 'colors': [0xcccccc] * len(new_start_coords), 'radii': [stick_radius] * len(new_start_coords), 'opacity': opacity}) # Update closure def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx): self.update_representation(rep, {'startCoords': self.coordinates[list(start_idx)], 'endCoords': self.coordinates[list(end_idx)]}) self.update_callbacks.append(update) self.autozoom(self.coordinates)
Display the system using a ball and stick representation.
entailment
def line_ribbon(self): '''Display the protein secondary structure as a white lines that passes through the backbone chain. ''' # Control points are the CA (C alphas) backbone = np.array(self.topology['atom_names']) == 'CA' smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone], 'color': 0xffffff}) def update(self=self, smoothline=smoothline): self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]}) self.update_callbacks.append(update) self.autozoom(self.coordinates)
Display the protein secondary structure as a white lines that passes through the backbone chain.
entailment
def cylinder_and_strand(self): '''Display the protein secondary structure as a white, solid tube and the alpha-helices as yellow cylinders. ''' top = self.topology # We build a mini-state machine to find the # start end of helices and such in_helix = False helices_starts = [] helices_ends = [] coils = [] coil = [] for i, typ in enumerate(top['secondary_structure']): if typ == 'H': if in_helix == False: # We become helices helices_starts.append(top['residue_indices'][i][0]) in_helix = True # We end the previous coil coil.append(top['residue_indices'][i][0]) else: if in_helix == True: # We stop being helices helices_ends.append(top['residue_indices'][i][0]) # We start a new coil coil = [] coils.append(coil) in_helix = False # We add control points coil.append(top['residue_indices'][i][0]) [coil.append(j) for j in top['residue_indices'][i] if top['atom_names'][j] == 'CA'] # We add the coils coil_representations = [] for control_points in coils: rid = self.add_representation('smoothtube', {'coordinates': self.coordinates[control_points], 'radius': 0.05, 'resolution': 4, 'color': 0xffffff}) coil_representations.append(rid) start_idx, end_idx = helices_starts, helices_ends cylinders = self.add_representation('cylinders', {'startCoords': self.coordinates[list(start_idx)], 'endCoords': self.coordinates[list(end_idx)], 'colors': [0xffff00] * len(self.coordinates), 'radii': [0.15] * len(self.coordinates)}) def update(self=self, cylinders=cylinders, coils=coils, coil_representations=coil_representations, start_idx=start_idx, end_idx=end_idx, control_points=control_points): for i, control_points in enumerate(coils): rid = self.update_representation(coil_representations[i], {'coordinates': self.coordinates[control_points]}) self.update_representation(cylinders, {'startCoords': self.coordinates[list(start_idx)], 'endCoords': self.coordinates[list(end_idx)]}) self.update_callbacks.append(update) self.autozoom(self.coordinates)
Display the protein secondary structure as a white, solid tube and the alpha-helices as yellow cylinders.
entailment
def cartoon(self, cmap=None): '''Display a protein secondary structure as a pymol-like cartoon representation. :param cmap: is a dictionary that maps the secondary type (H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white) ''' # Parse secondary structure top = self.topology geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates, types=top['atom_names'], secondary_type=top['secondary_structure']), cmap=cmap) primitives = geom.produce(gg.Aes()) ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives] def update(self=self, geom=geom, ids=ids): primitives = geom.produce(gg.Aes(xyz=self.coordinates)) [self.update_representation(id_, rep_options) for id_, rep_options in zip(ids, primitives)] self.update_callbacks.append(update) self.autozoom(self.coordinates)
Display a protein secondary structure as a pymol-like cartoon representation. :param cmap: is a dictionary that maps the secondary type (H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
entailment
def add_isosurface(self, function, isolevel=0.3, resolution=32, style="wireframe", color=0xffffff): '''Add an isosurface to the current scene. :param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple functions that involve standard arithmetic operations and functions such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first pass the function through ``numpy.vectorize``.\ Example: ``mv.add_isosurface(np.vectorize(f))`` :param float isolevel: The value for which the function should be constant. :param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance. :param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``. :param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white. ''' avail_styles = ['wireframe', 'solid', 'transparent'] if style not in avail_styles: raise ValueError('style must be in ' + str(avail_styles)) # We want to make a container that contains the whole molecule # and surface area_min = self.coordinates.min(axis=0) - 0.2 area_max = self.coordinates.max(axis=0) + 0.2 x = np.linspace(area_min[0], area_max[0], resolution) y = np.linspace(area_min[1], area_max[1], resolution) z = np.linspace(area_min[2], area_max[2], resolution) xv, yv, zv = np.meshgrid(x, y, z) spacing = np.array((area_max - area_min)/resolution) if isolevel >= 0: triangles = marching_cubes(function(xv, yv, zv), isolevel) else: # Wrong traingle unwinding roder -- god only knows why triangles = marching_cubes(-function(xv, yv, zv), -isolevel) if len(triangles) == 0: ## NO surface return faces = [] verts = [] for i, t in enumerate(triangles): faces.append([i * 3, i * 3 +1, i * 3 + 2]) verts.extend(t) faces = np.array(faces) verts = area_min + spacing/2 + np.array(verts)*spacing rep_id = self.add_representation('surface', {'verts': 
verts.astype('float32'), 'faces': faces.astype('int32'), 'style': style, 'color': color}) self.autozoom(verts)
Add an isosurface to the current scene. :param callable function: A function that takes x, y, z coordinates as input and is broadcastable using numpy. Typically simple functions that involve standard arithmetic operations and functions such as ``x**2 + y**2 + z**2`` or ``np.exp(x**2 + y**2 + z**2)`` will work. If not sure, you can first pass the function through ``numpy.vectorize``.\ Example: ``mv.add_isosurface(np.vectorize(f))`` :param float isolevel: The value for which the function should be constant. :param int resolution: The number of grid point to use for the surface. An high value will give better quality but lower performance. :param str style: The surface style, choose between ``solid``, ``wireframe`` and ``transparent``. :param int color: The color given as an hexadecimal integer. Example: ``0xffffff`` is white.
entailment
def add_isosurface_grid_data(self, data, origin, extent, resolution, isolevel=0.3, scale=10, style="wireframe", color=0xffffff): """ Add an isosurface to current scence using pre-computed data on a grid """ spacing = np.array(extent/resolution)/scale if isolevel >= 0: triangles = marching_cubes(data, isolevel) else: triangles = marching_cubes(-data, -isolevel) faces = [] verts = [] for i, t in enumerate(triangles): faces.append([i * 3, i * 3 +1, i * 3 + 2]) verts.extend(t) faces = np.array(faces) verts = origin + spacing/2 + np.array(verts)*spacing rep_id = self.add_representation('surface', {'verts': verts.astype('float32'), 'faces': faces.astype('int32'), 'style': style, 'color': color}) self.autozoom(verts)
Add an isosurface to current scence using pre-computed data on a grid
entailment
def _refresh_token(self): """ Retrieves the OAuth2 token generated by the user's API key and API secret. Sets the instance property 'token' to this new token. If the current token is still live, the server will simply return that. """ # use basic auth with API key and secret client_auth = requests.auth.HTTPBasicAuth(self.api_key, self.api_secret) # make request post_data = {"grant_type": "client_credentials"} response = requests.post(self.auth, auth=client_auth, data=post_data, proxies=self.proxies) self.last_response = response # raise exception if status code indicates an error if 400 <= response.status_code < 600: message = "{} {} Error (Trace-Id: {}): {}".format(response.status_code, "Client" if response.status_code < 500 else "Server", self._get_trace_id(response), "unable to get token") raise HTTPError(message, response=response) # set token property to the received token self.token = response.json()["access_token"]
Retrieves the OAuth2 token generated by the user's API key and API secret. Sets the instance property 'token' to this new token. If the current token is still live, the server will simply return that.
entailment
def _get_headers(self, is_json=False): """ Create headers dictionary for a request. :param boolean is_json: Whether the request body is a json. :return: The headers dictionary. """ headers = {"Authorization": "Bearer " + self._get_token()} if self.client_type is not None: headers["Client-Type"] = self.client_type if self.client_version is not None: headers["Client-Version"] = self.client_version if self.client_metatag is not None: headers["Client-Metatag"] = self.client_metatag if is_json: headers['Content-Type'] = 'application/json' return headers
Create headers dictionary for a request. :param boolean is_json: Whether the request body is a json. :return: The headers dictionary.
entailment
def _is_expired_token_response(cls, response): """ Determine whether the given response indicates that the token is expired. :param response: The response object. :return: True if the response indicates that the token is expired. """ EXPIRED_MESSAGE = "Expired oauth2 access token" INVALID_MESSAGE = "Invalid oauth2 access token" if response.status_code == 400: try: body = response.json() if str(body.get('error_description')) in [EXPIRED_MESSAGE, INVALID_MESSAGE]: return True except: pass return False
Determine whether the given response indicates that the token is expired. :param response: The response object. :return: True if the response indicates that the token is expired.
entailment
def request(self, method, path, headers=None, params=None, data=None, **kwargs): """ A wrapper around ``requests.request`` that handles boilerplate code specific to TruStar's API. :param str method: The method of the request (``GET``, ``PUT``, ``POST``, or ``DELETE``) :param str path: The path of the request, i.e. the piece of the URL after the base URL :param dict headers: A dictionary of headers that will be merged with the base headers for the SDK :param kwargs: Any extra keyword arguments. These will be forwarded to the call to ``requests.request``. :return: The response object. """ retry = self.retry attempted = False while not attempted or retry: # get headers and merge with headers from method parameter if it exists base_headers = self._get_headers(is_json=method in ["POST", "PUT"]) if headers is not None: base_headers.update(headers) url = "{}/{}".format(self.base, path) # make request response = requests.request(method=method, url=url, headers=base_headers, verify=self.verify, params=params, data=data, proxies=self.proxies, **kwargs) self.last_response = response attempted = True # log request self.logger.debug("%s %s. Trace-Id: %s. Params: %s", method, url, response.headers.get('Trace-Id'), params) # refresh token if expired if self._is_expired_token_response(response): self._refresh_token() # if "too many requests" status code received, wait until next request will be allowed and retry elif retry and response.status_code == 429: wait_time = ceil(response.json().get('waitTime') / 1000) self.logger.debug("Waiting %d seconds until next request allowed." 
% wait_time) # if wait time exceeds max wait time, allow the exception to be thrown if wait_time <= self.max_wait_time: time.sleep(wait_time) else: retry = False # request cycle is complete else: retry = False # raise exception if status code indicates an error if 400 <= response.status_code < 600: # get response json body, if one exists resp_json = None try: resp_json = response.json() except: pass # get message from json body, if one exists if resp_json is not None and 'message' in resp_json: reason = resp_json['message'] else: reason = "unknown cause" # construct error message message = "{} {} Error (Trace-Id: {}): {}".format(response.status_code, "Client" if response.status_code < 500 else "Server", self._get_trace_id(response), reason) # raise HTTPError raise HTTPError(message, response=response) return response
A wrapper around ``requests.request`` that handles boilerplate code specific to TruStar's API. :param str method: The method of the request (``GET``, ``PUT``, ``POST``, or ``DELETE``) :param str path: The path of the request, i.e. the piece of the URL after the base URL :param dict headers: A dictionary of headers that will be merged with the base headers for the SDK :param kwargs: Any extra keyword arguments. These will be forwarded to the call to ``requests.request``. :return: The response object.
entailment
def isosurface_from_data(data, isolevel, origin, spacing): """Small wrapper to get directly vertices and faces to feed into programs """ spacing = np.array(extent/resolution) if isolevel >= 0: triangles = marching_cubes(data, isolevel) else: # Wrong traingle unwinding roder -- god only knows why triangles = marching_cubes(-data, -isolevel) faces = [] verts = [] for i, t in enumerate(triangles): faces.append([i * 3, i * 3 +1, i * 3 + 2]) verts.extend(t) faces = np.array(faces) verts = origin + spacing/2 + np.array(verts)*spacing return verts, faces
Small wrapper to get directly vertices and faces to feed into programs
entailment
def _get_gtf_column(column_name, gtf_path, df): """ Helper function which returns a dictionary column or raises an ValueError abou the absence of that column in a GTF file. """ if column_name in df.columns: return list(df[column_name]) else: raise ValueError( "Missing '%s' in columns of %s, available: %s" % ( column_name, gtf_path, list(df.columns)))
Helper function which returns a dictionary column or raises an ValueError abou the absence of that column in a GTF file.
entailment
def load_transcript_fpkm_dict_from_gtf(
        gtf_path,
        transcript_id_column_name="reference_id",
        fpkm_column_name="FPKM",
        feature_column_name="feature"):
    """
    Load a GTF file generated by StringTie containing transcript-level
    abundance estimates.

    Returns a dictionary mapping Ensembl transcript IDs to FPKM values,
    keeping only rows whose feature type is "transcript" and which carry a
    non-empty reference ID.
    """
    df = gtfparse.read_gtf(
        gtf_path,
        column_converters={fpkm_column_name: float})
    ids = _get_gtf_column(transcript_id_column_name, gtf_path, df)
    fpkms = _get_gtf_column(fpkm_column_name, gtf_path, df)
    feature_types = _get_gtf_column(feature_column_name, gtf_path, df)
    logging.info("Loaded %d rows from %s" % (len(ids), gtf_path))
    n_transcript_rows = sum(ft == "transcript" for ft in feature_types)
    logging.info("Found %s transcript entries" % n_transcript_rows)
    fpkm_dict = {}
    for transcript_id, fpkm, feature_type in zip(ids, fpkms, feature_types):
        # Skip non-transcript rows (e.g. exon entries).
        if feature_type != "transcript":
            continue
        # Skip rows without a usable reference ID.
        if transcript_id is None or len(transcript_id) == 0:
            continue
        fpkm_dict[transcript_id] = float(fpkm)
    logging.info("Keeping %d transcript rows with reference IDs" % (
        len(fpkm_dict),))
    return fpkm_dict
Load a GTF file generated by StringTie which contains transcript-level quantification of abundance. Returns a dictionary mapping Ensembl IDs of transcripts to FPKM values.
entailment
def predict_from_named_sequences(
        self,
        name_to_sequence_dict):
    """
    Run the underlying MHC binding predictor over sub-sequences of every
    named input sequence.

    Parameters
    ----------
    name_to_sequence_dict : (str->str) dict
        Dictionary mapping sequence names to amino acid sequences

    Returns pandas.DataFrame with the following columns:
        - source_sequence_name
        - peptide
        - peptide_offset
        - peptide_length
        - allele
        - affinity
        - percentile_rank
        - prediction_method_name
    """
    raw_df = self.mhc_model.predict_subsequences_dataframe(
        name_to_sequence_dict)
    # Rename the predictor's generic column names to peptide-specific ones.
    column_renames = {
        "length": "peptide_length",
        "offset": "peptide_offset",
    }
    return raw_df.rename(columns=column_renames)
Parameters ---------- name_to_sequence_dict : (str->str) dict Dictionary mapping sequence names to amino acid sequences Returns pandas.DataFrame with the following columns: - source_sequence_name - peptide - peptide_offset - peptide_length - allele - affinity - percentile_rank - prediction_method_name
entailment
def predict_from_sequences(self, sequences):
    """
    Predict MHC ligands for sub-sequences of each input sequence.

    Parameters
    ----------
    sequences : list of str
        Multiple amino acid sequences (without any names or IDs)

    Returns DataFrame with the following fields:
        - source_sequence
        - peptide
        - peptide_offset
        - peptide_length
        - allele
        - affinity
        - percentile_rank
        - prediction_method_name
    """
    # Use each sequence as its own unique name/ID.
    named_sequences = dict(zip(sequences, sequences))
    predictions = self.predict_from_named_sequences(named_sequences)
    # The "name" of each sequence is the sequence itself, so relabel
    # the column accordingly.
    return predictions.rename(
        columns={"source_sequence_name": "source_sequence"})
Predict MHC ligands for sub-sequences of each input sequence. Parameters ---------- sequences : list of str Multiple amino acid sequences (without any names or IDs) Returns DataFrame with the following fields: - source_sequence - peptide - peptide_offset - peptide_length - allele - affinity - percentile_rank - prediction_method_name
entailment
def predict_from_mutation_effects(
        self,
        effects,
        transcript_expression_dict=None,
        gene_expression_dict=None):
    """Given a Varcode.EffectCollection of predicted protein effects,
    return predicted epitopes around each mutation.

    Parameters
    ----------
    effects : Varcode.EffectCollection

    transcript_expression_dict : dict
        Dictionary mapping transcript IDs to RNA expression estimates. Used
        both for transcript expression filtering and for selecting the
        most abundant transcript for a particular variant. If omitted then
        transcript selection is done using priority of variant effects and
        transcript length.

    gene_expression_dict : dict, optional
        Dictionary mapping gene IDs to RNA expression estimates

    Returns DataFrame with the following columns:
        - variant
        - gene
        - gene_id
        - transcript_id
        - transcript_name
        - effect
        - effect_type
        - peptide
        - peptide_offset
        - peptide_length
        - allele
        - affinity
        - percentile_rank
        - prediction_method_name
        - contains_mutant_residues
        - mutation_start_in_peptide
        - mutation_end_in_peptide

    Optionally will also include the following columns if corresponding
    expression dictionary inputs are provided:
        - gene_expression
        - transcript_expression
    """
    # we only care about effects which impact the coding sequence of a
    # protein
    effects = filter_silent_and_noncoding_effects(effects)

    # Drop effects on transcripts/genes below the configured expression
    # thresholds (no-op when the dictionaries are None).
    effects = apply_effect_expression_filters(
        effects,
        transcript_expression_dict=transcript_expression_dict,
        transcript_expression_threshold=self.min_transcript_expression,
        gene_expression_dict=gene_expression_dict,
        gene_expression_threshold=self.min_gene_expression)

    # group by variants, so that we end up with only one mutant
    # sequence per mutation
    variant_effect_groups = effects.groupby_variant()

    if len(variant_effect_groups) == 0:
        # NOTE(review): `logging.warn` is a deprecated alias of
        # `logging.warning`, and returning [] here (instead of an empty
        # DataFrame) gives callers an inconsistent type — confirm intent.
        logging.warn("No candidates for MHC binding prediction")
        return []

    if transcript_expression_dict:
        # if expression data is available, then for each variant
        # keep the effect annotation for the most abundant transcript
        top_effects = [
            variant_effects.top_expression_effect(
                transcript_expression_dict)
            for variant_effects in variant_effect_groups.values()
        ]
    else:
        # if no transcript abundance data is available, then
        # for each variant keep the effect with the most significant
        # predicted effect on the protein sequence, along with using
        # transcript/CDS length as a tie-breaker for effects with the same
        # priority.
        top_effects = [
            variant_effects.top_priority_effect()
            for variant_effects in variant_effect_groups.values()
        ]

    # 1) dictionary mapping varcode effect objects to subsequences
    #    around each mutation
    # 2) dictionary mapping varcode effect to start offset of subsequence
    #    within the full mutant protein sequence
    effect_to_subsequence_dict, effect_to_offset_dict = \
        protein_subsequences_around_mutations(
            effects=top_effects,
            padding_around_mutation=self.padding_around_mutation)

    # since we know that each set of variant effects has been
    # reduced to a single 'top priority' effect, we can uniquely
    # identify each variant sequence by its original genomic variant
    variant_string_to_effect_dict = {
        effect.variant.short_description: effect
        for effect in effect_to_subsequence_dict.keys()
    }
    variant_string_to_subsequence_dict = {
        effect.variant.short_description: subseq
        for (effect, subseq) in effect_to_subsequence_dict.items()
    }
    variant_string_to_offset_dict = {
        effect.variant.short_description: subseq_offset
        for (effect, subseq_offset) in effect_to_offset_dict.items()
    }

    df = self.predict_from_named_sequences(variant_string_to_subsequence_dict)

    logging.info("MHC predictor returned %d peptide binding predictions" % (
        len(df)))

    # since we used variant descriptions as the name of each sequence
    # let's rename that column to be more informative
    df = df.rename(columns={"source_sequence_name": "variant"})

    # adjust offset to be relative to start of protein, rather
    # than whatever subsequence we used for prediction
    def compute_peptide_offset_relative_to_protein(row):
        subsequence_offset = variant_string_to_offset_dict[row.variant]
        return row.peptide_offset + subsequence_offset
    df["peptide_offset"] = df.apply(
        compute_peptide_offset_relative_to_protein,
        axis=1)

    # Apply the optional affinity (IC50 nM) cutoff.
    if self.ic50_cutoff:
        df = df[df.affinity <= self.ic50_cutoff]
        logging.info("Kept %d predictions after filtering affinity <= %f" % (
            len(df),
            self.ic50_cutoff))

    # Apply the optional percentile-rank cutoff.
    if self.percentile_cutoff:
        df = df[df.percentile_rank <= self.percentile_cutoff]
        logging.info("Kept %d predictions after filtering percentile <= %f" % (
            len(df),
            self.percentile_cutoff))

    # Extra per-row annotation columns built up in parallel lists, then
    # attached to the DataFrame at the end.
    extra_columns = OrderedDict([
        ('gene', []),
        ('gene_id', []),
        ('transcript_id', []),
        ('transcript_name', []),
        ('effect', []),
        ('effect_type', []),
        ('contains_mutant_residues', []),
        ('mutation_start_in_peptide', []),
        ('mutation_end_in_peptide', []),
    ])
    if gene_expression_dict is not None:
        extra_columns["gene_expression"] = []
    if transcript_expression_dict is not None:
        extra_columns["transcript_expression"] = []

    for _, row in df.iterrows():
        effect = variant_string_to_effect_dict[row.variant]
        mutation_start_in_protein = effect.aa_mutation_start_offset
        mutation_end_in_protein = effect.aa_mutation_end_offset
        peptide_length = len(row.peptide)
        # Does this peptide actually overlap the mutated residues?
        is_mutant = contains_mutant_residues(
            peptide_start_in_protein=row.peptide_offset,
            peptide_length=peptide_length,
            mutation_start_in_protein=mutation_start_in_protein,
            mutation_end_in_protein=mutation_end_in_protein)
        if is_mutant:
            # Interval of the mutation within this peptide's coordinates.
            mutation_start_in_peptide, mutation_end_in_peptide = peptide_mutation_interval(
                peptide_start_in_protein=row.peptide_offset,
                peptide_length=peptide_length,
                mutation_start_in_protein=mutation_start_in_protein,
                mutation_end_in_protein=mutation_end_in_protein)
        else:
            mutation_start_in_peptide = mutation_end_in_peptide = None
        extra_columns["gene"].append(effect.gene_name)
        gene_id = effect.gene_id
        extra_columns["gene_id"].append(gene_id)
        if gene_expression_dict is not None:
            # Missing genes default to zero expression.
            extra_columns["gene_expression"].append(
                gene_expression_dict.get(gene_id, 0.0))
        transcript_id = effect.transcript_id
        extra_columns["transcript_id"].append(transcript_id)
        extra_columns["transcript_name"].append(effect.transcript_name)
        if transcript_expression_dict is not None:
            # Missing transcripts default to zero expression.
            extra_columns["transcript_expression"].append(
                transcript_expression_dict.get(transcript_id, 0.0))
        extra_columns["effect"].append(effect.short_description)
        extra_columns["effect_type"].append(effect.__class__.__name__)
        extra_columns["contains_mutant_residues"].append(is_mutant)
        extra_columns["mutation_start_in_peptide"].append(mutation_start_in_peptide)
        extra_columns["mutation_end_in_peptide"].append(mutation_end_in_peptide)
    for col, values in extra_columns.items():
        df[col] = values

    # TODO: add extra boolean field
    #   novel = is_mutant | not_in_reference
    # Requires keeping a quick lookup structure for all peptides in
    # the reference proteome
    if self.only_novel_epitopes:
        df = df[df.contains_mutant_residues]
    return df
Given a Varcode.EffectCollection of predicted protein effects, return predicted epitopes around each mutation. Parameters ---------- effects : Varcode.EffectCollection transcript_expression_dict : dict Dictionary mapping transcript IDs to RNA expression estimates. Used both for transcript expression filtering and for selecting the most abundant transcript for a particular variant. If omitted then transcript selection is done using priority of variant effects and transcript length. gene_expression_dict : dict, optional Dictionary mapping gene IDs to RNA expression estimates Returns DataFrame with the following columns: - variant - gene - gene_id - transcript_id - transcript_name - effect - effect_type - peptide - peptide_offset - peptide_length - allele - affinity - percentile_rank - prediction_method_name - contains_mutant_residues - mutation_start_in_peptide - mutation_end_in_peptide Optionally will also include the following columns if corresponding expression dictionary inputs are provided: - gene_expression - transcript_expression
entailment
def predict_from_variants(
        self,
        variants,
        transcript_expression_dict=None,
        gene_expression_dict=None):
    """
    Predict epitopes from a Variant collection, filtering options, and
    optional gene and transcript expression data.

    Parameters
    ----------
    variants : varcode.VariantCollection

    transcript_expression_dict : dict
        Maps from Ensembl transcript IDs to FPKM expression values.

    gene_expression_dict : dict, optional
        Maps from Ensembl gene IDs to FPKM expression values.

    Returns DataFrame with the following columns:
        - variant
        - gene
        - gene_id
        - transcript_id
        - transcript_name
        - effect
        - effect_type
        - peptide
        - peptide_offset
        - peptide_length
        - allele
        - affinity
        - percentile_rank
        - prediction_method_name
        - contains_mutant_residues
        - mutation_start_in_peptide
        - mutation_end_in_peptide

    Optionally will also include the following columns if corresponding
    expression dictionary inputs are provided:
        - gene_expression
        - transcript_expression
    """
    # Pre-filter variants by expression of the genes/transcripts they
    # overlap. This duplicates the effect-level filtering but is much
    # cheaper than predicting a variant's protein-level effect first.
    filtered_variants = apply_variant_expression_filters(
        variants,
        transcript_expression_dict=transcript_expression_dict,
        transcript_expression_threshold=self.min_transcript_expression,
        gene_expression_dict=gene_expression_dict,
        gene_expression_threshold=self.min_gene_expression)
    predicted_effects = filtered_variants.effects(
        raise_on_error=self.raise_on_error)
    return self.predict_from_mutation_effects(
        effects=predicted_effects,
        transcript_expression_dict=transcript_expression_dict,
        gene_expression_dict=gene_expression_dict)
Predict epitopes from a Variant collection, filtering options, and optional gene and transcript expression data. Parameters ---------- variants : varcode.VariantCollection transcript_expression_dict : dict Maps from Ensembl transcript IDs to FPKM expression values. gene_expression_dict : dict, optional Maps from Ensembl gene IDs to FPKM expression values. Returns DataFrame with the following columns: - variant - gene - gene_id - transcript_id - transcript_name - effect - effect_type - peptide - peptide_offset - peptide_length - allele - affinity - percentile_rank - prediction_method_name - contains_mutant_residues - mutation_start_in_peptide - mutation_end_in_peptide Optionally will also include the following columns if corresponding expression dictionary inputs are provided: - gene_expression - transcript_expression
entailment
def main(args_list=None):
    """
    Script entry-point to predict neo-epitopes from genomic variants using
    Topiary.
    """
    parsed_args = parse_args(args_list)
    print("Topiary commandline arguments:")
    print(parsed_args)
    epitope_df = predict_epitopes_from_args(parsed_args)
    write_outputs(epitope_df, parsed_args)
    print("Total count: %d" % len(epitope_df))
Script entry-point to predict neo-epitopes from genomic variants using Topiary.
entailment
def render_povray(scene, filename='ipython', width=600, height=600,
                  antialiasing=0.01, extra_opts=None):
    '''Render the scene with povray for publication.

    :param dict scene: The scene to render
    :param string filename: Output filename or 'ipython' to render in the
        notebook.
    :param int width: Width in pixels.
    :param int height: Height in pixels.
    :param float antialiasing: Antialiasing threshold passed to povray.
    :param dict extra_opts: Dictionary to merge/override with the passed
        scene.
    '''
    if not vapory_available:
        raise Exception("To render with povray, you need to have the vapory"
                        " package installed.")

    # Adding extra options.
    # BUGFIX: the default used to be the mutable literal `extra_opts={}`;
    # using None avoids sharing one dict across calls. Behavior unchanged.
    scene = normalize_scene(scene)
    scene.update(extra_opts or {})

    # Derive the camera "up" vector and horizontal field of view from the
    # stored quaternion, vertical fov and aspect ratio.
    aspect = scene['camera']['aspect']
    up = np.dot(rmatrixquaternion(scene['camera']['quaternion']), [0, 1, 0])
    v_fov = scene['camera']['vfov'] / 180.0 * np.pi
    h_fov = 2.0 * np.arctan(np.tan(v_fov/2.0) * aspect) / np.pi * 180

    # Setup camera position
    camera = vp.Camera('location', scene['camera']['location'],
                       'direction', [0, 0, -1],
                       'sky', up,
                       'look_at', scene['camera']['target'],
                       'angle', h_fov)

    global_settings = []
    # Setup global illumination
    if scene.get('radiosity', False):
        # Global Illumination: no explicit lights, radiosity does the work.
        radiosity = vp.Radiosity(
            'brightness', 2.0,
            'count', 100,
            'error_bound', 0.15,
            'gray_threshold', 0.0,
            'low_error_factor', 0.2,
            'minimum_reuse', 0.015,
            'nearest_count', 10,
            'recursion_limit', 1,  # Docs say 1 is enough
            'adc_bailout', 0.01,
            'max_sample', 0.5,
            'media off',
            'normal off',
            'always_sample', 1,
            'pretrace_start', 0.08,
            'pretrace_end', 0.01)
        light_sources = []
        global_settings.append(radiosity)
    else:
        # Four distant point lights roughly surrounding the scene.
        light_sources = [
            vp.LightSource(np.array([2, 4, -3]) * 1000, 'color', [1, 1, 1]),
            vp.LightSource(np.array([-2, -4, 3]) * 1000, 'color', [1, 1, 1]),
            vp.LightSource(np.array([-1, 2, 3]) * 1000, 'color', [1, 1, 1]),
            vp.LightSource(np.array([1, -2, -3]) * 1000, 'color', [1, 1, 1]),
        ]

    # Background -- white for now
    background = vp.Background([1, 1, 1])

    # Things to display
    stuff = _generate_objects(scene['representations'])

    scene = vp.Scene(camera,
                     objects=light_sources + stuff + [background],
                     global_settings=global_settings)
    return scene.render(filename, width=width, height=height,
                        antialiasing=antialiasing)
Render the scene with povray for publication. :param dict scene: The scene to render :param string filename: Output filename or 'ipython' to render in the notebook. :param int width: Width in pixels. :param int height: Height in pixels. :param dict extra_opts: Dictionary to merge/override with the passed scene.
entailment
def rmatrixquaternion(q):
    """Create a rotation matrix from q quaternion rotation.

    Quaternions are typed as Numeric Python numpy.arrays of length 4,
    ordered (x, y, z, w). The quaternion must be normalized; the result is
    a proper 3x3 rotation matrix (determinant +1).
    """
    # The quaternion must have unit norm for the formula below to yield a
    # rotation matrix.
    assert np.allclose(math.sqrt(np.dot(q, q)), 1.0)

    x, y, z, w = q

    # Standard unit-quaternion -> rotation-matrix expansion.
    R = np.array([
        [1.0 - 2.0 * (y * y + z * z),
         2.0 * (x * y - z * w),
         2.0 * (x * z + y * w)],
        [2.0 * (x * y + z * w),
         1.0 - 2.0 * (x * x + z * z),
         2.0 * (y * z - x * w)],
        [2.0 * (x * z - y * w),
         2.0 * (y * z + x * w),
         1.0 - 2.0 * (x * x + y * y)],
    ], float)

    # Sanity check: proper rotations have determinant +1.
    assert np.allclose(np.linalg.det(R), 1.0)
    return R
Create a rotation matrix from q quaternion rotation. Quaternions are typed as Numeric Python numpy.arrays of length 4.
entailment
def rna_transcript_expression_dict_from_args(args):
    """
    Return a dictionary mapping Ensembl transcript IDs to FPKM expression
    values, or None if neither a Cufflinks tracking file nor a StringTie
    GTF file was specified in the parsed arguments.
    """
    tracking_path = args.rna_transcript_fpkm_tracking_file
    if tracking_path:
        # Cufflinks FPKM tracking file takes precedence.
        return load_cufflinks_fpkm_dict(tracking_path)
    gtf_path = args.rna_transcript_fpkm_gtf_file
    if gtf_path:
        return load_transcript_fpkm_dict_from_gtf(gtf_path)
    return None
Returns a dictionary mapping Ensembl transcript IDs to FPKM expression values or None if neither Cufflinks tracking file nor StringTie GTF file were specified.
entailment
def get_total_pages(self):
    """
    :return: The total number of pages on the server, or ``None`` when
        either the total element count or the page size is unknown.
    """
    total = self.total_elements
    size = self.page_size
    # Without both quantities the page count cannot be computed.
    if total is None or size is None:
        return None
    return math.ceil(float(total) / float(size))
:return: The total number of pages on the server.
entailment
def has_more_pages(self):
    """
    :return: ``True`` if there are more pages available on the server.
    """
    # Trust the server-provided flag when it exists.
    if self.has_next is not None:
        return self.has_next

    # Otherwise derive the answer from the page number and total page
    # count, when both are known.
    total_pages = self.get_total_pages()
    if self.page_number is None or total_pages is None:
        return None
    return self.page_number + 1 < total_pages
:return: ``True`` if there are more pages available on the server.
entailment