Dataset schema:
  _id              string (lengths 2 to 7)
  title            string (lengths 1 to 88)
  partition        string (3 classes)
  text             string (lengths 75 to 19.8k)
  language         string (1 class)
  meta_information dict
q5600
offset_limit
train
def offset_limit(func):
    """ Decorator that converts python slicing to offset and limit """
    def func_wrapper(self, start, stop):
        offset = start
        limit = stop - start
        return func(self, offset, limit)
    return func_wrapper
python
{ "resource": "" }
q5601
generate_id
train
def generate_id(*s):
    """
    generates an id from one or more given strings
    it uses english as the base language in case some strings are
    translated, this ensures consistent ids
    """
    with translation.override("en"):
        generated_id = slugify("-".join([str(i) for i in s]))
    return generated_id
python
{ "resource": "" }
q5602
append_query_parameter
train
def append_query_parameter(url, parameters, ignore_if_exists=True):
    """ quick and dirty appending of query parameters to a url """
    if ignore_if_exists:
        # iterate over a copy of the keys so entries can be deleted
        # without raising a RuntimeError in Python 3
        for key in list(parameters.keys()):
            if key + "=" in url:
                del parameters[key]
    parameters_str = "&".join(k + "=" + v for k, v in parameters.items())
    append_token = "&" if "?" in url else "?"
    return url + append_token + parameters_str
python
{ "resource": "" }
q5603
BetterFileInput.render
train
def render(self, name, value, attrs=None, renderer=None):
    """For django 1.10 compatibility"""
    if django.VERSION >= (1, 11):
        return super(BetterFileInput, self).render(name, value, attrs)
    t = render_to_string(
        template_name=self.template_name,
        context=self.get_context(name, value, attrs),
    )
    return mark_safe(t)
python
{ "resource": "" }
q5604
get_table
train
def get_table(ports):
    """
    This function returns a pretty table used to display the port results.

    :param ports: list of found ports
    :return: the table to display
    """
    table = PrettyTable(["Name", "Port", "Protocol", "Description"])
    table.align["Name"] = "l"
    table.align["Description"] = "l"
    table.padding_width = 1
    for port in ports:
        table.add_row(port)
    return table
python
{ "resource": "" }
q5605
run
train
def run(port, like, use_json, server):
    """Search port names and numbers."""
    if not port and not server[0]:
        raise click.UsageError("Please specify a port")
    if server[0]:
        app.run(host=server[0], port=server[1])
        return
    ports = get_ports(port, like)
    if not ports:
        sys.stderr.write("No ports found for '{0}'\n".format(port))
        return
    if use_json:
        print(json.dumps(ports, indent=4))
    else:
        table = get_table(ports)
        print(table)
python
{ "resource": "" }
q5606
RPCClient.validate_addr
train
def validate_addr(self, address, id=None, endpoint=None):
    """
    Returns whether or not an address string is valid.

    Args:
        address: (str) address to look up (in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')
        id: (int, optional) id to use for response tracking
        endpoint: (RPCEndpoint, optional) endpoint to use

    Returns:
        json object of the result or the error encountered in the RPC call
    """
    return self._call_endpoint(VALIDATE_ADDR, params=[address], id=id, endpoint=endpoint)
python
{ "resource": "" }
q5607
MPIPool.map
train
def map(self, worker, tasks, callback=None):
    """Evaluate a function or callable on each task in parallel using MPI.

    The callable, ``worker``, is called on each element of the ``tasks``
    iterable. The results are returned in the expected order (symmetric with
    ``tasks``).

    Parameters
    ----------
    worker : callable
        A function or callable object that is executed on each element of
        the specified ``tasks`` iterable. This object must be picklable
        (i.e. it can't be a function scoped within a function or a
        ``lambda`` function). This should accept a single positional
        argument and return a single object.
    tasks : iterable
        A list or iterable of tasks. Each task can be itself an iterable
        (e.g., tuple) of values or data to pass in to the worker function.
    callback : callable, optional
        An optional callback function (or callable) that is called with the
        result from each worker run and is executed on the master process.
        This is useful for, e.g., saving results to a file, since the
        callback is only called on the master thread.

    Returns
    -------
    results : list
        A list of results from the output of each ``worker()`` call.
    """
    # If not the master just wait for instructions.
    if not self.is_master():
        self.wait()
        return

    if callback is None:
        callback = _dummy_callback

    workerset = self.workers.copy()
    tasklist = [(tid, (worker, arg)) for tid, arg in enumerate(tasks)]
    resultlist = [None] * len(tasklist)
    pending = len(tasklist)

    while pending:
        if workerset and tasklist:
            worker = workerset.pop()
            taskid, task = tasklist.pop()
            log.log(_VERBOSE, "Sent task %s to worker %s with tag %s",
                    task[1], worker, taskid)
            self.comm.send(task, dest=worker, tag=taskid)

        if tasklist:
            flag = self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
            if not flag:
                continue
        else:
            self.comm.Probe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)

        status = MPI.Status()
        result = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG,
                                status=status)
        worker = status.source
        taskid = status.tag
        log.log(_VERBOSE, "Master received from worker %s with tag %s",
                worker, taskid)

        callback(result)

        workerset.add(worker)
        resultlist[taskid] = result
        pending -= 1

    return resultlist
python
{ "resource": "" }
q5608
MPIPool.close
train
def close(self):
    """ Tell all the workers to quit."""
    if self.is_worker():
        return
    for worker in self.workers:
        self.comm.send(None, worker, 0)
python
{ "resource": "" }
q5609
update_git_devstr
train
def update_git_devstr(version, path=None):
    """
    Updates the git revision string if and only if the path is being imported
    directly from a git working copy.  This ensures that the revision number
    in the version string is accurate.
    """
    try:
        # Quick way to determine if we're in git or not - returns '' if not
        devstr = get_git_devstr(sha=True, show_warning=False, path=path)
    except OSError:
        return version

    if not devstr:
        # Probably not in git so just pass silently
        return version

    if 'dev' in version:  # update to the current git revision
        version_base = version.split('.dev', 1)[0]
        devstr = get_git_devstr(sha=False, show_warning=False, path=path)
        return version_base + '.dev' + devstr
    else:
        # otherwise it's already the true/release version
        return version
python
{ "resource": "" }
q5610
get_git_devstr
train
def get_git_devstr(sha=False, show_warning=True, path=None):
    """
    Determines the number of revisions in this repository.

    Parameters
    ----------
    sha : bool
        If True, the full SHA1 hash will be returned. Otherwise, the total
        count of commits in the repository will be used as a "revision
        number".

    show_warning : bool
        If True, issue a warning if git returns an error code, otherwise
        errors pass silently.

    path : str or None
        If a string, specifies the directory to look in to find the git
        repository.  If `None`, the current working directory is used, and
        must be the root of the git repository.  If given a filename it uses
        the directory containing that file.

    Returns
    -------
    devversion : str
        Either a string with the revision number (if `sha` is False), the
        SHA1 hash of the current commit (if `sha` is True), or an empty
        string if git version info could not be identified.
    """
    if path is None:
        path = os.getcwd()
        if not _get_repo_path(path, levels=0):
            return ''

    if not os.path.isdir(path):
        path = os.path.abspath(os.path.dirname(path))

    if sha:
        # Faster for getting just the hash of HEAD
        cmd = ['rev-parse', 'HEAD']
    else:
        cmd = ['rev-list', '--count', 'HEAD']

    def run_git(cmd):
        try:
            p = subprocess.Popen(['git'] + cmd, cwd=path,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except OSError as e:
            if show_warning:
                warnings.warn('Error running git: ' + str(e))
            return (None, b'', b'')

        if p.returncode == 128:
            if show_warning:
                warnings.warn('No git repository present at {0!r}! Using '
                              'default dev version.'.format(path))
            return (p.returncode, b'', b'')
        if p.returncode == 129:
            if show_warning:
                warnings.warn('Your git looks old (does it support {0}?); '
                              'consider upgrading to v1.7.2 or '
                              'later.'.format(cmd[0]))
            return (p.returncode, stdout, stderr)
        elif p.returncode != 0:
            if show_warning:
                warnings.warn('Git failed while determining revision '
                              'count: {0}'.format(_decode_stdio(stderr)))
            return (p.returncode, stdout, stderr)

        return p.returncode, stdout, stderr

    returncode, stdout, stderr = run_git(cmd)

    if not sha and returncode == 129:
        # git returns 129 if a command option failed to parse; in
        # particular this could happen in git versions older than 1.7.2
        # where the --count option is not supported
        # Also use --abbrev-commit and --abbrev=0 to display the minimum
        # number of characters needed per-commit (rather than the full hash)
        cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']
        returncode, stdout, stderr = run_git(cmd)
        # Fall back on the old method of getting all revisions and counting
        # the lines
        if returncode == 0:
            return str(stdout.count(b'\n'))
        else:
            return ''
    elif sha:
        return _decode_stdio(stdout)[:40]
    else:
        return _decode_stdio(stdout).strip()
python
{ "resource": "" }
q5611
_get_repo_path
train
def _get_repo_path(pathname, levels=None):
    """
    Given a file or directory name, determine the root of the git repository
    this path is under.  If given, this won't look any higher than ``levels``
    (that is, if ``levels=0`` then the given path must be the root of the
    git repository and is returned if so).

    Returns `None` if the given path could not be determined to belong to a
    git repo.
    """
    if os.path.isfile(pathname):
        current_dir = os.path.abspath(os.path.dirname(pathname))
    elif os.path.isdir(pathname):
        current_dir = os.path.abspath(pathname)
    else:
        return None

    current_level = 0

    while levels is None or current_level <= levels:
        if os.path.exists(os.path.join(current_dir, '.git')):
            return current_dir

        current_level += 1
        if current_dir == os.path.dirname(current_dir):
            break

        current_dir = os.path.dirname(current_dir)

    return None
python
{ "resource": "" }
q5612
translate
train
def translate(
        nucleotide_sequence,
        first_codon_is_start=True,
        to_stop=True,
        truncate=False):
    """Translates cDNA coding sequence into amino acid protein sequence.

    Should typically start with a start codon but allowing non-methionine
    first residues since the CDS we're translating might have been affected
    by a start loss mutation.

    The sequence may include the 3' UTR but will stop translation at the
    first encountered stop codon.

    Parameters
    ----------
    nucleotide_sequence : BioPython Seq
        cDNA sequence

    first_codon_is_start : bool
        Treat the beginning of nucleotide_sequence as a start codon
        (translated as methionine)

    to_stop : bool
        Stop translating at the first stop codon

    truncate : bool
        Truncate sequence if it's not a multiple of 3 (default = False)

    Returns BioPython Seq of amino acids
    """
    if not isinstance(nucleotide_sequence, Seq):
        nucleotide_sequence = Seq(nucleotide_sequence)

    if truncate:
        # if sequence isn't a multiple of 3, truncate it so BioPython
        # doesn't complain
        n_nucleotides = int(len(nucleotide_sequence) / 3) * 3
        nucleotide_sequence = nucleotide_sequence[:n_nucleotides]
    else:
        n_nucleotides = len(nucleotide_sequence)

    assert n_nucleotides % 3 == 0, \
        ("Expected nucleotide sequence to be multiple of 3"
         " but got %s of length %d") % (
            nucleotide_sequence,
            n_nucleotides)

    # passing cds=False to translate since we may want to deal with premature
    # stop codons
    protein_sequence = nucleotide_sequence.translate(to_stop=to_stop, cds=False)

    if first_codon_is_start and (
            len(protein_sequence) == 0 or protein_sequence[0] != "M"):
        if nucleotide_sequence[:3] in START_CODONS:
            # TODO: figure out when these should be made into methionines
            # and when left as whatever amino acid they normally code for
            # e.g. Leucine start codons
            # See: DOI: 10.1371/journal.pbio.0020397
            return "M" + protein_sequence[1:]
        else:
            raise ValueError(
                ("Expected first codon of %s to be start codon"
                 " (one of %s) but got %s") % (
                    protein_sequence[:10],
                    START_CODONS,
                    nucleotide_sequence))

    return protein_sequence
python
{ "resource": "" }
q5613
main
train
def main(args_list=None):
    """
    Script which loads variants and annotates them with overlapping genes.

    Example usage:
        varcode-genes
            --vcf mutect.vcf \
            --vcf strelka.vcf \
            --maf tcga_brca.maf \
            --variant chr1 498584 C G \
            --json-variants more_variants.json
    """
    print_version_info()
    if args_list is None:
        args_list = sys.argv[1:]
    args = arg_parser.parse_args(args_list)
    variants = variant_collection_from_args(args)
    variants_dataframe = variants.to_dataframe()
    logger.info('\n%s', variants_dataframe)
    if args.output_csv:
        variants_dataframe.to_csv(args.output_csv, index=False)
python
{ "resource": "" }
q5614
load_maf_dataframe
train
def load_maf_dataframe(path, nrows=None, raise_on_error=True, encoding=None):
    """
    Load the guaranteed columns of a TCGA MAF file into a DataFrame

    Parameters
    ----------
    path : str
        Path to MAF file

    nrows : int
        Optional limit to number of rows loaded

    raise_on_error : bool
        Raise an exception upon encountering an error or log an error

    encoding : str, optional
        Encoding to use for UTF when reading MAF file.
    """
    require_string(path, "Path to MAF")

    n_basic_columns = len(MAF_COLUMN_NAMES)

    # pylint: disable=no-member
    # pylint gets confused by read_csv
    df = pandas.read_csv(
        path,
        comment="#",
        sep="\t",
        low_memory=False,
        skip_blank_lines=True,
        header=0,
        nrows=nrows,  # pass through the documented row limit
        encoding=encoding)

    if len(df.columns) < n_basic_columns:
        error_message = (
            "Too few columns in MAF file %s, expected %d but got %d : %s" % (
                path, n_basic_columns, len(df.columns), df.columns))
        if raise_on_error:
            raise ValueError(error_message)
        else:
            logging.warning(error_message)

    # check each pair of expected/actual column names to make sure they match
    for expected, actual in zip(MAF_COLUMN_NAMES, df.columns):
        if expected != actual:
            # MAFs in the wild have capitalization differences in their
            # column names, normalize them to always use the names above
            if expected.lower() == actual.lower():
                # using DataFrame.rename in Python 2.7.x doesn't seem to
                # work for some files, possibly because Pandas treats
                # unicode vs. str columns as different?
                df[expected] = df[actual]
                del df[actual]
            else:
                error_message = (
                    "Expected column %s but got %s" % (expected, actual))
                if raise_on_error:
                    raise ValueError(error_message)
                else:
                    logging.warning(error_message)

    return df
python
{ "resource": "" }
q5615
load_maf
train
def load_maf(
        path,
        optional_cols=[],
        sort_key=variant_ascending_position_sort_key,
        distinct=True,
        raise_on_error=True,
        encoding=None):
    """
    Load reference name and Variant objects from MAF filename.

    Parameters
    ----------
    path : str
        Path to MAF (*.maf).

    optional_cols : list, optional
        A list of MAF columns to include as metadata if they are present
        in the MAF. Does not result in an error if those columns are not
        present.

    sort_key : fn
        Function which maps each element to a sorting criterion.
        Set to None to not to sort the variants.

    distinct : bool
        Don't keep repeated variants

    raise_on_error : bool
        Raise an exception upon encountering an error or just log a warning.

    encoding : str, optional
        Encoding to use for UTF when reading MAF file.
    """
    # pylint: disable=no-member
    # pylint gets confused by read_csv inside load_maf_dataframe
    maf_df = load_maf_dataframe(
        path, raise_on_error=raise_on_error, encoding=encoding)

    if len(maf_df) == 0 and raise_on_error:
        raise ValueError("Empty MAF file %s" % path)

    ensembl_objects = {}
    variants = []
    metadata = {}
    for _, x in maf_df.iterrows():
        contig = x.Chromosome
        if isnull(contig):
            error_message = "Invalid contig name: %s" % (contig,)
            if raise_on_error:
                raise ValueError(error_message)
            else:
                logging.warning(error_message)
                continue

        start_pos = x.Start_Position
        ref = x.Reference_Allele

        # it's possible in a MAF file to have multiple Ensembl releases
        # mixed in a single MAF file (the genome assembly is
        # specified by the NCBI_Build column)
        ncbi_build = x.NCBI_Build
        if ncbi_build in ensembl_objects:
            ensembl = ensembl_objects[ncbi_build]
        else:
            if isinstance(ncbi_build, int):
                reference_name = "B%d" % ncbi_build
            else:
                reference_name = str(ncbi_build)
            ensembl = infer_genome(reference_name)
            ensembl_objects[ncbi_build] = ensembl

        # have to try both Tumor_Seq_Allele1 and Tumor_Seq_Allele2
        # to figure out which is different from the reference allele
        if x.Tumor_Seq_Allele1 != ref:
            alt = x.Tumor_Seq_Allele1
        else:
            if x.Tumor_Seq_Allele2 == ref:
                error_message = (
                    "Both tumor alleles agree with reference %s: %s" % (
                        ref, x,))
                if raise_on_error:
                    raise ValueError(error_message)
                else:
                    logging.warning(error_message)
                    continue
            alt = x.Tumor_Seq_Allele2

        variant = Variant(
            contig,
            start_pos,
            str(ref),
            str(alt),
            ensembl=ensembl)

        # keep metadata about the variant and its TCGA annotation
        metadata[variant] = {
            'Hugo_Symbol': x.Hugo_Symbol,
            'Center': x.Center,
            'Strand': x.Strand,
            'Variant_Classification': x.Variant_Classification,
            'Variant_Type': x.Variant_Type,
            'dbSNP_RS': x.dbSNP_RS,
            'dbSNP_Val_Status': x.dbSNP_Val_Status,
            'Tumor_Sample_Barcode': x.Tumor_Sample_Barcode,
            'Matched_Norm_Sample_Barcode': x.Matched_Norm_Sample_Barcode,
        }

        for optional_col in optional_cols:
            if optional_col in x:
                metadata[variant][optional_col] = x[optional_col]

        variants.append(variant)

    return VariantCollection(
        variants=variants,
        source_to_metadata_dict={path: metadata},
        sort_key=sort_key,
        distinct=distinct)
python
{ "resource": "" }
q5616
apply_to_field_if_exists
train
def apply_to_field_if_exists(effect, field_name, fn, default):
    """
    Apply function to specified field of effect if it is not None,
    otherwise return default.
    """
    value = getattr(effect, field_name, None)
    if value is None:
        return default
    else:
        return fn(value)
python
{ "resource": "" }
q5617
apply_to_transcript_if_exists
train
def apply_to_transcript_if_exists(effect, fn, default):
    """
    Apply function to transcript associated with effect, if it exists,
    otherwise return default.
    """
    return apply_to_field_if_exists(
        effect=effect,
        field_name="transcript",
        fn=fn,
        default=default)
python
{ "resource": "" }
q5618
gene_id_of_associated_transcript
train
def gene_id_of_associated_transcript(effect):
    """
    Ensembl gene ID of transcript associated with effect, returns None
    if effect does not have a transcript.
    """
    return apply_to_transcript_if_exists(
        effect=effect,
        fn=lambda t: t.gene_id,
        default=None)
python
{ "resource": "" }
q5619
parse_transcript_number
train
def parse_transcript_number(effect):
    """
    Try to parse the number at the end of a transcript name associated with
    an effect, e.g. TP53-001 returns the integer 1.

    Parameters
    ----------
    effect : subclass of MutationEffect

    Returns int
    """
    name = name_of_associated_transcript(effect)
    if "-" not in name:
        return 0
    parts = name.split("-")
    last_part = parts[-1]
    if last_part.isdigit():
        return int(last_part)
    else:
        return 0
python
{ "resource": "" }
q5620
select_between_exonic_splice_site_and_alternate_effect
train
def select_between_exonic_splice_site_and_alternate_effect(effect):
    """
    If the given effect is an ExonicSpliceSite then it might contain
    an alternate effect of higher priority. In that case, return the
    alternate effect. Otherwise, this acts as an identity function.
    """
    if effect.__class__ is not ExonicSpliceSite:
        return effect
    if effect.alternate_effect is None:
        return effect
    splice_priority = effect_priority(effect)
    alternate_priority = effect_priority(effect.alternate_effect)
    if splice_priority > alternate_priority:
        return effect
    else:
        return effect.alternate_effect
python
{ "resource": "" }
q5621
keep_max_priority_effects
train
def keep_max_priority_effects(effects):
    """
    Given a list of effects, only keep the ones with the maximum priority
    effect type.

    Parameters
    ----------
    effects : list of MutationEffect subclasses

    Returns list of same length or shorter
    """
    # materialize the priorities into a list: in Python 3, `map` returns a
    # one-shot iterator, which `max` would exhaust before the `zip` below
    priority_values = list(map(effect_priority, effects))
    max_priority = max(priority_values)
    return [e for (e, p) in zip(effects, priority_values) if p == max_priority]
python
{ "resource": "" }
q5622
filter_pipeline
train
def filter_pipeline(effects, filters):
    """
    Apply each filter to the effect list sequentially. If any filter
    returns zero values then ignore it. As soon as only one effect is left,
    return it.

    Parameters
    ----------
    effects : list of MutationEffect subclass instances

    filters : list of functions
        Each function takes a list of effects and returns a list of effects

    Returns list of effects
    """
    for filter_fn in filters:
        filtered_effects = filter_fn(effects)
        if len(filtered_effects) == 1:
            # only one effect left after filtering, return it immediately
            return filtered_effects
        elif len(filtered_effects) > 1:
            # keep the filtered subset; a filter that drops every effect
            # is ignored
            effects = filtered_effects
    return effects
python
{ "resource": "" }
q5623
top_priority_effect_for_single_gene
train
def top_priority_effect_for_single_gene(effects):
    """
    For effects which are from the same gene, check to see if there is
    a canonical transcript with both the maximum length CDS and maximum
    length full transcript sequence. If not, then use number of exons and
    transcript name as tie-breaking features.

    Parameters
    ----------
    effects : list of MutationEffect subclass instances

    Returns single effect object
    """
    # first filter effects to keep those on
    #   1) maximum priority effects
    #   2) protein coding genes
    #   3) protein coding transcripts
    #   4) complete transcripts
    #
    # If any of these filters drop all the effects then we move on to the
    # next filtering step.
    effects = filter_pipeline(
        effects=effects,
        filters=[
            keep_max_priority_effects,
            keep_effects_on_protein_coding_genes,
            keep_effects_on_protein_coding_transcripts,
            keep_effects_on_complete_transcripts,
        ],
    )
    if len(effects) == 1:
        return effects[0]

    # compare CDS length and transcript lengths of remaining effects;
    # if one effect has the maximum of both categories then return it
    cds_lengths = [cds_length_of_associated_transcript(e) for e in effects]
    max_cds_length = max(cds_lengths)
    # get set of indices of all effects with maximum CDS length
    max_cds_length_indices = {
        i
        for (i, l) in enumerate(cds_lengths)
        if l == max_cds_length
    }
    seq_lengths = [length_of_associated_transcript(e) for e in effects]
    max_seq_length = max(seq_lengths)
    # get set of indices for all effects whose associated transcript
    # has maximum sequence length
    max_seq_length_indices = {
        i
        for (i, l) in enumerate(seq_lengths)
        if l == max_seq_length
    }
    # which effects have transcripts with both the longest CDS and
    # longest full transcript sequence?
    intersection_of_indices = \
        max_cds_length_indices.intersection(max_seq_length_indices)
    n_candidates = len(intersection_of_indices)
    if n_candidates == 1:
        best_index = intersection_of_indices.pop()
        return effects[best_index]
    elif n_candidates == 0:
        # if set of max CDS effects and max sequence length effects is
        # disjoint then let's try to do the tie-breaking sort over their
        # union
        union_of_indices = max_cds_length_indices.union(max_seq_length_indices)
        candidate_effects = [effects[i] for i in union_of_indices]
    else:
        # if multiple effects have transcripts with the max CDS length and
        # the max full sequence length then run the tie-breaking sort
        # over all these candidates
        candidate_effects = [effects[i] for i in intersection_of_indices]

    # break ties by number of exons, whether the name of the transcript ends
    # in "01", and, all else being equal, prefer transcript names that end
    # with lower numbers
    return max(
        candidate_effects,
        key=tie_breaking_sort_key_for_single_gene_effects)
python
{ "resource": "" }
q5624
VariantCollection.filter_by_transcript_expression
train
def filter_by_transcript_expression(
        self,
        transcript_expression_dict,
        min_expression_value=0.0):
    """
    Filters variants down to those which overlap a transcript whose
    expression value in the transcript_expression_dict argument is greater
    than min_expression_value.

    Parameters
    ----------
    transcript_expression_dict : dict
        Dictionary mapping Ensembl transcript IDs to expression estimates
        (either FPKM or TPM)

    min_expression_value : float
        Threshold above which we'll keep a variant in the result collection
    """
    return self.filter_any_above_threshold(
        multi_key_fn=lambda variant: variant.transcript_ids,
        value_dict=transcript_expression_dict,
        threshold=min_expression_value)
python
{ "resource": "" }
q5625
VariantCollection.filter_by_gene_expression
train
def filter_by_gene_expression(
        self,
        gene_expression_dict,
        min_expression_value=0.0):
    """
    Filters variants down to those which overlap a gene whose expression
    value in the gene_expression_dict argument is greater than
    min_expression_value.

    Parameters
    ----------
    gene_expression_dict : dict
        Dictionary mapping Ensembl gene IDs to expression estimates
        (either FPKM or TPM)

    min_expression_value : float
        Threshold above which we'll keep a variant in the result collection
    """
    return self.filter_any_above_threshold(
        multi_key_fn=lambda variant: variant.gene_ids,
        value_dict=gene_expression_dict,
        threshold=min_expression_value)
python
{ "resource": "" }
q5626
VariantCollection.exactly_equal
train
def exactly_equal(self, other):
    """
    Comparison between VariantCollection instances that takes into account
    the info field of Variant instances.

    Returns
    ----------
    True if the variants in this collection equal the variants in the other
    collection. The Variant.info fields are included in the comparison.
    """
    return (
        self.__class__ == other.__class__ and
        len(self) == len(other) and
        all(x.exactly_equal(y) for (x, y) in zip(self, other)))
python
{ "resource": "" }
q5627
VariantCollection._combine_variant_collections
train
def _combine_variant_collections(cls, combine_fn, variant_collections, kwargs):
    """
    Create a single VariantCollection from multiple different collections.

    Parameters
    ----------
    cls : class
        Should be VariantCollection

    combine_fn : function
        Function which takes any number of sets of variants and returns
        some combination of them (typically union or intersection).

    variant_collections : tuple of VariantCollection

    kwargs : dict
        Optional dictionary of keyword arguments to pass to the initializer
        for VariantCollection.
    """
    kwargs["variants"] = combine_fn(*[set(vc) for vc in variant_collections])
    kwargs["source_to_metadata_dict"] = cls._merge_metadata_dictionaries(
        [vc.source_to_metadata_dict for vc in variant_collections])
    kwargs["sources"] = set.union(
        *([vc.sources for vc in variant_collections]))
    for key, value in variant_collections[0].to_dict().items():
        # If some optional parameter isn't explicitly specified as an
        # argument to union() or intersection() then use the same value
        # as the first VariantCollection.
        #
        # I'm doing this so that the meaning of VariantCollection.union
        # and VariantCollection.intersection with a single argument is
        # the identity function (rather than setting optional parameters
        # to their default values).
        if key not in kwargs:
            kwargs[key] = value
    return cls(**kwargs)
python
{ "resource": "" }
q5628
VariantCollection.union
train
def union(self, *others, **kwargs):
    """
    Returns the union of variants in several VariantCollection objects.
    """
    return self._combine_variant_collections(
        combine_fn=set.union,
        variant_collections=(self,) + others,
        kwargs=kwargs)
python
{ "resource": "" }
q5629
VariantCollection.intersection
train
def intersection(self, *others, **kwargs):
    """
    Returns the intersection of variants in several VariantCollection
    objects.
    """
    return self._combine_variant_collections(
        combine_fn=set.intersection,
        variant_collections=(self,) + others,
        kwargs=kwargs)
python
{ "resource": "" }
q5630
VariantCollection.to_dataframe
train
def to_dataframe(self):
    """Build a DataFrame from this variant collection"""
    def row_from_variant(variant):
        return OrderedDict([
            ("chr", variant.contig),
            ("start", variant.original_start),
            ("ref", variant.original_ref),
            ("alt", variant.original_alt),
            ("gene_name", ";".join(variant.gene_names)),
            ("gene_id", ";".join(variant.gene_ids))
        ])
    rows = [row_from_variant(v) for v in self]
    if len(rows) == 0:
        # TODO: return a DataFrame with the appropriate columns
        return pd.DataFrame()
    return pd.DataFrame.from_records(rows, columns=rows[0].keys())
python
{ "resource": "" }
q5631
trim_shared_suffix
train
def trim_shared_suffix(ref, alt):
    """
    Reuse the `trim_shared_prefix` function above to implement similar
    functionality for string suffixes.

    Given ref='ABC' and alt='BC', we first reverse both strings:
        reverse_ref = 'CBA'
        reverse_alt = 'CB'
    and then the result of calling trim_shared_prefix will be:
        ('A', '', 'CB')
    We then reverse all three of the result strings to get back
    the shared suffix and both prefixes leading up to it:
        ('A', '', 'BC')
    """
    n_ref = len(ref)
    n_alt = len(alt)
    n_min = min(n_ref, n_alt)
    i = 0
    while i < n_min and ref[-i - 1] == alt[-i - 1]:
        i += 1
    # i is the length of the shared suffix
    if i == 0:
        return (ref, alt, '')
    return (ref[:-i], alt[:-i], ref[-i:])
python
{ "resource": "" }
q5632
trim_shared_flanking_strings
train
def trim_shared_flanking_strings(ref, alt):
    """
    Given two nucleotide or amino acid strings, identify whether they have
    a common prefix and a common suffix, and return their unique components
    along with the prefix and suffix.

    For example, if the input ref = "SYFFQGR" and alt = "SYMLLFIFQGR"
    then the result will be:
        ("F", "MLLFI", "SY", "FQGR")
    """
    ref, alt, prefix = trim_shared_prefix(ref, alt)
    ref, alt, suffix = trim_shared_suffix(ref, alt)
    return ref, alt, prefix, suffix
python
{ "resource": "" }
q5633
main
train
def main(args_list=None):
    """
    Script which loads variants and annotates them with overlapping genes
    and predicted coding effects.

    Example usage:
        varcode
            --vcf mutect.vcf \
            --vcf strelka.vcf \
            --maf tcga_brca.maf \
            --variant chr1 498584 C G \
            --json-variants more_variants.json
    """
    print_version_info()
    if args_list is None:
        args_list = sys.argv[1:]
    args = arg_parser.parse_args(args_list)
    variants = variant_collection_from_args(args)
    effects = variants.effects()
    if args.only_coding:
        effects = effects.drop_silent_and_noncoding()
    if args.one_per_variant:
        variant_to_effect_dict = effects.top_priority_effect_per_variant()
        effects = effects.clone_with_new_elements(
            list(variant_to_effect_dict.values()))
    effects_dataframe = effects.to_dataframe()
    logger.info('\n%s', effects)
    if args.output_csv:
        effects_dataframe.to_csv(args.output_csv, index=False)
python
{ "resource": "" }
q5634
get_codons
train
def get_codons(
        variant,
        trimmed_cdna_ref,
        trimmed_cdna_alt,
        sequence_from_start_codon,
        cds_offset):
    """
    Returns indices of first and last reference codons affected by the
    variant, as well as the actual sequence of the mutated codons which
    replace those reference codons.

    Parameters
    ----------
    variant : Variant

    trimmed_cdna_ref : str
        Trimmed reference cDNA nucleotides affected by the variant

    trimmed_cdna_alt : str
        Trimmed alternate cDNA nucleotides which replace the reference

    sequence_from_start_codon : str
        cDNA nucleotide coding sequence

    cds_offset : int
        Integer offset into the coding sequence where ref is replaced
        with alt
    """
    # index (starting from 0) of first affected reference codon
    ref_codon_start_offset = cds_offset // 3
    # which nucleotide of the first codon got changed?
    nucleotide_offset_into_first_ref_codon = cds_offset % 3

    n_ref_nucleotides = len(trimmed_cdna_ref)
    if n_ref_nucleotides == 0:
        if nucleotide_offset_into_first_ref_codon == 2:
            # if we're inserting between codons
            ref_codon_end_offset = ref_codon_start_offset
        else:
            # inserting inside a reference codon
            ref_codon_end_offset = ref_codon_start_offset + 1
        ref_codons = sequence_from_start_codon[
            ref_codon_start_offset * 3:ref_codon_end_offset * 3]
        # split the reference codon into nucleotides before/after insertion
        prefix = ref_codons[:nucleotide_offset_into_first_ref_codon + 1]
        suffix = ref_codons[nucleotide_offset_into_first_ref_codon + 1:]
    else:
        ref_codon_end_offset = (cds_offset + n_ref_nucleotides - 1) // 3 + 1
        # codons in the reference sequence
        ref_codons = sequence_from_start_codon[
            ref_codon_start_offset * 3:ref_codon_end_offset * 3]

        # We construct the new codons by taking the unmodified prefix
        # of the first ref codon, the unmodified suffix of the last ref
        # codon and sticking the alt nucleotides in between.
        # Since this is supposed to be an in-frame mutation, the
        # concatenated nucleotide string is expected to have a length that
        # is a multiple of three.
        prefix = ref_codons[:nucleotide_offset_into_first_ref_codon]

        offset_in_last_ref_codon = (cds_offset + n_ref_nucleotides - 1) % 3

        if offset_in_last_ref_codon == 0:
            suffix = ref_codons[-2:]
        elif offset_in_last_ref_codon == 1:
            suffix = ref_codons[-1:]
        else:
            suffix = ""
    mutant_codons = prefix + trimmed_cdna_alt + suffix
    assert len(mutant_codons) % 3 == 0, \
        "Expected in-frame mutation but got %s (length = %d)" % (
            mutant_codons, len(mutant_codons))
    return ref_codon_start_offset, ref_codon_end_offset, mutant_codons
python
{ "resource": "" }
q5635
predict_in_frame_coding_effect
train
def predict_in_frame_coding_effect(
        variant,
        transcript,
        trimmed_cdna_ref,
        trimmed_cdna_alt,
        sequence_from_start_codon,
        cds_offset):
    """Coding effect of an in-frame nucleotide change

    Parameters
    ----------
    variant : Variant

    transcript : Transcript

    trimmed_cdna_ref : str
        Reference nucleotides from the coding sequence of the transcript

    trimmed_cdna_alt : str
        Nucleotides to insert in place of the reference nucleotides

    sequence_from_start_codon : Bio.Seq or str
        Transcript sequence from the CDS start codon (including the 3' UTR).
        This sequence includes the 3' UTR since a mutation may delete the
        stop codon and we'll have to translate past the normal end of the
        CDS to determine the new protein sequence.

    cds_offset : int
        Index of first ref nucleotide, starting from 0 = beginning of coding
        sequence. If variant is a pure insertion (no ref nucleotides) then
        this argument indicates the offset *after* which to insert the alt
        nucleotides.
    """
    ref_codon_start_offset, ref_codon_end_offset, mutant_codons = get_codons(
        variant=variant,
        trimmed_cdna_ref=trimmed_cdna_ref,
        trimmed_cdna_alt=trimmed_cdna_alt,
        sequence_from_start_codon=sequence_from_start_codon,
        cds_offset=cds_offset)
    mutation_affects_start_codon = (ref_codon_start_offset == 0)

    if mutation_affects_start_codon and mutant_codons[:3] not in START_CODONS:
        # if we changed a start codon to something else then
        # we no longer know where the protein begins (or even in
        # what frame).
        # TODO: use the Kozak consensus sequence or a predictive model
        # to identify the most likely start site
        return StartLoss(
            variant=variant,
            transcript=transcript)

    # rely on Ensembl's annotation of the protein sequence since we can't
    # easily predict whether the starting nucleotide is a methionine
    # (most common) or leucine
    aa_ref = transcript.protein_sequence[
        ref_codon_start_offset:ref_codon_end_offset]

    reference_protein_length = len(transcript.protein_sequence)

    aa_alt, mutant_stop_codon_index, using_three_prime_utr = \
        translate_in_frame_mutation(
            transcript=transcript,
            ref_codon_start_offset=ref_codon_start_offset,
            ref_codon_end_offset=ref_codon_end_offset,
            mutant_codons=mutant_codons)

    mutant_codons_contain_stop = mutant_stop_codon_index != -1

    # trim shared subsequences at the start and end of reference
    # and mutated amino acid sequences
    aa_ref, aa_alt, shared_prefix, shared_suffix = \
        trim_shared_flanking_strings(
            aa_ref,
            aa_alt)

    n_aa_ref = len(aa_ref)
    n_aa_alt = len(aa_alt)
    n_aa_shared = len(shared_prefix)

    is_insertion = (ref_codon_start_offset == ref_codon_end_offset)

    # index of first amino acid which is different from the reference
    aa_mutation_start_offset = (
        ref_codon_start_offset + n_aa_shared + is_insertion)

    if mutant_codons_contain_stop:
        mutant_stop_codon_index += n_aa_shared

    if mutation_affects_start_codon and (aa_ref == aa_alt):
        # Substitution between start codons gets special treatment since,
        # though superficially synonymous, this could still potentially
        # cause a start loss / change in reading frame and might be worth
        # closer scrutiny
        return AlternateStartCodon(
            variant=variant,
            transcript=transcript,
            ref_codon=transcript.sequence[:3],
            alt_codon=mutant_codons[:3])

    n_ref_amino_acids_after_mutated_site = (
        reference_protein_length - aa_mutation_start_offset - 1)

    if mutant_codons_contain_stop and (
            n_aa_alt <= n_ref_amino_acids_after_mutated_site):
        # if the new coding sequence contains a stop codon, then this is a
        # PrematureStop mutation if it decreases the length of the protein
        return PrematureStop(
            variant=variant,
            transcript=transcript,
            aa_mutation_start_offset=aa_mutation_start_offset,
            aa_ref=aa_ref,
            aa_alt=aa_alt)

    if (aa_mutation_start_offset > reference_protein_length) or (
            n_aa_ref == n_aa_alt == 0):
        # if inserted nucleotides go after original stop codon or if nothing
        # is changed in the amino acid sequence then this is a Silent variant
        return Silent(
            variant=variant,
            transcript=transcript,
            aa_pos=aa_mutation_start_offset,
            aa_ref=shared_prefix + shared_suffix)
    elif using_three_prime_utr:
        # if non-silent mutation is at the end of the protein then
        # should be a stop-loss
        return StopLoss(
            variant,
            transcript,
            aa_ref=aa_ref,
            aa_alt=aa_alt)
    elif n_aa_alt == 0:
        return Deletion(
            variant,
            transcript,
            aa_mutation_start_offset=aa_mutation_start_offset,
            aa_ref=aa_ref)
    elif n_aa_ref == 0:
        return Insertion(
            variant,
            transcript,
            aa_mutation_start_offset=aa_mutation_start_offset,
            aa_alt=aa_alt)
    elif n_aa_ref == n_aa_alt == 1:
        # simple substitution e.g. p.V600E
        return Substitution(
            variant,
            transcript,
            aa_mutation_start_offset=aa_mutation_start_offset,
            aa_ref=aa_ref,
            aa_alt=aa_alt)
    else:
        # multiple amino acids were substituted e.g. p.VQQ39FF
        return ComplexSubstitution(
            variant,
            transcript,
            aa_mutation_start_offset=aa_mutation_start_offset,
            aa_ref=aa_ref,
            aa_alt=aa_alt)
python
{ "resource": "" }
q5636
insert_before
train
def insert_before(sequence, offset, new_residues):
    """Mutate the given sequence by inserting the string `new_residues`
    before `offset`.

    Parameters
    ----------
    sequence : sequence
        String of amino acids or DNA bases

    offset : int
        Base 0 offset from start of sequence, before which we should insert
        `new_residues`.

    new_residues : sequence
    """
    assert 0 < offset <= len(sequence), \
        "Invalid position %d for sequence of length %d" % (
            offset, len(sequence))
    prefix = sequence[:offset]
    suffix = sequence[offset:]
    return prefix + new_residues + suffix
python
{ "resource": "" }
q5637
insert_after
train
def insert_after(sequence, offset, new_residues):
    """Mutate the given sequence by inserting the string `new_residues`
    after `offset`.

    Parameters
    ----------
    sequence : sequence
        String of amino acids or DNA bases

    offset : int
        Base 0 offset from start of sequence, after which we should insert
        `new_residues`.

    new_residues : sequence
    """
    assert 0 <= offset < len(sequence), \
        "Invalid position %d for sequence of length %d" % (
            offset, len(sequence))
    prefix = sequence[:offset + 1]
    suffix = sequence[offset + 1:]
    return prefix + new_residues + suffix
python
{ "resource": "" }
q5638
substitute
train
def substitute(sequence, offset, ref, alt):
    """Mutate a sequence by inserting the given `alt` in place of `ref`
    at the given `offset`.

    Parameters
    ----------
    sequence : sequence
        String of amino acids or DNA bases

    offset : int
        Base 0 offset from start of `sequence`

    ref : sequence or str
        What do we expect to find at the position?

    alt : sequence or str
        Alternate sequence to insert
    """
    n_ref = len(ref)
    sequence_ref = sequence[offset:offset + n_ref]
    assert str(sequence_ref) == str(ref), \
        "Reference %s at offset %d != expected reference %s" % \
        (sequence_ref, offset, ref)
    prefix = sequence[:offset]
    suffix = sequence[offset + n_ref:]
    return prefix + alt + suffix
python
{ "resource": "" }
q5639
infer_genome
train
def infer_genome(genome_object_string_or_int):
    """
    If given an integer, return associated human EnsemblRelease for that
    Ensembl version.

    If given a string, return latest EnsemblRelease which has a reference
    of the same name.

    If given a PyEnsembl Genome, simply return it.
    """
    if isinstance(genome_object_string_or_int, Genome):
        return genome_object_string_or_int
    if is_integer(genome_object_string_or_int):
        return cached_release(genome_object_string_or_int)
    elif is_string(genome_object_string_or_int):
        # first infer the canonical reference name, e.g. mapping hg19 ->
        # GRCh37, and then get the associated PyEnsembl Genome object
        reference_name = infer_reference_name(genome_object_string_or_int)
        return genome_for_reference_name(reference_name)
    else:
        raise TypeError(
            ("Expected genome to be an int, string, or pyensembl.Genome "
             "instance, got %s : %s") % (
                str(genome_object_string_or_int),
                type(genome_object_string_or_int)))
python
{ "resource": "" }
q5640
Variant.genes
train
def genes(self):
    """
    Return Gene object for all genes which overlap this variant.
    """
    if self._genes is None:
        self._genes = self.ensembl.genes_at_locus(
            self.contig, self.start, self.end)
    return self._genes
python
{ "resource": "" }
q5641
Variant.is_insertion
train
def is_insertion(self):
    """
    Does this variant represent the insertion of nucleotides into the
    reference genome?
    """
    # An insertion would appear in a VCF like C>CT, so that the
    # alternate allele starts with the reference nucleotides.
    # Since the nucleotide strings may be normalized in the constructor,
    # it's worth noting that the normalized form of this variant would be
    # ''>'T', so that 'T'.startswith('') still holds.
    return (len(self.ref) < len(self.alt)) and self.alt.startswith(self.ref)
python
{ "resource": "" }
q5642
Variant.is_deletion
train
def is_deletion(self):
    """
    Does this variant represent the deletion of nucleotides from the
    reference genome?
    """
    # A deletion would appear in a VCF like CT>C, so that the
    # reference allele starts with the alternate nucleotides.
    # This is true even in the normalized case, where the alternate
    # nucleotides are an empty string.
    return (len(self.ref) > len(self.alt)) and self.ref.startswith(self.alt)
python
{ "resource": "" }
q5643
Variant.is_snv
train
def is_snv(self):
    """Is the variant a single nucleotide variant"""
    return (len(self.ref) == len(self.alt) == 1) and (self.ref != self.alt)
python
{ "resource": "" }
q5644
Variant.is_transition
train
def is_transition(self):
    """Is this variant a pyrimidine to pyrimidine change or purine to
    purine change"""
    return self.is_snv and is_purine(self.ref) == is_purine(self.alt)
python
{ "resource": "" }
q5645
Variant.is_transversion
train
def is_transversion(self):
    """Is this variant a pyrimidine to purine change or vice versa"""
    return self.is_snv and is_purine(self.ref) != is_purine(self.alt)
python
{ "resource": "" }
q5646
variant_overlaps_interval
train
def variant_overlaps_interval(
        variant_start,
        n_ref_bases,
        interval_start,
        interval_end):
    """
    Does a variant overlap a given interval on the same chromosome?

    Parameters
    ----------
    variant_start : int
        Inclusive base-1 position of variant's starting location
        (or location before an insertion)

    n_ref_bases : int
        Number of reference bases affected by variant (used to compute
        end coordinate or determine whether variant is an insertion)

    interval_start : int
        Interval's inclusive base-1 start position

    interval_end : int
        Interval's inclusive base-1 end position
    """
    if n_ref_bases == 0:
        # insertions only overlap intervals which start before and
        # end after the insertion point, they must be fully contained
        # by the other interval
        return interval_start <= variant_start and interval_end >= variant_start
    variant_end = variant_start + n_ref_bases
    # overlap means the other interval starts before this variant ends
    # and the interval ends after this variant starts
    return interval_start <= variant_end and interval_end >= variant_start
python
{ "resource": "" }
q5647
changes_exonic_splice_site
train
def changes_exonic_splice_site(
        transcript_offset,
        transcript,
        transcript_ref,
        transcript_alt,
        exon_start_offset,
        exon_end_offset,
        exon_number):
    """Does the given exonic mutation of a particular transcript change a
    splice site?

    Parameters
    ----------
    transcript_offset : int
        Offset from start of transcript of first reference nucleotide
        (or the last nucleotide before an insertion)

    transcript : pyensembl.Transcript

    transcript_ref : str
        Reference nucleotides

    transcript_alt : str
        Alternate nucleotides

    exon_start_offset : int
        Start offset of exon relative to beginning of transcript

    exon_end_offset : int
        End offset of exon relative to beginning of transcript

    exon_number : int
        Which exon in the order they form the transcript
    """
    # first we're going to make sure the variant doesn't disrupt the
    # splicing sequences we got from Divina et al.'s
    #   Ab initio prediction of mutation-induced cryptic
    #   splice-site activation and exon skipping
    # (http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/)
    #
    # 5' splice site: MAG|GURAGU consensus
    #   M is A or C; R is purine; | is the exon-intron boundary
    #
    # 3' splice site: YAG|R
    if exon_number > 1 and transcript_offset == exon_start_offset:
        # if this is any exon past the first, check to see if it lost
        # the purine on its left side
        #
        # the 3' splice site sequence has just a single purine on
        # the exon side
        if len(transcript_ref) > 0 and transcript_ref[0] in PURINE_NUCLEOTIDES:
            if len(transcript_alt) > 0:
                if transcript_alt[0] not in PURINE_NUCLEOTIDES:
                    return True
            else:
                # if the mutation is a deletion, are there ref nucleotides
                # afterward?
                offset_after_deletion = transcript_offset + len(transcript_ref)
                if len(transcript.sequence) > offset_after_deletion:
                    next_base = transcript.sequence[offset_after_deletion]
                    if next_base not in PURINE_NUCLEOTIDES:
                        return True

    if exon_number < len(transcript.exons):
        # if the mutation affects an exon whose right end gets spliced
        # to a next exon, check if the variant alters the exon side of
        # 5' consensus splicing sequence
        #
        # splicing sequence:
        #   MAG|GURAGU
        #   M is A or C; R is purine; | is the exon-intron boundary
        #
        # TODO: check for overlap of two intervals instead of just
        # seeing if the mutation starts inside the exonic splice site
        if variant_overlaps_interval(
                variant_start=transcript_offset,
                n_ref_bases=len(transcript_ref),
                interval_start=exon_end_offset - 2,
                interval_end=exon_end_offset):
            end_of_reference_exon = transcript.sequence[
                exon_end_offset - 2:exon_end_offset + 1]
            if matches_exon_end_pattern(end_of_reference_exon):
                # if the last three nucleotides conform to the consensus
                # sequence then treat any deviation as an ExonicSpliceSite
                # mutation
                end_of_variant_exon = end_of_reference_exon
                if matches_exon_end_pattern(end_of_variant_exon):
                    # end of exon matches splicing signal, check if it still
                    # does after the mutation
                    return True
python
{ "resource": "" }
q5648
is_purine
train
def is_purine(nucleotide, allow_extended_nucleotides=False):
    """Is the nucleotide a purine"""
    if not allow_extended_nucleotides and nucleotide not in STANDARD_NUCLEOTIDES:
        raise ValueError(
            "{} is a non-standard nucleotide, neither purine nor pyrimidine".format(
                nucleotide))
    return nucleotide in PURINE_NUCLEOTIDES
python
{ "resource": "" }
q5649
normalize_nucleotide_string
train
def normalize_nucleotide_string(
        nucleotides,
        allow_extended_nucleotides=False,
        empty_chars=".-",
        treat_nan_as_empty=True):
    """
    Normalizes a nucleotide string by converting various ways of encoding
    empty strings into "", making all letters upper case, and checking to
    make sure all letters in the string are actually nucleotides.

    Parameters
    ----------
    nucleotides : str
        Sequence of nucleotides, e.g. "ACCTG"

    allow_extended_nucleotides : bool
        Allow non-canonical nucleotide characters like 'X' for unknown base

    empty_chars : str
        Characters which encode empty strings, such as "." used in VCF
        format or "-" used in MAF format

    treat_nan_as_empty : bool
        Some MAF files represent deletions/insertions with NaN ref/alt
        values
    """
    if nucleotides in empty_chars:
        return ""
    elif treat_nan_as_empty and isinstance(nucleotides, float) and np.isnan(nucleotides):
        return ""
    require_string(nucleotides, name="nucleotide string")

    nucleotides = nucleotides.upper()

    if allow_extended_nucleotides:
        valid_nucleotides = EXTENDED_NUCLEOTIDES
    else:
        valid_nucleotides = STANDARD_NUCLEOTIDES

    if not set(nucleotides) <= valid_nucleotides:
        raise ValueError(
            "Invalid character(s) in nucleotide string: %s" % (
                ",".join(set(nucleotides) - valid_nucleotides),))

    return nucleotides
python
{ "resource": "" }
q5650
load_vcf
train
def load_vcf(
        path,
        genome=None,
        reference_vcf_key="reference",
        only_passing=True,
        allow_extended_nucleotides=False,
        include_info=True,
        chunk_size=10 ** 5,
        max_variants=None,
        sort_key=variant_ascending_position_sort_key,
        distinct=True):
    """
    Load reference name and Variant objects from the given VCF filename.

    Local files are supported directly; if this is called on an HTTP URL,
    the file is first downloaded to a temporary local copy.

    Parameters
    ----------
    path : str
        Path to VCF (*.vcf) or compressed VCF (*.vcf.gz).

    genome : {pyensembl.Genome, reference name, Ensembl version int}, optional
        Optionally pass in a PyEnsembl Genome object, name of reference, or
        PyEnsembl release version to specify the reference associated with
        a VCF (otherwise infer reference from VCF using reference_vcf_key)

    reference_vcf_key : str, optional
        Name of metadata field which contains path to reference FASTA
        file (default = 'reference')

    only_passing : boolean, optional
        If true, any entries whose FILTER field is not one of "." or "PASS"
        is dropped.

    allow_extended_nucleotides : boolean, default False
        Allow characters other than A,C,T,G in the ref and alt strings.

    include_info : boolean, default True
        Whether to parse the INFO and per-sample columns. If you don't need
        these, set to False for faster parsing.

    chunk_size : int, optional
        Number of records to load in memory at once.

    max_variants : int, optional
        If specified, return only the first max_variants variants.

    sort_key : fn
        Function which maps each element to a sorting criterion.
        Set to None to not to sort the variants.

    distinct : boolean, default True
        Don't keep repeated variants
    """
    require_string(path, "Path or URL to VCF")
    parsed_path = parse_url_or_path(path)

    if parsed_path.scheme and parsed_path.scheme.lower() != "file":
        # pandas.read_table nominally supports HTTP, but it tends to crash
        # on large files and does not support gzip. Switching to the
        # python-based implementation of read_table (with engine="python")
        # helps with some issues but introduces a new set of problems (e.g.
        # the dtype parameter is not accepted). For these reasons, we're
        # currently not attempting to load VCFs over HTTP with pandas
        # directly, and instead download it to a temporary file and open
        # that.
        (filename, headers) = urllib.request.urlretrieve(path)
        try:
            # The downloaded file has no file extension, which confuses
            # pyvcf for gziped files in Python 3. We rename it to have the
            # correct file extension.
            new_filename = "%s.%s" % (
                filename, parsed_path.path.split(".")[-1])
            os.rename(filename, new_filename)
            filename = new_filename
            return load_vcf(
                filename,
                genome=genome,
                reference_vcf_key=reference_vcf_key,
                only_passing=only_passing,
                allow_extended_nucleotides=allow_extended_nucleotides,
                include_info=include_info,
                chunk_size=chunk_size,
                max_variants=max_variants,
                sort_key=sort_key,
                distinct=distinct)
        finally:
            logger.info("Removing temporary file: %s", filename)
            os.unlink(filename)

    # Loading a local file.
    # The file will be opened twice: first to parse the header with pyvcf,
    # then by pandas to read the data.

    # PyVCF reads the metadata immediately and stops at the first line with
    # data. We can close the file after that.
    handle = PyVCFReaderFromPathOrURL(path)
    handle.close()
    genome = infer_genome_from_vcf(
        genome,
        handle.vcf_reader,
        reference_vcf_key)

    df_iterator = read_vcf_into_dataframe(
        path,
        include_info=include_info,
        sample_names=handle.vcf_reader.samples if include_info else None,
        chunk_size=chunk_size)

    if include_info:
        def sample_info_parser(unparsed_sample_info_strings, format_string):
            """
            Given a format string like "GT:AD:ADP:DP:FS" and a list of
            sample info strings where each entry is like
            "0/1:3,22:T=3,G=22:25:33", return a dict that maps:
            sample name -> field name -> value.
            Uses pyvcf to parse the fields.
            """
            return pyvcf_calls_to_sample_info_list(
                handle.vcf_reader._parse_samples(
                    unparsed_sample_info_strings, format_string, None))
    else:
        sample_info_parser = None

    return dataframes_to_variant_collection(
        df_iterator,
        source_path=path,
        info_parser=handle.vcf_reader._parse_info if include_info else None,
        only_passing=only_passing,
        max_variants=max_variants,
        sample_names=handle.vcf_reader.samples if include_info else None,
        sample_info_parser=sample_info_parser,
        variant_kwargs={
            'ensembl': genome,
            'allow_extended_nucleotides': allow_extended_nucleotides},
        variant_collection_kwargs={
            'sort_key': sort_key,
            'distinct': distinct})
python
{ "resource": "" }
q5651
dataframes_to_variant_collection
train
def dataframes_to_variant_collection(
        dataframes,
        source_path,
        info_parser=None,
        only_passing=True,
        max_variants=None,
        sample_names=None,
        sample_info_parser=None,
        variant_kwargs={},
        variant_collection_kwargs={}):
    """
    Load a VariantCollection from an iterable of pandas dataframes.

    This takes an iterable of dataframes instead of a single dataframe to
    avoid having to load huge dataframes at once into memory. If you have a
    single dataframe, just pass it in a single-element list.

    Parameters
    ----------
    dataframes
        Iterable of dataframes (e.g. a generator). Expected columns are:
            ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"]
        and 'INFO' if `info_parser` is not None. Columns must be in this
        order.

    source_path : str
        Path of VCF file from which DataFrame chunks were generated.

    info_parser : string -> object, optional
        Callable to parse INFO strings.

    only_passing : boolean, optional
        If true, any entries whose FILTER field is not one of "." or "PASS"
        is dropped.

    max_variants : int, optional
        If specified, return only the first max_variants variants.

    sample_names : list of strings, optional
        Sample names. The final columns of the dataframe should match
        these. If specified, the per-sample info columns will be parsed.
        You must also specify sample_info_parser.

    sample_info_parser : string list * string -> dict, optional
        Callable to parse per-sample info columns.

    variant_kwargs : dict, optional
        Additional keyword parameters to pass to Variant.__init__

    variant_collection_kwargs : dict, optional
        Additional keyword parameters to pass to
        VariantCollection.__init__.
    """
    expected_columns = (
        ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"] +
        (["INFO"] if info_parser else []))

    if info_parser and sample_names:
        if sample_info_parser is None:
            raise TypeError(
                "Must specify sample_info_parser if specifying sample_names")
        expected_columns.append("FORMAT")
        expected_columns.extend(sample_names)

    variants = []
    metadata = {}
    try:
        for chunk in dataframes:
            assert chunk.columns.tolist() == expected_columns, \
                "dataframe columns (%s) do not match expected columns (%s)" % (
                    chunk.columns, expected_columns)
            for tpl in chunk.itertuples():
                (i, chrom, pos, id_, ref, alts, qual, flter) = tpl[:8]
                if flter == ".":
                    flter = None
                elif flter == "PASS":
                    flter = []
                elif only_passing:
                    continue
                else:
                    flter = flter.split(';')
                if id_ == ".":
                    id_ = None
                qual = float(qual) if qual != "." else None
                alt_num = 0
                info = sample_info = None
                for alt in alts.split(","):
                    if alt != ".":
                        if info_parser is not None and info is None:
                            info = info_parser(tpl[8])  # INFO column
                            if sample_names:
                                # Sample name -> field -> value dict.
                                sample_info = sample_info_parser(
                                    list(tpl[10:]),  # sample info columns
                                    tpl[9],  # FORMAT column
                                )
                        variant = Variant(
                            chrom,
                            int(pos),  # want a Python int not numpy.int64
                            ref,
                            alt,
                            **variant_kwargs)
                        variants.append(variant)
                        metadata[variant] = {
                            'id': id_,
                            'qual': qual,
                            'filter': flter,
                            'info': info,
                            'sample_info': sample_info,
                            'alt_allele_index': alt_num,
                        }
                        if max_variants and len(variants) > max_variants:
                            raise StopIteration
                    alt_num += 1
    except StopIteration:
        pass

    return VariantCollection(
        variants=variants,
        source_to_metadata_dict={source_path: metadata},
        **variant_collection_kwargs)
python
{ "resource": "" }
q5652
read_vcf_into_dataframe
train
def read_vcf_into_dataframe(
        path,
        include_info=False,
        sample_names=None,
        chunk_size=None):
    """
    Load the data of a VCF into a pandas dataframe. All headers are ignored.

    Parameters
    ----------
    path : str
        Path to local file. HTTP and other protocols are not implemented.

    include_info : boolean, default False
        If true, the INFO field is not parsed, but is included as a string
        in the resulting data frame. If false, the INFO field is omitted.

    sample_names : string list, optional
        Sample names. The final columns of the dataframe should match
        these. If specified (and include_info is also specified), the
        FORMAT and per-sample info columns will be included in the result
        dataframe.

    chunk_size : int, optional
        If buffering is desired, the number of rows per chunk.

    Returns
    ---------
    If chunk_size is None (the default), a dataframe with the contents of
    the VCF file. Otherwise, an iterable of dataframes, each with
    chunk_size rows.
    """
    vcf_field_types = OrderedDict()
    vcf_field_types['CHROM'] = str
    vcf_field_types['POS'] = int
    vcf_field_types['ID'] = str
    vcf_field_types['REF'] = str
    vcf_field_types['ALT'] = str
    vcf_field_types['QUAL'] = str
    vcf_field_types['FILTER'] = str

    if include_info:
        vcf_field_types['INFO'] = str
        if sample_names:
            vcf_field_types['FORMAT'] = str
            for name in sample_names:
                vcf_field_types[name] = str

    parsed_path = parse_url_or_path(path)
    if not parsed_path.scheme or parsed_path.scheme.lower() == "file":
        path = parsed_path.path
    else:
        raise NotImplementedError("Only local files are supported.")

    compression = None
    if path.endswith(".gz"):
        compression = "gzip"
    elif path.endswith(".bz2"):
        compression = "bz2"

    reader = pandas.read_table(
        path,
        compression=compression,
        comment="#",
        chunksize=chunk_size,
        dtype=vcf_field_types,
        names=list(vcf_field_types),
        usecols=range(len(vcf_field_types)))
    return reader
python
{ "resource": "" }
q5653
stream_gzip_decompress_lines
train
def stream_gzip_decompress_lines(stream):
    """
    Uncompress a gzip stream into lines of text.

    Parameters
    ----------
    stream : iterable of bytes
        Generator of chunks of gzip compressed text.

    Returns
    -------
    Generator of uncompressed lines.
    """
    dec = zlib.decompressobj(zlib.MAX_WBITS | 16)
    previous = ""
    for compressed_chunk in stream:
        chunk = dec.decompress(compressed_chunk).decode()
        if chunk:
            lines = (previous + chunk).split("\n")
            previous = lines.pop()
            for line in lines:
                yield line
    # flush any text still buffered in the decompressor before yielding
    # the final (possibly unterminated) line
    tail = dec.flush().decode()
    if tail:
        previous += tail
    yield previous
python
{ "resource": "" }
q5654
infer_genome_from_vcf
train
def infer_genome_from_vcf(genome, vcf_reader, reference_vcf_key): """ Helper function to make a pyensembl.Genome instance. """ if genome: return infer_genome(genome) elif reference_vcf_key not in vcf_reader.metadata: raise ValueError("Unable to infer reference genome for %s" % ( vcf_reader.filename,)) else: reference_path = vcf_reader.metadata[reference_vcf_key] return infer_genome(reference_path)
python
{ "resource": "" }
q5655
cdna_codon_sequence_after_insertion_frameshift
train
def cdna_codon_sequence_after_insertion_frameshift(
        sequence_from_start_codon,
        cds_offset_before_insertion,
        inserted_nucleotides):
    """
    Returns index of mutated codon and nucleotide sequence starting at the
    first mutated codon.
    """
    # special logic for insertions
    coding_sequence_after_insertion = \
        sequence_from_start_codon[cds_offset_before_insertion + 1:]

    if cds_offset_before_insertion % 3 == 2:
        # insertion happens after last nucleotide in a codon,
        # doesn't disrupt the existing codon from cds_offset-2 to cds_offset
        mutated_codon_index = cds_offset_before_insertion // 3 + 1
        nucleotides_before = ""
    elif cds_offset_before_insertion % 3 == 1:
        # insertion happens after 2nd nucleotide of a codon
        # codon positions:
        #   1) cds_offset - 1
        #   2) cds_offset
        #    <----- insertion
        #   3) cds_offset + 1
        mutated_codon_index = cds_offset_before_insertion // 3
        # the first codon in the returned sequence will contain two
        # reference nucleotides before the insertion
        nucleotides_before = sequence_from_start_codon[
            cds_offset_before_insertion - 1:cds_offset_before_insertion + 1]
    elif cds_offset_before_insertion % 3 == 0:
        # insertion happens after 1st nucleotide of a codon
        # codon positions:
        #   1) cds_offset
        #    <----- insertion
        #   2) cds_offset + 1
        #   3) cds_offset + 2
        mutated_codon_index = cds_offset_before_insertion // 3
        # the first codon in the returned sequence will contain one
        # reference nucleotide before the insertion
        nucleotides_before = sequence_from_start_codon[cds_offset_before_insertion]
    sequence_from_mutated_codon = (
        nucleotides_before +
        inserted_nucleotides +
        coding_sequence_after_insertion)
    return mutated_codon_index, sequence_from_mutated_codon
python
{ "resource": "" }
q5656
cdna_codon_sequence_after_deletion_or_substitution_frameshift
train
def cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon, cds_offset, trimmed_cdna_ref, trimmed_cdna_alt): """ Logic for any frameshift which isn't an insertion. We have insertions as a special case since our base-inclusive indexing means something different for insertions: cds_offset = base before insertion Whereas in this case: cds_offset = first reference base affected by a variant Returns index of first modified codon and sequence from that codon onward. """ mutated_codon_index = cds_offset // 3 # get the sequence starting from the first modified codon until the end # of the transcript. sequence_after_mutated_codon = \ sequence_from_start_codon[mutated_codon_index * 3:] # the variant's ref nucleotides should start either 0, 1, or 2 nucleotides # into `sequence_after_mutated_codon` offset_into_mutated_codon = cds_offset % 3 sequence_from_mutated_codon = substitute( sequence=sequence_after_mutated_codon, offset=offset_into_mutated_codon, ref=trimmed_cdna_ref, alt=trimmed_cdna_alt) return mutated_codon_index, sequence_from_mutated_codon
python
{ "resource": "" }
q5657
predict_frameshift_coding_effect
train
def predict_frameshift_coding_effect( variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, cds_offset, sequence_from_start_codon): """ Coding effect of a frameshift mutation. Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : nucleotide sequence Reference nucleotides in the coding sequence of the given transcript. trimmed_cdna_alt : nucleotide sequence Alternate nucleotides introduced by mutation cds_offset : int Offset into the CDS of first ref nucleotide. For insertions, this is the offset of the last ref nucleotide before the insertion. sequence_from_start_codon : nucleotide sequence Nucleotides of the coding sequence and 3' UTR """ if len(trimmed_cdna_ref) != 0: mutated_codon_index, sequence_from_mutated_codon = \ cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset=cds_offset, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt) else: mutated_codon_index, sequence_from_mutated_codon = \ cdna_codon_sequence_after_insertion_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset_before_insertion=cds_offset, inserted_nucleotides=trimmed_cdna_alt) return create_frameshift_effect( mutated_codon_index=mutated_codon_index, sequence_from_mutated_codon=sequence_from_mutated_codon, variant=variant, transcript=transcript)
python
{ "resource": "" }
q5658
predict_variant_effects
train
def predict_variant_effects(variant, raise_on_error=False):
    """Determine the effects of a variant on any transcripts it overlaps.
    Returns an EffectCollection object.

    Parameters
    ----------
    variant : Variant

    raise_on_error : bool
        Raise an exception if we encounter an error while trying to
        determine the effect of this variant on a transcript, or simply
        log the error and continue.
    """
    # if this variant isn't overlapping any genes, return an
    # Intergenic effect
    # TODO: look for nearby genes and mark those as Upstream and Downstream
    # effects
    if len(variant.gene_ids) == 0:
        effects = [Intergenic(variant)]
    else:
        # list of all MutationEffects for all genes & transcripts
        effects = []

        # group transcripts by their gene ID
        transcripts_grouped_by_gene = groupby_field(variant.transcripts, 'gene_id')

        # want effects in the list grouped by the gene they come from
        for gene_id in sorted(variant.gene_ids):
            if gene_id not in transcripts_grouped_by_gene:
                # intragenic variant overlaps a gene but not any transcripts
                gene = variant.ensembl.gene_by_id(gene_id)
                effects.append(Intragenic(variant, gene))
            else:
                # gene ID has transcripts overlapped by this variant
                for transcript in transcripts_grouped_by_gene[gene_id]:
                    if raise_on_error:
                        effect = predict_variant_effect_on_transcript(
                            variant=variant,
                            transcript=transcript)
                    else:
                        effect = predict_variant_effect_on_transcript_or_failure(
                            variant=variant,
                            transcript=transcript)
                    effects.append(effect)
    return EffectCollection(effects)
python
{ "resource": "" }
q5659
predict_variant_effect_on_transcript_or_failure
train
def predict_variant_effect_on_transcript_or_failure(variant, transcript):
    """
    Try predicting the effect of a variant on a particular transcript but
    suppress raised exceptions by converting them into `Failure` effect
    values.
    """
    try:
        return predict_variant_effect_on_transcript(
            variant=variant,
            transcript=transcript)
    except (AssertionError, ValueError) as error:
        logger.warning(
            "Encountered error annotating %s for %s: %s",
            variant,
            transcript,
            error)
        return Failure(variant, transcript)
python
{ "resource": "" }
q5660
choose_intronic_effect_class
train
def choose_intronic_effect_class( variant, nearest_exon, distance_to_exon): """ Infer effect of variant which does not overlap any exon of the given transcript. """ assert distance_to_exon > 0, \ "Expected intronic effect to have distance_to_exon > 0, got %d" % ( distance_to_exon,) if nearest_exon.strand == "+": # if exon on positive strand start_before = variant.trimmed_base1_start < nearest_exon.start start_same = variant.trimmed_base1_start == nearest_exon.start before_exon = start_before or (variant.is_insertion and start_same) else: # if exon on negative strand end_after = variant.trimmed_base1_end > nearest_exon.end end_same = variant.trimmed_base1_end == nearest_exon.end before_exon = end_after or (variant.is_insertion and end_same) # distance cutoffs based on consensus splice sequences from # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/ # 5' splice site: MAG|GURAGU consensus # M is A or C; R is purine; | is the exon-intron boundary # 3' splice site: YAG|R if distance_to_exon <= 2: if before_exon: # 2 last nucleotides of intron before exon are the splice # acceptor site, typically "AG" return SpliceAcceptor else: # 2 first nucleotides of intron after exon are the splice donor # site, typically "GT" return SpliceDonor elif not before_exon and distance_to_exon <= 6: # variants in nucleotides 3-6 at start of intron aren't as certain # to cause problems as nucleotides 1-2 but still implicated in # alternative splicing return IntronicSpliceSite elif before_exon and distance_to_exon <= 3: # nucleotide -3 before exon is part of the 3' splicing # motif but allows for more degeneracy than the -2, -1 nucleotides return IntronicSpliceSite else: # intronic mutation unrelated to splicing return Intronic
python
{ "resource": "" }
q5661
exonic_transcript_effect
train
def exonic_transcript_effect(variant, exon, exon_number, transcript):
    """Effect of this variant on a Transcript, assuming we already know
    that this variant overlaps some exon of the transcript.

    Parameters
    ----------
    variant : Variant

    exon : pyensembl.Exon
        Exon which this variant overlaps

    exon_number : int
        Index (starting from 1) of the given exon in the transcript's
        sequence of exons.

    transcript : pyensembl.Transcript
    """
    genome_ref = variant.trimmed_ref
    genome_alt = variant.trimmed_alt
    variant_start = variant.trimmed_base1_start
    variant_end = variant.trimmed_base1_end

    # clip mutation to only affect the current exon
    if variant_start < exon.start:
        # if mutation starts before current exon then only look
        # at nucleotides which overlap the exon
        logger.info('Mutation in variant %s starts before exon %s', variant, exon)
        assert len(genome_ref) > 0, "Unexpected insertion into intron"
        n_skip_start = exon.start - variant_start
        genome_ref = genome_ref[n_skip_start:]
        genome_alt = genome_alt[n_skip_start:]
        genome_start = exon.start
    else:
        genome_start = variant_start

    if variant_end > exon.end:
        # if mutation goes past exon end then only look at nucleotides
        # which overlap the exon
        logger.info('Mutation in variant %s ends after exon %s', variant, exon)
        n_skip_end = variant_end - exon.end
        genome_ref = genome_ref[:-n_skip_end]
        genome_alt = genome_alt[:len(genome_ref)]
        genome_end = exon.end
    else:
        genome_end = variant_end

    transcript_offset = interval_offset_on_transcript(
        genome_start, genome_end, transcript)

    if transcript.on_backward_strand:
        cdna_ref = reverse_complement(genome_ref)
        cdna_alt = reverse_complement(genome_alt)
    else:
        cdna_ref = genome_ref
        cdna_alt = genome_alt

    n_ref = len(cdna_ref)

    expected_ref = str(
        transcript.sequence[transcript_offset:transcript_offset + n_ref])

    if cdna_ref != expected_ref:
        raise ValueError(
            ("Found ref nucleotides '%s' in sequence"
             " of %s at offset %d (chromosome positions %d:%d)"
             " but variant %s has '%s'") % (
                expected_ref,
                transcript,
                transcript_offset,
                genome_start,
                genome_end,
                variant,
                cdna_ref))

    utr5_length = min(transcript.start_codon_spliced_offsets)

    # does the variant start inside the 5' UTR?
    if utr5_length > transcript_offset:
        # does the variant end after the 5' UTR, within the coding region?
        if utr5_length < transcript_offset + n_ref:
            # TODO: we *might* lose the Kozak sequence or the start codon
            # but without looking at the modified sequence how can we tell
            # for sure that this is a start-loss variant?
            return StartLoss(variant, transcript)
        else:
            # if variant contained within 5' UTR
            return FivePrimeUTR(variant, transcript)

    utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1

    if transcript_offset >= utr3_offset:
        return ThreePrimeUTR(variant, transcript)

    exon_start_offset = interval_offset_on_transcript(
        exon.start, exon.end, transcript)
    exon_end_offset = exon_start_offset + len(exon) - 1

    # Further below we're going to try to predict exonic splice site
    # modifications, which will take this effect annotation as their
    # alternative hypothesis for what happens if splicing doesn't change.
    # If the mutation doesn't affect an exonic splice site, then
    # we'll just return this effect.
    coding_effect_annotation = predict_variant_coding_effect_on_transcript(
        variant=variant,
        transcript=transcript,
        trimmed_cdna_ref=cdna_ref,
        trimmed_cdna_alt=cdna_alt,
        transcript_offset=transcript_offset)

    if changes_exonic_splice_site(
            transcript=transcript,
            transcript_ref=cdna_ref,
            transcript_alt=cdna_alt,
            transcript_offset=transcript_offset,
            exon_start_offset=exon_start_offset,
            exon_end_offset=exon_end_offset,
            exon_number=exon_number):
        return ExonicSpliceSite(
            variant=variant,
            transcript=transcript,
            exon=exon,
            alternate_effect=coding_effect_annotation)
    return coding_effect_annotation
python
{ "resource": "" }
q5662
apply_groupby
train
def apply_groupby(records, fn, skip_none=False): """ Given a list of objects, group them into a dictionary by applying fn to each one and using returned values as a dictionary key. Parameters ---------- records : list fn : function skip_none : bool If False, then None can be a key in the returned dictionary, otherwise records whose key value is None get skipped. Returns dict. """ # create an empty list for every new key groups = defaultdict(list) for record in records: value = fn(record) if value is not None or not skip_none: groups[value].append(record) return dict(groups)
python
{ "resource": "" }
q5663
groupby_field
train
def groupby_field(records, field_name, skip_none=True): """ Given a list of objects, group them into a dictionary by the unique values of a given field name. """ return apply_groupby( records, lambda obj: getattr(obj, field_name), skip_none=skip_none)
python
{ "resource": "" }
q5664
memoize
train
def memoize(fn): """ Simple memoization decorator for functions and methods, assumes that all arguments to the function can be hashed and compared. """ memoized_values = {} @wraps(fn) def wrapped_fn(*args, **kwargs): key = (args, tuple(sorted(kwargs.items()))) try: return memoized_values[key] except KeyError: memoized_values[key] = fn(*args, **kwargs) return memoized_values[key] return wrapped_fn
python
{ "resource": "" }
q5665
EffectCollection.filter_by_transcript_expression
train
def filter_by_transcript_expression( self, transcript_expression_dict, min_expression_value=0.0): """ Filters effects to those which have an associated transcript whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- transcript_expression_dict : dict Dictionary mapping Ensembl transcript IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection """ return self.filter_above_threshold( key_fn=lambda effect: effect.transcript_id, value_dict=transcript_expression_dict, threshold=min_expression_value)
python
{ "resource": "" }
q5666
EffectCollection.filter_by_gene_expression
train
def filter_by_gene_expression( self, gene_expression_dict, min_expression_value=0.0): """ Filters effects to those which have an associated gene whose expression value in the gene_expression_dict argument is greater than min_expression_value. Parameters ---------- gene_expression_dict : dict Dictionary mapping Ensembl gene IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection """ return self.filter_above_threshold( key_fn=lambda effect: effect.gene_id, value_dict=gene_expression_dict, threshold=min_expression_value)
python
{ "resource": "" }
q5667
EffectCollection.filter_by_effect_priority
train
def filter_by_effect_priority(self, min_priority_class):
    """
    Create a new EffectCollection containing only effects whose priority
    is at least that of the given class.
    """
    min_priority = transcript_effect_priority_dict[min_priority_class]
    return self.filter(
        lambda effect: effect_priority(effect) >= min_priority)
python
{ "resource": "" }
q5668
EffectCollection.top_priority_effect_per_variant
train
def top_priority_effect_per_variant(self): """Highest priority effect for each unique variant""" return OrderedDict( (variant, top_priority_effect(variant_effects)) for (variant, variant_effects) in self.groupby_variant().items())
python
{ "resource": "" }
q5669
EffectCollection.top_priority_effect_per_transcript_id
train
def top_priority_effect_per_transcript_id(self): """Highest priority effect for each unique transcript ID""" return OrderedDict( (transcript_id, top_priority_effect(variant_effects)) for (transcript_id, variant_effects) in self.groupby_transcript_id().items())
python
{ "resource": "" }
q5670
EffectCollection.top_priority_effect_per_gene_id
train
def top_priority_effect_per_gene_id(self): """Highest priority effect for each unique gene ID""" return OrderedDict( (gene_id, top_priority_effect(variant_effects)) for (gene_id, variant_effects) in self.groupby_gene_id().items())
python
{ "resource": "" }
q5671
EffectCollection.top_expression_effect
train
def top_expression_effect(self, expression_levels):
    """
    Return the effect whose transcript has the highest expression level.
    If none of the effects are expressed or have associated transcripts,
    then return None. Ties are broken by effect priority and transcript
    length.
    """
    effect_expression_dict = self.effect_expression(expression_levels)

    if len(effect_expression_dict) == 0:
        return None

    def key_fn(effect_fpkm_pair):
        """
        Sort effects primarily by their expression level and secondarily
        by the priority logic used in `top_priority_effect`.
        """
        (effect, fpkm) = effect_fpkm_pair
        return (fpkm, multi_gene_effect_sort_key(effect))

    return max(effect_expression_dict.items(), key=key_fn)[0]
python
{ "resource": "" }
q5672
EffectCollection.to_dataframe
train
def to_dataframe(self): """Build a dataframe from the effect collection""" # list of properties to extract from Variant objects if they're # not None variant_properties = [ "contig", "start", "ref", "alt", "is_snv", "is_transversion", "is_transition" ] def row_from_effect(effect): row = OrderedDict() row['variant'] = str(effect.variant.short_description) for field_name in variant_properties: # if effect.variant is None then this column value will be None row[field_name] = getattr(effect.variant, field_name, None) row['gene_id'] = effect.gene_id row['gene_name'] = effect.gene_name row['transcript_id'] = effect.transcript_id row['transcript_name'] = effect.transcript_name row['effect_type'] = effect.__class__.__name__ row['effect'] = effect.short_description return row return pd.DataFrame.from_records([row_from_effect(effect) for effect in self])
python
{ "resource": "" }
q5673
random_variants
train
def random_variants( count, genome_name="GRCh38", deletions=True, insertions=True, random_seed=None): """ Generate a VariantCollection with random variants that overlap at least one complete coding transcript. """ rng = random.Random(random_seed) ensembl = genome_for_reference_name(genome_name) if ensembl in _transcript_ids_cache: transcript_ids = _transcript_ids_cache[ensembl] else: transcript_ids = ensembl.transcript_ids() _transcript_ids_cache[ensembl] = transcript_ids variants = [] # we should finish way before this loop is over but just in case # something is wrong with PyEnsembl we want to avoid an infinite loop for _ in range(count * 100): if len(variants) < count: transcript_id = rng.choice(transcript_ids) transcript = ensembl.transcript_by_id(transcript_id) if not transcript.complete: continue exon = rng.choice(transcript.exons) base1_genomic_position = rng.randint(exon.start, exon.end) transcript_offset = transcript.spliced_offset(base1_genomic_position) seq = transcript.sequence ref = str(seq[transcript_offset]) if transcript.on_backward_strand: ref = reverse_complement(ref) alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref] if insertions: nucleotide_pairs = [ x + y for x in STANDARD_NUCLEOTIDES for y in STANDARD_NUCLEOTIDES ] alt_nucleotides.extend(nucleotide_pairs) if deletions: alt_nucleotides.append("") alt = rng.choice(alt_nucleotides) variant = Variant( transcript.contig, base1_genomic_position, ref=ref, alt=alt, ensembl=ensembl) variants.append(variant) else: return VariantCollection(variants) raise ValueError( ("Unable to generate %d random variants, " "there may be a problem with PyEnsembl") % count)
python
{ "resource": "" }
q5674
MSLDAP.get_server_info
train
def get_server_info(self, anonymous = True):
    """
    Performs bind on the server and grabs the DSA info object.
    If anonymous is set to true, then it will perform anonymous bind, not using user credentials.
    Otherwise it will use the credentials set in the object constructor.
    """
    if anonymous == True:
        logger.debug('Getting server info via Anonymous BIND on server %s' % self.target_server.get_host())
        server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
        conn = Connection(server, auto_bind=True)
        logger.debug('Got server info')
    else:
        logger.debug('Getting server info via credentials supplied on server %s' % self.target_server.get_host())
        server = Server(self.target_server.get_host(), use_ssl=self.target_server.is_ssl(), get_info=ALL)
        if self.use_sspi == True:
            conn = self.monkeypatch()
        else:
            # bind against the Server object built above so its info
            # attribute gets populated
            conn = Connection(server, user=self.login_credential.get_msuser(), password=self.login_credential.get_password(), authentication=self.login_credential.get_authmethod())
        logger.debug('Performing BIND to server %s' % self.target_server.get_host())
        if not conn.bind():
            if 'description' in conn.result:
                raise Exception('Failed to bind to server! Reason: %s' % conn.result['description'])
            raise Exception('Failed to bind to server! Reason: %s' % conn.result)
        logger.debug('Connected to server!')
    return server.info
python
{ "resource": "" }
q5675
MSLDAP.get_all_user_objects
train
def get_all_user_objects(self): """ Fetches all user objects from the AD, and returns MSADUser object """ logger.debug('Polling AD for all user objects') ldap_filter = r'(objectClass=user)' attributes = MSADUser.ATTRS for entry in self.pagedsearch(ldap_filter, attributes): # TODO: return ldapuser object yield MSADUser.from_ldap(entry, self._ldapinfo) logger.debug('Finished polling for entries!')
python
{ "resource": "" }
q5676
MSLDAP.get_all_knoreq_user_objects
train
def get_all_knoreq_user_objects(self, include_machine = False):
    """
    Fetches all user objects with useraccountcontrol DONT_REQ_PREAUTH flag set from the AD, and returns MSADUser object.
    """
    logger.debug('Polling AD for all user objects, machine accounts included: %s' % include_machine)
    if include_machine == True:
        ldap_filter = r'(userAccountControl:1.2.840.113556.1.4.803:=4194304)'
    else:
        # note: LDAP filter syntax does not allow spaces around "="
        ldap_filter = r'(&(userAccountControl:1.2.840.113556.1.4.803:=4194304)(!(sAMAccountName=*$)))'

    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
python
{ "resource": "" }
q5677
vn
train
def vn(x):
    """
    "value or None" helper: returns None for an empty list, joins a
    non-empty list with '|', ISO-formats datetimes, and passes any other
    value through unchanged.
    """
    if x == []:
        return None
    if isinstance(x, list):
        return '|'.join(x)
    if isinstance(x, datetime):
        return x.isoformat()
    return x
python
{ "resource": "" }
q5678
BotoSqliteEngine.load_tables
train
def load_tables(self, query, meta): """ Load necessary resources tables into db to execute given query. """ try: for table in meta.tables: self.load_table(table) except NoCredentialsError: help_link = 'http://boto3.readthedocs.io/en/latest/guide/configuration.html' raise QueryError('Unable to locate AWS credential. ' 'Please see {0} on how to configure AWS credential.'.format(help_link))
python
{ "resource": "" }
q5679
BotoSqliteEngine.load_table
train
def load_table(self, table):
    """
    Load resources as specified by given table into our db.
    """
    region = table.database if table.database else self.default_region
    resource_name, collection_name = table.table.split('_', 1)

    # we use underscore "_" instead of dash "-" in region names, but boto3 needs dashes
    boto_region_name = region.replace('_', '-')
    resource = self.boto3_session.resource(resource_name, region_name=boto_region_name)
    if not hasattr(resource, collection_name):
        raise QueryError(
            'Unknown collection <{0}> of resource <{1}>'.format(collection_name, resource_name))

    self.attach_region(region)
    self.refresh_table(region, table.table, resource, getattr(resource, collection_name))
python
{ "resource": "" }
q5680
json_serialize
train
def json_serialize(obj): """ Simple generic JSON serializer for common objects. """ if isinstance(obj, datetime): return obj.isoformat() if hasattr(obj, 'id'): return jsonify(obj.id) if hasattr(obj, 'name'): return jsonify(obj.name) raise TypeError('{0} is not JSON serializable'.format(obj))
python
{ "resource": "" }
q5681
json_get
train
def json_get(serialized_object, field):
    """
    This emulates the HSTORE `->` get value operation.
    It gets the value for the given key from a JSON serialized column and
    returns JSON `null` if the key is not present.
    Key can be either an integer for array index access or a string for
    object field access.

    :return: JSON serialized value of key in object
    """
    # return null if serialized_object is null or "serialized null"
    if serialized_object is None:
        return None
    obj = json.loads(serialized_object)
    if obj is None:
        return None

    if isinstance(field, int):
        # array index access
        res = obj[field] if 0 <= field < len(obj) else None
    else:
        # object field access
        res = obj.get(field)

    if not isinstance(res, (int, float, string_types)):
        res = json.dumps(res)

    return res
python
{ "resource": "" }
q5682
create_table
train
def create_table(db, schema_name, table_name, columns): """ Create a table, schema_name.table_name, in given database with given list of column names. """ table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name db.execute('DROP TABLE IF EXISTS {0}'.format(table)) columns_list = ', '.join(columns) db.execute('CREATE TABLE {0} ({1})'.format(table, columns_list))
python
{ "resource": "" }
q5683
insert_all
train
def insert_all(db, schema_name, table_name, columns, items): """ Insert all item in given items list into the specified table, schema_name.table_name. """ table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name columns_list = ', '.join(columns) values_list = ', '.join(['?'] * len(columns)) query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format( table=table, columns=columns_list, values=values_list) for item in items: values = [getattr(item, col) for col in columns] db.execute(query, values)
python
{ "resource": "" }
q5684
Creator.get_events
train
def get_events(self, *args, **kwargs): """ Returns a full EventDataWrapper object for this creator. /creators/{creatorId}/events :returns: EventDataWrapper -- A new request to API. Contains full results set. """ from .event import Event, EventDataWrapper return self.get_related_resource(Event, EventDataWrapper, args, kwargs)
python
{ "resource": "" }
q5685
Creator.get_series
train
def get_series(self, *args, **kwargs): """ Returns a full SeriesDataWrapper object for this creator. /creators/{creatorId}/series :returns: SeriesDataWrapper -- A new request to API. Contains full results set. """ from .series import Series, SeriesDataWrapper return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs)
python
{ "resource": "" }
q5686
Creator.get_stories
train
def get_stories(self, *args, **kwargs):
    """
    Returns a full StoryDataWrapper object for this creator.

    /creators/{creatorId}/stories

    :returns:  StoryDataWrapper -- A new request to API. Contains full results set.
    """
    from .story import Story, StoryDataWrapper
    return self.get_related_resource(Story, StoryDataWrapper, args, kwargs)
python
{ "resource": "" }
q5687
MarvelObject.list_to_instance_list
train
def list_to_instance_list(_self, _list, _Class): """ Takes a list of resource dicts and returns a list of resource instances, defined by the _Class param. :param _self: Original resource calling the method :type _self: core.MarvelObject :param _list: List of dicts describing a Resource. :type _list: list :param _Class: The Resource class to create a list of (Comic, Creator, etc). :type _Class: core.MarvelObject :returns: list -- List of Resource instances (Comic, Creator, etc). """ items = [] for item in _list: items.append(_Class(_self.marvel, item)) return items
python
{ "resource": "" }
q5688
Marvel._call
train
def _call(self, resource_url, params=None): """ Calls the Marvel API endpoint :param resource_url: url slug of the resource :type resource_url: str :param params: query params to add to endpoint :type params: str :returns: response -- Requests response """ url = "%s%s" % (self._endpoint(), resource_url) if params: url += "?%s&%s" % (params, self._auth()) else: url += "?%s" % self._auth() return requests.get(url)
python
{ "resource": "" }
q5689
Marvel.get_character
train
def get_character(self, id):
    """Fetches a single character by id.

    get /v1/public/characters/{characterId}

    :param id: ID of Character
    :type id: int

    :returns:  CharacterDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_character(1009718)
    >>> print cdw.data.count
    1
    >>> print cdw.data.results[0].name
    Wolverine
    """
    url = "%s/%s" % (Character.resource_url(), id)
    response = json.loads(self._call(url).text)
    return CharacterDataWrapper(self, response)
python
{ "resource": "" }
q5690
Marvel.get_characters
train
def get_characters(self, *args, **kwargs):
    """Fetches lists of comic characters with optional filters.

    get /v1/public/characters

    :returns:  CharacterDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
    >>> print cdw.data.count
    1401
    >>> for result in cdw.data.results:
    ...     print result.name
    Aginar
    Air-Walker (Gabriel Lan)
    Ajak
    Ajaxis
    Akemi
    """
    # pass url string and params string to _call
    response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
    return CharacterDataWrapper(self, response, kwargs)
python
{ "resource": "" }
q5691
Marvel.get_comics
train
def get_comics(self, *args, **kwargs): """ Fetches list of comics. get /v1/public/comics :returns: ComicDataWrapper >>> m = Marvel(public_key, private_key) >>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15") >>> print cdw.data.count 10 >>> print cdw.data.results[0].name Some Comic """ response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text) return ComicDataWrapper(self, response)
python
{ "resource": "" }
q5692
Marvel.get_creator
train
def get_creator(self, id):
    """Fetches a single creator by id.

    get /v1/public/creators/{creatorId}

    :param id: ID of Creator
    :type id: int

    :returns:  CreatorDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_creator(30)
    >>> print cdw.data.count
    1
    >>> print cdw.data.result.fullName
    Stan Lee
    """
    url = "%s/%s" % (Creator.resource_url(), id)
    response = json.loads(self._call(url).text)
    return CreatorDataWrapper(self, response)
python
{ "resource": "" }
q5693
Marvel.get_event
train
def get_event(self, id):
    """Fetches a single event by id.

    get /v1/public/events/{eventId}

    :param id: ID of Event
    :type id: int

    :returns:  EventDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_event(253)
    >>> print response.data.result.title
    Infinity Gauntlet
    """
    url = "%s/%s" % (Event.resource_url(), id)
    response = json.loads(self._call(url).text)
    return EventDataWrapper(self, response)
python
{ "resource": "" }
q5694
Marvel.get_single_series
train
def get_single_series(self, id):
    """Fetches a single comic series by id.

    get /v1/public/series/{seriesId}

    :param id: ID of Series
    :type id: int

    :returns:  SeriesDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_single_series(12429)
    >>> print response.data.result.title
    5 Ronin (2010)
    """
    url = "%s/%s" % (Series.resource_url(), id)
    response = json.loads(self._call(url).text)
    return SeriesDataWrapper(self, response)
python
{ "resource": "" }
q5695
Marvel.get_story
train
def get_story(self, id):
    """Fetches a single story by id.

    get /v1/public/stories/{storyId}

    :param id: ID of Story
    :type id: int

    :returns:  StoryDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_story(29)
    >>> print response.data.result.title
    Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
    """
    url = "%s/%s" % (Story.resource_url(), id)
    response = json.loads(self._call(url).text)
    return StoryDataWrapper(self, response)
python
{ "resource": "" }
q5696
Marvel.get_stories
train
def get_stories(self, *args, **kwargs): """Fetches lists of stories. get /v1/public/stories :returns: StoryDataWrapper >>> #Find all the stories that involved both Hulk and Wolverine >>> #hulk's id: 1009351 >>> #wolverine's id: 1009718 >>> m = Marvel(public_key, private_key) >>> response = m.get_stories(characters="1009351,1009718") >>> print response.data.total 4066 >>> stories = response.data.results >>> print stories[1].title Cover #477 """ response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text) return StoryDataWrapper(self, response)
python
{ "resource": "" }
q5697
Story.get_creators
train
def get_creators(self, *args, **kwargs): """ Returns a full CreatorDataWrapper object for this story. /stories/{storyId}/creators :returns: CreatorDataWrapper -- A new request to API. Contains full results set. """ from .creator import Creator, CreatorDataWrapper return self.get_related_resource(Creator, CreatorDataWrapper, args, kwargs)
python
{ "resource": "" }
q5698
Story.get_characters
train
def get_characters(self, *args, **kwargs): """ Returns a full CharacterDataWrapper object for this story. /stories/{storyId}/characters :returns: CharacterDataWrapper -- A new request to API. Contains full results set. """ from .character import Character, CharacterDataWrapper return self.get_related_resource(Character, CharacterDataWrapper, args, kwargs)
python
{ "resource": "" }
q5699
ffmpeg_version
train
def ffmpeg_version():
    """Returns the available ffmpeg version

    Returns
    -------
    version : str
        version number as string, or None if the banner cannot be parsed
    """
    cmd = [
        'ffmpeg',
        '-version'
    ]

    output = sp.check_output(cmd)
    # grab the banner line, e.g. "ffmpeg version 3.4.1 ..."
    version_line = [
        x
        for x
        in output.splitlines()
        if "ffmpeg version " in str(x)
    ][0]

    hay = version_line.decode('ascii')
    match = re.findall(r'ffmpeg version (\d+\.)?(\d+\.)?(\*|\d+)', hay)
    if match:
        return "".join(match[0])
    else:
        return None
python
{ "resource": "" }