code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
return self._call_endpoint(INVOKE_SCRIPT, params=[script], id=id, endpoint=endpoint)
def invoke_script(self, script, id=None, endpoint=None)
Invokes a script that has been assembled.

Args:
    script: (str) a hexlified string of a contract invocation script, example
        '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7'
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
6.021935
7.306885
0.824145
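A minimal usage sketch for the RPC methods above, assuming they live on an RPC client class; `NeoRpcClient` is an illustrative placeholder name, not taken from the source (the script hash is the one from the docstring).

client = NeoRpcClient()  # hypothetical client class exposing the methods above
script = '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7'
response = client.invoke_script(script, id=1)
print(response)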
return self._call_endpoint(SEND_TX, params=[serialized_tx], id=id, endpoint=endpoint)
def send_raw_tx(self, serialized_tx, id=None, endpoint=None)
Submits a serialized tx to the network.

Args:
    serialized_tx: (str) a hexlified string of a transaction
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    bool: whether the tx was accepted or not
5.242824
7.11699
0.736663
return self._call_endpoint(VALIDATE_ADDR, params=[address], id=id, endpoint=endpoint)
def validate_addr(self, address, id=None, endpoint=None)
Returns whether or not an address string is valid.

Args:
    address: (str) address to look up (in the format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
6.057385
7.448137
0.813275
return self._call_endpoint(GET_PEERS, id=id, endpoint=endpoint)
def get_peers(self, id=None, endpoint=None)
Get the current peers of a remote node.

Args:
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
5.258243
7.402877
0.710297
return self._call_endpoint(GET_VALIDATORS, id=id, endpoint=endpoint)
def get_validators(self, id=None, endpoint=None)
Returns the current NEO consensus nodes information and voting status.

Args:
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
5.379333
7.605227
0.707321
return self._call_endpoint(GET_VERSION, id=id, endpoint=endpoint)
def get_version(self, id=None, endpoint=None)
Get the current version of the endpoint.

Note: Not all endpoints currently implement this method.

Args:
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
5.983408
6.938131
0.862395
return self._call_endpoint(GET_NEW_ADDRESS, id=id, endpoint=endpoint)
def get_new_address(self, id=None, endpoint=None)
Create a new address.

Args:
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
4.84234
6.338957
0.763902
return self._call_endpoint(GET_WALLET_HEIGHT, id=id, endpoint=endpoint)
def get_wallet_height(self, id=None, endpoint=None)
Get the current wallet index height.

Args:
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
4.323581
5.754502
0.751339
return self._call_endpoint(LIST_ADDRESS, id=id, endpoint=endpoint)
def list_address(self, id=None, endpoint=None)
Lists all the addresses in the current wallet.

Args:
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
5.909866
9.327844
0.633573
params = [asset_id, addr_from, to_addr, value]
if fee:
    params.append(fee)
if fee and change_addr:
    params.append(change_addr)
elif not fee and change_addr:
    params.append(0)
    params.append(change_addr)
return self._call_endpoint(SEND_FROM, params=params, id=id, endpoint=endpoint)
def send_from(self, asset_id, addr_from, to_addr, value, fee=None, change_addr=None, id=None, endpoint=None)
Transfer from the specified address to the destination address.

Args:
    asset_id: (str) asset identifier (for NEO:
        'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS:
        '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7')
    addr_from: (str) transferring address
    to_addr: (str) destination address
    value: (int/decimal) transfer amount
    fee: (decimal, optional) Paying the handling fee helps elevate the priority of the
        network to process the transfer. It defaults to 0, and can be set to a minimum
        of 0.00000001. The low priority threshold is 0.001.
    change_addr: (str, optional) Change address, default is the first standard address
        in the wallet.
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
2.149117
2.674231
0.803639
params = [asset_id, to_addr, value]
if fee:
    params.append(fee)
if fee and change_addr:
    params.append(change_addr)
elif not fee and change_addr:
    params.append(0)
    params.append(change_addr)
return self._call_endpoint(SEND_TO_ADDRESS, params=params, id=id, endpoint=endpoint)
def send_to_address(self, asset_id, to_addr, value, fee=None, change_addr=None, id=None, endpoint=None)
Transfer to the specified destination address.

Args:
    asset_id: (str) asset identifier (for NEO:
        'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS:
        '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7')
    to_addr: (str) destination address
    value: (int/decimal) transfer amount
    fee: (decimal, optional) Paying the handling fee helps elevate the priority of the
        network to process the transfer. It defaults to 0, and can be set to a minimum
        of 0.00000001. The low priority threshold is 0.001.
    change_addr: (str, optional) Change address, default is the first standard address
        in the wallet.
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use

Returns:
    json object of the result or the error encountered in the RPC call
2.185006
2.696913
0.810188
params = [outputs_array]
if fee:
    params.append(fee)
if fee and change_addr:
    params.append(change_addr)
elif not fee and change_addr:
    params.append(0)
    params.append(change_addr)
return self._call_endpoint(SEND_MANY, params=params, id=id, endpoint=endpoint)
def send_many(self, outputs_array, fee=None, change_addr=None, id=None, endpoint=None)
Transfer to multiple addresses specified by outputs_array.

Args:
    outputs_array: (dict) array, the data structure of each element in the array is
        as follows: {"asset": <asset>, "value": <value>, "address": <address>}
        asset: (str) asset identifier (for NEO:
            'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS:
            '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7')
        value: (int/decimal) transfer amount
        address: (str) destination address
    fee: (decimal, optional) Paying the handling fee helps elevate the priority of the
        network to process the transfer. It defaults to 0, and can be set to a minimum
        of 0.00000001. The low priority threshold is 0.001.
    change_addr: (str, optional) Change address, default is the first standard address
        in the wallet.
    id: (int, optional) id to use for response tracking
    endpoint: (RPCEndpoint, optional) endpoint to specify to use
2.440302
2.88868
0.844781
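A hypothetical usage sketch for send_many; the `client` instance is a placeholder for a wallet-enabled RPC client, and the asset hash and address come from the docstring above.

outputs = [
    {"asset": "c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b",  # NEO
     "value": 1,
     "address": "AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK"},
]
result = client.send_many(outputs, fee=0.001)  # client is a placeholder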
if mpi:
    if not MPIPool.enabled():
        raise SystemError("Tried to run with MPI but MPIPool not enabled.")
    pool = MPIPool(**kwargs)
    if not pool.is_master():
        pool.wait()
        sys.exit(0)
    log.info("Running with MPI on {0} cores".format(pool.size))
    return pool
elif processes != 1 and MultiPool.enabled():
    log.info("Running with MultiPool on {0} cores".format(processes))
    return MultiPool(processes=processes, **kwargs)
else:
    log.info("Running with SerialPool")
    return SerialPool(**kwargs)
def choose_pool(mpi=False, processes=1, **kwargs)
Choose between the different pools given options from, e.g., argparse.

Parameters
----------
mpi : bool, optional
    Use the MPI processing pool, :class:`~schwimmbad.mpi.MPIPool`. By
    default, ``False``, will use the :class:`~schwimmbad.serial.SerialPool`.
processes : int, optional
    Use the multiprocessing pool,
    :class:`~schwimmbad.multiprocessing.MultiPool`, with this number of
    processes. By default, ``processes=1``, will use the
    :class:`~schwimmbad.serial.SerialPool`.
**kwargs
    Any additional kwargs are passed in to the pool class initializer
    selected by the arguments.
2.761187
2.766969
0.99791
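A usage sketch for choose_pool driven by argparse, in the style the docstring suggests; `worker` and `tasks` are placeholders for a picklable function and an iterable of inputs.

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("--ncores", dest="n_cores", default=1, type=int)
group.add_argument("--mpi", dest="mpi", default=False, action="store_true")
args = parser.parse_args()

pool = choose_pool(mpi=args.mpi, processes=args.n_cores)
results = pool.map(worker, tasks)  # worker and tasks are placeholders
pool.close()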
if self.is_master():
    return

worker = self.comm.rank
status = MPI.Status()
while True:
    log.log(_VERBOSE, "Worker {0} waiting for task".format(worker))

    task = self.comm.recv(source=self.master, tag=MPI.ANY_TAG, status=status)

    if task is None:
        log.log(_VERBOSE, "Worker {0} told to quit work".format(worker))
        break

    func, arg = task
    log.log(_VERBOSE, "Worker {0} got task {1} with tag {2}"
            .format(worker, arg, status.tag))

    result = func(arg)

    log.log(_VERBOSE, "Worker {0} sending answer {1} with tag {2}"
            .format(worker, result, status.tag))
    self.comm.ssend(result, self.master, status.tag)

if callback is not None:
    callback()
def wait(self, callback=None)
Tell the workers to wait and listen for the master process. This is called automatically when using :meth:`MPIPool.map` and doesn't need to be called by the user.
2.732618
2.624491
1.041199
# If not the master just wait for instructions.
if not self.is_master():
    self.wait()
    return

if callback is None:
    callback = _dummy_callback

workerset = self.workers.copy()
tasklist = [(tid, (worker, arg)) for tid, arg in enumerate(tasks)]
resultlist = [None] * len(tasklist)
pending = len(tasklist)

while pending:
    if workerset and tasklist:
        worker = workerset.pop()
        taskid, task = tasklist.pop()
        log.log(_VERBOSE, "Sent task %s to worker %s with tag %s",
                task[1], worker, taskid)
        self.comm.send(task, dest=worker, tag=taskid)

    if tasklist:
        flag = self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
        if not flag:
            continue
    else:
        self.comm.Probe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)

    status = MPI.Status()
    result = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG,
                            status=status)
    worker = status.source
    taskid = status.tag
    log.log(_VERBOSE, "Master received from worker %s with tag %s",
            worker, taskid)

    callback(result)

    workerset.add(worker)
    resultlist[taskid] = result
    pending -= 1

return resultlist
def map(self, worker, tasks, callback=None)
Evaluate a function or callable on each task in parallel using MPI.

The callable, ``worker``, is called on each element of the ``tasks``
iterable. The results are returned in the expected order (symmetric with
``tasks``).

Parameters
----------
worker : callable
    A function or callable object that is executed on each element of the
    specified ``tasks`` iterable. This object must be picklable (i.e. it
    can't be a function scoped within a function or a ``lambda`` function).
    This should accept a single positional argument and return a single
    object.
tasks : iterable
    A list or iterable of tasks. Each task can be itself an iterable (e.g.,
    tuple) of values or data to pass in to the worker function.
callback : callable, optional
    An optional callback function (or callable) that is called with the
    result from each worker run and is executed on the master process. This
    is useful for, e.g., saving results to a file, since the callback is
    only called on the master thread.

Returns
-------
results : list
    A list of results from the output of each ``worker()`` call.
2.730429
2.799983
0.975159
if self.is_worker():
    return

for worker in self.workers:
    self.comm.send(None, worker, 0)
def close(self)
Tell all the workers to quit.
8.136889
5.83849
1.393663
try:
    # Quick way to determine if we're in git or not - returns '' if not
    devstr = get_git_devstr(sha=True, show_warning=False, path=path)
except OSError:
    return version

if not devstr:
    # Probably not in git so just pass silently
    return version

if 'dev' in version:  # update to the current git revision
    version_base = version.split('.dev', 1)[0]
    devstr = get_git_devstr(sha=False, show_warning=False, path=path)
    return version_base + '.dev' + devstr
else:
    # otherwise it's already the true/release version
    return version
def update_git_devstr(version, path=None)
Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate.
5.147092
5.087465
1.01172
if path is None:
    path = os.getcwd()

if not _get_repo_path(path, levels=0):
    return ''

if not os.path.isdir(path):
    path = os.path.abspath(os.path.dirname(path))

if sha:
    # Faster for getting just the hash of HEAD
    cmd = ['rev-parse', 'HEAD']
else:
    cmd = ['rev-list', '--count', 'HEAD']

def run_git(cmd):
    try:
        p = subprocess.Popen(['git'] + cmd, cwd=path,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             stdin=subprocess.PIPE)
        stdout, stderr = p.communicate()
    except OSError as e:
        if show_warning:
            warnings.warn('Error running git: ' + str(e))
        return (None, b'', b'')

    if p.returncode == 128:
        if show_warning:
            warnings.warn('No git repository present at {0!r}! Using '
                          'default dev version.'.format(path))
        return (p.returncode, b'', b'')
    if p.returncode == 129:
        if show_warning:
            warnings.warn('Your git looks old (does it support {0}?); '
                          'consider upgrading to v1.7.2 or '
                          'later.'.format(cmd[0]))
        return (p.returncode, stdout, stderr)
    elif p.returncode != 0:
        if show_warning:
            warnings.warn('Git failed while determining revision '
                          'count: {0}'.format(_decode_stdio(stderr)))
        return (p.returncode, stdout, stderr)

    return p.returncode, stdout, stderr

returncode, stdout, stderr = run_git(cmd)

if not sha and returncode == 129:
    # git returns 129 if a command option failed to parse; in
    # particular this could happen in git versions older than 1.7.2
    # where the --count option is not supported
    # Also use --abbrev-commit and --abbrev=0 to display the minimum
    # number of characters needed per-commit (rather than the full hash)
    cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']
    returncode, stdout, stderr = run_git(cmd)
    # Fall back on the old method of getting all revisions and counting
    # the lines
    if returncode == 0:
        return str(stdout.count(b'\n'))
    else:
        return ''
elif sha:
    return _decode_stdio(stdout)[:40]
else:
    return _decode_stdio(stdout).strip()
def get_git_devstr(sha=False, show_warning=True, path=None)
Determines the number of revisions in this repository.

Parameters
----------
sha : bool
    If True, the full SHA1 hash will be returned. Otherwise, the total
    count of commits in the repository will be used as a "revision number".
show_warning : bool
    If True, issue a warning if git returns an error code, otherwise errors
    pass silently.
path : str or None
    If a string, specifies the directory to look in to find the git
    repository. If `None`, the current working directory is used, and must
    be the root of the git repository. If given a filename it uses the
    directory containing that file.

Returns
-------
devversion : str
    Either a string with the revision number (if `sha` is False), the SHA1
    hash of the current commit (if `sha` is True), or an empty string if
    git version info could not be identified.
3.055162
2.985267
1.023413
if os.path.isfile(pathname):
    current_dir = os.path.abspath(os.path.dirname(pathname))
elif os.path.isdir(pathname):
    current_dir = os.path.abspath(pathname)
else:
    return None

current_level = 0
while levels is None or current_level <= levels:
    if os.path.exists(os.path.join(current_dir, '.git')):
        return current_dir

    current_level += 1
    if current_dir == os.path.dirname(current_dir):
        break
    current_dir = os.path.dirname(current_dir)

return None
def _get_repo_path(pathname, levels=None)
Given a file or directory name, determine the root of the git repository
this path is under. If given, this won't look any higher than ``levels``
(that is, if ``levels=0`` then the given path must be the root of the git
repository, and is returned if so). Returns `None` if the given path could
not be determined to belong to a git repo.
1.790442
1.764688
1.014594
return self._call_callback(callback, map(func, iterable))
def map(self, func, iterable, callback=None)
A wrapper around the built-in ``map()`` function to provide a consistent
interface with the other ``Pool`` classes.

Parameters
----------
func : callable
    A function or callable object that is executed on each element of the
    specified ``iterable``. This should accept a single positional argument
    and return a single object.
iterable : iterable
    A list or iterable of tasks. Each task can be itself an iterable (e.g.,
    tuple) of values or data to pass in to the worker function.
callback : callable, optional
    An optional callback function (or callable) that is called with the
    result of each ``func`` call. This is useful for, e.g., saving results
    to a file.

Returns
-------
results : generator
8.549017
30.698589
0.278482
# not handling rare Leucine or Valine starts!
if aa_pos == 0 and codon in START_CODONS:
    return "M"
elif codon in STOP_CODONS:
    return "*"
else:
    return DNA_CODON_TABLE[codon]
def translate_codon(codon, aa_pos)
Translate a single codon into a single amino acid or stop '*'.

Parameters
----------
codon : str
    Expected to be of length 3
aa_pos : int
    Codon/amino acid offset into the protein (starting from 0)
6.199459
6.96658
0.889886
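Illustrative calls, assuming the standard genetic code behind START_CODONS, STOP_CODONS and DNA_CODON_TABLE:

translate_codon("ATG", 0)  # -> "M" (start codon at the first position)
translate_codon("TAA", 7)  # -> "*" (stop codon)
translate_codon("GGC", 3)  # -> "G" (ordinary lookup in DNA_CODON_TABLE)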
if not isinstance(nucleotide_sequence, Seq):
    nucleotide_sequence = Seq(nucleotide_sequence)

if truncate:
    # if sequence isn't a multiple of 3, truncate it so BioPython
    # doesn't complain
    n_nucleotides = int(len(nucleotide_sequence) / 3) * 3
    nucleotide_sequence = nucleotide_sequence[:n_nucleotides]
else:
    n_nucleotides = len(nucleotide_sequence)

assert n_nucleotides % 3 == 0, \
    ("Expected nucleotide sequence to be multiple of 3"
     " but got %s of length %d") % (nucleotide_sequence, n_nucleotides)

# passing cds=False to translate since we may want to deal with premature
# stop codons
protein_sequence = nucleotide_sequence.translate(to_stop=to_stop, cds=False)

if first_codon_is_start and (
        len(protein_sequence) == 0 or protein_sequence[0] != "M"):
    if nucleotide_sequence[:3] in START_CODONS:
        # TODO: figure out when these should be made into methionines
        # and when left as whatever amino acid they normally code for
        # e.g. Leucine start codons
        # See: DOI: 10.1371/journal.pbio.0020397
        return "M" + protein_sequence[1:]
    else:
        raise ValueError(
            ("Expected first codon of %s to be start codon"
             " (one of %s) but got %s") % (
                protein_sequence[:10], START_CODONS, nucleotide_sequence))

return protein_sequence
def translate( nucleotide_sequence, first_codon_is_start=True, to_stop=True, truncate=False)
Translates a cDNA coding sequence into an amino acid protein sequence.

Should typically start with a start codon, but we allow non-methionine
first residues since the CDS we're translating might have been affected by
a start loss mutation. The sequence may include the 3' UTR but will stop
translation at the first encountered stop codon.

Parameters
----------
nucleotide_sequence : BioPython Seq
    cDNA sequence
first_codon_is_start : bool
    Treat the first codon of nucleotide_sequence as a start codon
    (translated to methionine)
truncate : bool
    Truncate sequence if it's not a multiple of 3 (default = False)

Returns BioPython Seq of amino acids
3.333416
3.302676
1.009307
n_mutant_codons = len(nucleotide_sequence) // 3
for i in range(n_mutant_codons):
    codon = nucleotide_sequence[3 * i:3 * i + 3]
    if codon in STOP_CODONS:
        return i
return -1
def find_first_stop_codon(nucleotide_sequence)
Given a sequence of codons (expected to have a length that is a multiple of
three), return the index of the first stop codon, or -1 if none is in the
sequence.
2.209617
2.129264
1.037737
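Illustrative behavior, assuming the standard stop codons (TAA/TAG/TGA) in STOP_CODONS:

find_first_stop_codon("ATGGGCTAA")  # -> 2 (the third codon is a stop)
find_first_stop_codon("ATGGGC")     # -> -1 (no stop codon present)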
mutant_stop_codon_index = find_first_stop_codon(mutant_codons)

using_three_prime_utr = False
if mutant_stop_codon_index != -1:
    mutant_codons = mutant_codons[:3 * mutant_stop_codon_index]
elif ref_codon_end_offset > len(transcript.protein_sequence):
    # if the mutant codons didn't contain a stop but did mutate the
    # true reference stop codon then the translated sequence might involve
    # the 3' UTR
    three_prime_utr = transcript.three_prime_utr_sequence
    n_utr_codons = len(three_prime_utr) // 3
    # trim the 3' UTR sequence to have a length that is a multiple of 3
    truncated_utr_sequence = three_prime_utr[:n_utr_codons * 3]

    # note the offset of the first stop codon in the combined
    # nucleotide sequence of both the end of the CDS and the 3' UTR
    first_utr_stop_codon_index = find_first_stop_codon(truncated_utr_sequence)
    if first_utr_stop_codon_index > 0:
        # if there is a stop codon in the 3' UTR sequence and it's not the
        # very first codon
        using_three_prime_utr = True
        n_mutant_codons_before_utr = len(mutant_codons) // 3
        mutant_stop_codon_index = \
            n_mutant_codons_before_utr + first_utr_stop_codon_index
        # combine the in-frame mutant codons with the truncated sequence of
        # the 3' UTR
        mutant_codons += truncated_utr_sequence[:first_utr_stop_codon_index * 3]
    elif first_utr_stop_codon_index == -1:
        # if there is no stop codon in the 3' UTR sequence
        using_three_prime_utr = True
        mutant_codons += truncated_utr_sequence

amino_acids = translate(
    mutant_codons,
    first_codon_is_start=(ref_codon_start_offset == 0))
return amino_acids, mutant_stop_codon_index, using_three_prime_utr
def translate_in_frame_mutation( transcript, ref_codon_start_offset, ref_codon_end_offset, mutant_codons)
Returns:
- mutant amino acid sequence
- offset of first stop codon in the mutant sequence (or -1 if there was none)
- boolean flag indicating whether any codons from the 3' UTR were used

Parameters
----------
transcript : pyensembl.Transcript
    Reference transcript to which a cDNA mutation should be applied.
ref_codon_start_offset : int
    Starting (base 0) integer offset into codons (character triplets) of the
    transcript's reference coding sequence.
ref_codon_end_offset : int
    Final (base 0) integer offset into codons of the transcript's reference
    coding sequence.
mutant_codons : str
    Nucleotide sequence to replace the reference codons with (expected to
    have length that is a multiple of three)
2.605725
2.560998
1.017465
print_version_info()
if args_list is None:
    args_list = sys.argv[1:]
args = arg_parser.parse_args(args_list)
variants = variant_collection_from_args(args)
variants_dataframe = variants.to_dataframe()
logger.info('\n%s', variants_dataframe)
if args.output_csv:
    variants_dataframe.to_csv(args.output_csv, index=False)
def main(args_list=None)
Script which loads variants and annotates them with overlapping genes.

Example usage:
    varcode-genes --vcf mutect.vcf \
        --vcf strelka.vcf \
        --maf tcga_brca.maf \
        --variant chr1 498584 C G \
        --json-variants more_variants.json
2.633858
2.894401
0.909984
require_string(path, "Path to MAF")
n_basic_columns = len(MAF_COLUMN_NAMES)

# pylint: disable=no-member
# pylint gets confused by read_csv
df = pandas.read_csv(
    path,
    comment="#",
    sep="\t",
    nrows=nrows,  # optional limit on number of rows loaded
    low_memory=False,
    skip_blank_lines=True,
    header=0,
    encoding=encoding)

if len(df.columns) < n_basic_columns:
    error_message = (
        "Too few columns in MAF file %s, expected %d but got %d : %s" % (
            path, n_basic_columns, len(df.columns), df.columns))
    if raise_on_error:
        raise ValueError(error_message)
    else:
        logging.warn(error_message)

# check each pair of expected/actual column names to make sure they match
for expected, actual in zip(MAF_COLUMN_NAMES, df.columns):
    if expected != actual:
        # MAFs in the wild have capitalization differences in their
        # column names, normalize them to always use the names above
        if expected.lower() == actual.lower():
            # using DataFrame.rename in Python 2.7.x doesn't seem to
            # work for some files, possibly because Pandas treats
            # unicode vs. str columns as different?
            df[expected] = df[actual]
            del df[actual]
        else:
            error_message = (
                "Expected column %s but got %s" % (expected, actual))
            if raise_on_error:
                raise ValueError(error_message)
            else:
                logging.warn(error_message)

return df
def load_maf_dataframe(path, nrows=None, raise_on_error=True, encoding=None)
Load the guaranteed columns of a TCGA MAF file into a DataFrame.

Parameters
----------
path : str
    Path to MAF file
nrows : int
    Optional limit to number of rows loaded
raise_on_error : bool
    Raise an exception upon encountering an error or log an error
encoding : str, optional
    Encoding to use for UTF when reading MAF file.
3.297721
3.50163
0.941768
# pylint: disable=no-member
# pylint gets confused by read_csv inside load_maf_dataframe
maf_df = load_maf_dataframe(
    path, raise_on_error=raise_on_error, encoding=encoding)

if len(maf_df) == 0 and raise_on_error:
    raise ValueError("Empty MAF file %s" % path)

ensembl_objects = {}
variants = []
metadata = {}
for _, x in maf_df.iterrows():
    contig = x.Chromosome
    if isnull(contig):
        error_message = "Invalid contig name: %s" % (contig,)
        if raise_on_error:
            raise ValueError(error_message)
        else:
            logging.warn(error_message)
            continue

    start_pos = x.Start_Position
    ref = x.Reference_Allele

    # it's possible in a MAF file to have multiple Ensembl releases
    # mixed in a single MAF file (the genome assembly is
    # specified by the NCBI_Build column)
    ncbi_build = x.NCBI_Build
    if ncbi_build in ensembl_objects:
        ensembl = ensembl_objects[ncbi_build]
    else:
        if isinstance(ncbi_build, int):
            reference_name = "B%d" % ncbi_build
        else:
            reference_name = str(ncbi_build)
        ensembl = infer_genome(reference_name)
        ensembl_objects[ncbi_build] = ensembl

    # have to try both Tumor_Seq_Allele1 and Tumor_Seq_Allele2
    # to figure out which is different from the reference allele
    if x.Tumor_Seq_Allele1 != ref:
        alt = x.Tumor_Seq_Allele1
    else:
        if x.Tumor_Seq_Allele2 == ref:
            error_message = (
                "Both tumor alleles agree with reference %s: %s" % (ref, x,))
            if raise_on_error:
                raise ValueError(error_message)
            else:
                logging.warn(error_message)
                continue
        alt = x.Tumor_Seq_Allele2

    variant = Variant(contig, start_pos, str(ref), str(alt), ensembl=ensembl)

    # keep metadata about the variant and its TCGA annotation
    metadata[variant] = {
        'Hugo_Symbol': x.Hugo_Symbol,
        'Center': x.Center,
        'Strand': x.Strand,
        'Variant_Classification': x.Variant_Classification,
        'Variant_Type': x.Variant_Type,
        'dbSNP_RS': x.dbSNP_RS,
        'dbSNP_Val_Status': x.dbSNP_Val_Status,
        'Tumor_Sample_Barcode': x.Tumor_Sample_Barcode,
        'Matched_Norm_Sample_Barcode': x.Matched_Norm_Sample_Barcode,
    }
    for optional_col in optional_cols:
        if optional_col in x:
            metadata[variant][optional_col] = x[optional_col]

    variants.append(variant)

return VariantCollection(
    variants=variants,
    source_to_metadata_dict={path: metadata},
    sort_key=sort_key,
    distinct=distinct)
def load_maf( path, optional_cols=[], sort_key=variant_ascending_position_sort_key, distinct=True, raise_on_error=True, encoding=None)
Load reference name and Variant objects from a MAF filename.

Parameters
----------
path : str
    Path to MAF (*.maf).
optional_cols : list, optional
    A list of MAF columns to include as metadata if they are present in the
    MAF. Does not result in an error if those columns are not present.
sort_key : fn
    Function which maps each element to a sorting criterion. Set to None to
    not sort the variants.
distinct : bool
    Don't keep repeated variants
raise_on_error : bool
    Raise an exception upon encountering an error or just log a warning.
encoding : str, optional
    Encoding to use for UTF when reading MAF file.
2.351243
2.396702
0.981033
value = getattr(effect, field_name, None)
if value is None:
    return default
else:
    return fn(value)
def apply_to_field_if_exists(effect, field_name, fn, default)
Apply function to specified field of effect if it is not None, otherwise return default.
2.583436
2.501711
1.032668
return apply_to_field_if_exists( effect=effect, field_name="transcript", fn=fn, default=default)
def apply_to_transcript_if_exists(effect, fn, default)
Apply function to transcript associated with effect, if it exists, otherwise return default.
3.579295
4.313913
0.82971
return apply_to_transcript_if_exists( effect=effect, fn=lambda t: len(t.exons), default=0)
def number_exons_in_associated_transcript(effect)
Number of exons on transcript associated with effect, if there is one (otherwise return 0).
5.733887
6.167634
0.929674
return apply_to_transcript_if_exists( effect=effect, fn=lambda t: len(t.coding_sequence) if (t.complete and t.coding_sequence) else 0, default=0)
def cds_length_of_associated_transcript(effect)
Length of coding sequence of transcript associated with effect, if there is one (otherwise return 0).
5.517717
5.65778
0.975244
return apply_to_transcript_if_exists( effect=effect, fn=lambda t: len(t.sequence), default=0)
def length_of_associated_transcript(effect)
Length of spliced mRNA sequence of transcript associated with effect, if there is one (otherwise return 0).
6.775761
7.311142
0.926772
return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.name, default="")
def name_of_associated_transcript(effect)
Name of transcript associated with effect, if there is one (otherwise return "").
7.811821
8.554767
0.913154
return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.gene_id, default=None)
def gene_id_of_associated_transcript(effect)
Ensembl gene ID of the transcript associated with the effect; returns None
if the effect has no transcript.
5.855022
6.39141
0.916077
return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.complete, default=False)
def effect_has_complete_transcript(effect)
Parameters
----------
effect : subclass of MutationEffect

Returns True if the effect has a transcript and that transcript has a
complete CDS.
6.930968
9.548182
0.725894
return apply_to_gene_if_exists( effect=effect, fn=lambda g: g.biotype == "protein_coding", default=False)
def effect_associated_with_protein_coding_gene(effect)
Parameters
----------
effect : subclass of MutationEffect

Returns True if effect is associated with a gene and that gene has a
protein_coding biotype.
6.528742
7.009925
0.931357
return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.biotype == "protein_coding", default=False)
def effect_associated_with_protein_coding_transcript(effect)
Parameters
----------
effect : subclass of MutationEffect

Returns True if effect is associated with a transcript and that transcript
has a protein_coding biotype.
6.24845
6.791874
0.919989
name = name_of_associated_transcript(effect)
if "-" not in name:
    return 0
parts = name.split("-")
last_part = parts[-1]
if last_part.isdigit():
    return int(last_part)
else:
    return 0
def parse_transcript_number(effect)
Try to parse the number at the end of a transcript name associated with an
effect, e.g. TP53-001 returns the integer 1.

Parameters
----------
effect : subclass of MutationEffect

Returns int
3.331384
3.297862
1.010165
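Expected behavior of parse_transcript_number, sketched on the transcript names from the docstring (the effect objects themselves are placeholders):

# name_of_associated_transcript(effect) == "TP53-001"  ->  returns 1
# name_of_associated_transcript(effect) == "TP53"      ->  returns 0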
return tuple([
    effect_priority(effect),
    effect_associated_with_protein_coding_gene(effect),
    effect_associated_with_protein_coding_transcript(effect),
    effect_has_complete_transcript(effect),
    cds_length_of_associated_transcript(effect),
    length_of_associated_transcript(effect),
    number_exons_in_associated_transcript(effect),
    transcript_name_ends_with_01(effect),
    -parse_transcript_number(effect)])
def multi_gene_effect_sort_key(effect)
This function acts as a sort key for choosing the highest priority effect
across multiple genes (so it does not assume that effects might involve the
same start/stop codons).

Returns tuple with the following elements:
1) Integer priority of the effect type.
2) Does the associated gene have a "protein_coding" biotype? False if no
   gene is associated with the effect.
3) Does the associated transcript have a "protein_coding" biotype? False
   if no transcript is associated with the effect.
4) Is the associated transcript complete? False if no transcript is
   associated with the effect.
5) CDS length. This value will be 0 if the effect has no associated
   transcript or if the transcript is noncoding or incomplete.
6) Total length of the transcript. This value will be 0 for
   intra/intergenic variant effects without an associated transcript.
7) Number of exons. This value will be 0 for intra/intergenic variant
   effects without an associated transcript.
8) If everything is the same up to this point then let's use the very
   sloppy heuristic of preferring transcripts like "TP53-201" over
   "TP53-206", so anything ending with "01" is considered better.
9) Lastly, if we end up with two transcripts like "TP53-202" and
   "TP53-203", prefer the one with the lowest number in the name.
5.159819
2.756986
1.871543
if effect.__class__ is not ExonicSpliceSite:
    return effect
if effect.alternate_effect is None:
    return effect
splice_priority = effect_priority(effect)
alternate_priority = effect_priority(effect.alternate_effect)
if splice_priority > alternate_priority:
    return effect
else:
    return effect.alternate_effect
def select_between_exonic_splice_site_and_alternate_effect(effect)
If the given effect is an ExonicSpliceSite then it might contain an alternate effect of higher priority. In that case, return the alternate effect. Otherwise, this acts as an identity function.
2.660768
2.431139
1.094453
# materialize the priorities as a list, since a bare map() generator
# would be exhausted by the call to max() below (Python 3)
priority_values = list(map(effect_priority, effects))
max_priority = max(priority_values)
return [
    effect
    for (effect, priority) in zip(effects, priority_values)
    if priority == max_priority]
def keep_max_priority_effects(effects)
Given a list of effects, only keep the ones with the maximum priority
effect type.

Parameters
----------
effects : list of MutationEffect subclasses

Returns list of same length or shorter
2.632735
3.525689
0.746729
for filter_fn in filters:
    filtered_effects = filter_fn(effects)
    if len(filtered_effects) == 1:
        # only one effect left, no further filtering needed
        return filtered_effects
    elif len(filtered_effects) > 1:
        # keep the narrowed list; filters which return zero effects
        # are ignored
        effects = filtered_effects
return effects
def filter_pipeline(effects, filters)
Apply each filter to the effect list sequentially. If any filter returns
zero values then ignore it. As soon as only one effect is left, return it.

Parameters
----------
effects : list of MutationEffect subclass instances
filters : list of functions
    Each function takes a list of effects and returns a list of effects

Returns list of effects
2.765945
2.750121
1.005754
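A usage sketch combining filter_pipeline with the filter helpers referenced elsewhere in this module; the effects list is a placeholder.

narrowed = filter_pipeline(
    effects=effects,  # placeholder list of MutationEffect instances
    filters=[
        keep_max_priority_effects,
        keep_effects_on_protein_coding_genes,
    ])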
# first filter effects to keep those on
# 1) maximum priority effects
# 2) protein coding genes
# 3) protein coding transcripts
# 4) complete transcripts
#
# If any of these filters drop all the effects then we move on to the next
# filtering step.
effects = filter_pipeline(
    effects=effects,
    filters=[
        keep_max_priority_effects,
        keep_effects_on_protein_coding_genes,
        keep_effects_on_protein_coding_transcripts,
        keep_effects_on_complete_transcripts,
    ],
)
if len(effects) == 1:
    return effects[0]

# compare CDS length and transcript lengths of remaining effects
# if one effect has the maximum of both categories then return it
cds_lengths = [cds_length_of_associated_transcript(e) for e in effects]
max_cds_length = max(cds_lengths)
# get set of indices of all effects with maximum CDS length
max_cds_length_indices = {
    i
    for (i, l) in enumerate(cds_lengths)
    if l == max_cds_length
}

seq_lengths = [length_of_associated_transcript(e) for e in effects]
max_seq_length = max(seq_lengths)
# get set of indices for all effects whose associated transcript
# has maximum sequence length
max_seq_length_indices = {
    i
    for (i, l) in enumerate(seq_lengths)
    if l == max_seq_length
}

# which effects have transcripts with both the longest CDS and
# longest full transcript sequence?
intersection_of_indices = \
    max_cds_length_indices.intersection(max_seq_length_indices)
n_candidates = len(intersection_of_indices)
if n_candidates == 1:
    best_index = intersection_of_indices.pop()
    return effects[best_index]
elif n_candidates == 0:
    # if set of max CDS effects and max sequence length effects is disjoint
    # then let's try to do the tie-breaking sort over their union
    union_of_indices = max_cds_length_indices.union(max_seq_length_indices)
    candidate_effects = [effects[i] for i in union_of_indices]
else:
    # if multiple effects have transcripts with the max CDS length and
    # the max full sequence length then run the tie-breaking sort
    # over all these candidates
    candidate_effects = [effects[i] for i in intersection_of_indices]

# break ties by number of exons, whether the name of the transcript ends
# in "01", and, all else being equal, prefer transcript names that end
# with lower numbers
return max(
    candidate_effects,
    key=tie_breaking_sort_key_for_single_gene_effects)
def top_priority_effect_for_single_gene(effects)
For effects which are from the same gene, check to see if there is a
canonical transcript with both the maximum length CDS and maximum length
full transcript sequence. If not, then use number of exons and transcript
name as tie-breaking features.

Parameters
----------
effects : list of MutationEffect subclass instances

Returns single effect object
3.642184
3.412594
1.067278
if len(effects) == 0:
    raise ValueError("List of effects cannot be empty")

effects = map(
    select_between_exonic_splice_site_and_alternate_effect, effects)
effects_grouped_by_gene = apply_groupby(
    effects, fn=gene_id_of_associated_transcript, skip_none=False)

if None in effects_grouped_by_gene:
    effects_without_genes = effects_grouped_by_gene.pop(None)
else:
    effects_without_genes = []

# if we had any effects associated with genes then choose one of those
if len(effects_grouped_by_gene) > 0:
    effects_with_genes = [
        top_priority_effect_for_single_gene(gene_effects)
        for gene_effects in effects_grouped_by_gene.values()
    ]
    return max(effects_with_genes, key=multi_gene_effect_sort_key)
else:
    # if all effects were without genes then choose the best among those
    assert len(effects_without_genes) > 0
    return max(effects_without_genes, key=multi_gene_effect_sort_key)
def top_priority_effect(effects)
Given a collection of variant transcript effects, return the top priority object. ExonicSpliceSite variants require special treatment since they actually represent two effects -- the splicing modification and whatever else would happen to the exonic sequence if nothing else gets changed. In cases where multiple transcripts give rise to multiple effects, use a variety of filtering and sorting heuristics to pick the canonical transcript.
3.612969
3.386329
1.066928
return dict( variants=self.variants, distinct=self.distinct, sort_key=self.sort_key, sources=self.sources, source_to_metadata_dict=self.source_to_metadata_dict)
def to_dict(self)
Since Collection.to_dict() returns a state dictionary with an 'elements' field we have to rename it to 'variants'.
5.157603
4.087605
1.261766
kwargs = self.to_dict()
kwargs["variants"] = new_elements
return self.from_dict(kwargs)
def clone_with_new_elements(self, new_elements)
Create another VariantCollection of the same class and with the same state
(including metadata) but possibly different entries. Warning: metadata is a
dictionary keyed by variants. This method leaves that dictionary as-is,
which may result in extraneous or missing entries.
5.693569
4.734573
1.202552
return EffectCollection([ effect for variant in self for effect in variant.effects(raise_on_error=raise_on_error) ])
def effects(self, raise_on_error=True)
Parameters
----------
raise_on_error : bool, optional
    If an exception is raised while determining the effect of a variant on
    a transcript, should it be raised? The default is True, meaning errors
    result in raised exceptions; otherwise they are only logged.
4.703227
4.593598
1.023866
return { gene_name: len(group) for (gene_name, group) in self.groupby_gene_name().items() }
def gene_counts(self)
Returns number of elements overlapping each gene name. Expects the derived class (VariantCollection or EffectCollection) to have an implementation of groupby_gene_name.
5.124761
2.955449
1.734004
return self.filter_any_above_threshold( multi_key_fn=lambda variant: variant.transcript_ids, value_dict=transcript_expression_dict, threshold=min_expression_value)
def filter_by_transcript_expression( self, transcript_expression_dict, min_expression_value=0.0)
Filters variants down to those which overlap a transcript whose expression
value in the transcript_expression_dict argument is greater than
min_expression_value.

Parameters
----------
transcript_expression_dict : dict
    Dictionary mapping Ensembl transcript IDs to expression estimates
    (either FPKM or TPM)
min_expression_value : float
    Threshold above which we'll keep a variant in the result collection
6.211822
6.676188
0.930444
return self.filter_any_above_threshold( multi_key_fn=lambda variant: variant.gene_ids, value_dict=gene_expression_dict, threshold=min_expression_value)
def filter_by_gene_expression( self, gene_expression_dict, min_expression_value=0.0)
Filters variants down to those which overlap a gene whose expression value
in the gene_expression_dict argument is greater than min_expression_value.

Parameters
----------
gene_expression_dict : dict
    Dictionary mapping Ensembl gene IDs to expression estimates (either
    FPKM or TPM)
min_expression_value : float
    Threshold above which we'll keep a variant in the result collection
6.403246
7.025393
0.911443
return (
    self.__class__ == other.__class__ and
    len(self) == len(other) and
    all(x.exactly_equal(y) for (x, y) in zip(self, other)))
def exactly_equal(self, other)
Comparison between VariantCollection instances that takes into account the
info field of Variant instances.

Returns
-------
True if the variants in this collection equal the variants in the other
collection. The Variant.info fields are included in the comparison.
5.13644
1.998442
2.570222
# three levels of nested dictionaries!
# {source name: {variant: {attribute: value}}}
combined_dictionary = {}
for source_to_metadata_dict in dictionaries:
    for source_name, variant_to_metadata_dict in source_to_metadata_dict.items():
        combined_dictionary.setdefault(source_name, {})
        combined_source_dict = combined_dictionary[source_name]
        for variant, metadata_dict in variant_to_metadata_dict.items():
            combined_source_dict.setdefault(variant, {})
            combined_source_dict[variant].update(metadata_dict)
return combined_dictionary
def _merge_metadata_dictionaries(cls, dictionaries)
Helper function for combining variant collections: given multiple dictionaries mapping: source name -> (variant -> (attribute -> value)) Returns dictionary with union of all variants and sources.
2.623741
2.322685
1.129615
kwargs["variants"] = combine_fn(*[set(vc) for vc in variant_collections]) kwargs["source_to_metadata_dict"] = cls._merge_metadata_dictionaries( [vc.source_to_metadata_dict for vc in variant_collections]) kwargs["sources"] = set.union(*([vc.sources for vc in variant_collections])) for key, value in variant_collections[0].to_dict().items(): # If some optional parameter isn't explicitly specified as an # argument to union() or intersection() then use the same value # as the first VariantCollection. # # I'm doing this so that the meaning of VariantCollection.union # and VariantCollection.intersection with a single argument is # the identity function (rather than setting optional parameters # to their default values. if key not in kwargs: kwargs[key] = value return cls(**kwargs)
def _combine_variant_collections(cls, combine_fn, variant_collections, kwargs)
Create a single VariantCollection from multiple different collections.

Parameters
----------
cls : class
    Should be VariantCollection
combine_fn : function
    Function which takes any number of sets of variants and returns some
    combination of them (typically union or intersection).
variant_collections : tuple of VariantCollection
kwargs : dict
    Optional dictionary of keyword arguments to pass to the initializer
    for VariantCollection.
4.757102
4.782184
0.994755
return self._combine_variant_collections( combine_fn=set.union, variant_collections=(self,) + others, kwargs=kwargs)
def union(self, *others, **kwargs)
Returns the union of variants in several VariantCollection objects.
8.685221
5.589685
1.553794
return self._combine_variant_collections( combine_fn=set.intersection, variant_collections=(self,) + others, kwargs=kwargs)
def intersection(self, *others, **kwargs)
Returns the intersection of variants in several VariantCollection objects.
9.143491
5.694417
1.605694
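A usage sketch for union/intersection; variants_a and variants_b stand in for VariantCollection instances loaded elsewhere (e.g. via load_maf).

combined = variants_a.union(variants_b)       # all variants from both collections
shared = variants_a.intersection(variants_b)  # only variants present in both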
def row_from_variant(variant):
    return OrderedDict([
        ("chr", variant.contig),
        ("start", variant.original_start),
        ("ref", variant.original_ref),
        ("alt", variant.original_alt),
        ("gene_name", ";".join(variant.gene_names)),
        ("gene_id", ";".join(variant.gene_ids))
    ])

rows = [row_from_variant(v) for v in self]
if len(rows) == 0:
    # TODO: return a DataFrame with the appropriate columns
    return pd.DataFrame()
return pd.DataFrame.from_records(rows, columns=rows[0].keys())
def to_dataframe(self)
Build a DataFrame from this variant collection
2.711717
2.502161
1.08375
n_ref = len(ref)
n_alt = len(alt)
n_min = min(n_ref, n_alt)
i = 0
while i < n_min and ref[i] == alt[i]:
    i += 1

# guaranteed that ref and alt agree on all the characters
# up to i'th position, so it doesn't matter which one we pull
# the prefix out of
prefix = ref[:i]
ref_suffix = ref[i:]
alt_suffix = alt[i:]
return ref_suffix, alt_suffix, prefix
def trim_shared_prefix(ref, alt)
Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix.
3.077936
3.118914
0.986861
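Worked examples for trim_shared_prefix, using the mutation styles mentioned in the docstring:

trim_shared_prefix(ref="C", alt="CT")    # -> ('', 'T', 'C')
trim_shared_prefix(ref="GYFP", alt="G")  # -> ('YFP', '', 'G')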
n_ref = len(ref)
n_alt = len(alt)
n_min = min(n_ref, n_alt)
i = 0
while i < n_min and ref[-i - 1] == alt[-i - 1]:
    i += 1
# i is the length of the shared suffix
if i == 0:
    return (ref, alt, '')
return (ref[:-i], alt[:-i], ref[-i:])
def trim_shared_suffix(ref, alt)
Trim the shared suffix of two strings, mirroring `trim_shared_prefix`
above. Conceptually this is equivalent to reversing both strings, trimming
the shared prefix, and reversing the results back. Given ref='ABC' and
alt='BC':
    reverse_ref = 'CBA'
    reverse_alt = 'CB'
and trimming the shared (reversed) prefix yields ('A', '', 'CB'). Reversing
all three result strings gives back the two prefixes and the shared suffix:
    ('A', '', 'BC')
2.275031
2.457232
0.925851
ref, alt, prefix = trim_shared_prefix(ref, alt)
ref, alt, suffix = trim_shared_suffix(ref, alt)
return ref, alt, prefix, suffix
def trim_shared_flanking_strings(ref, alt)
Given two nucleotide or amino acid strings, identify if they have a common prefix, a common suffix, and return their unique components along with the prefix and suffix. For example, if the input ref = "SYFFQGR" and alt = "SYMLLFIFQGR" then the result will be: ("F", "MLLFI", "SY", "FQGR")
2.343848
2.82276
0.830339
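The docstring's own example, shown as a call:

trim_shared_flanking_strings("SYFFQGR", "SYMLLFIFQGR")
# -> ('F', 'MLLFI', 'SY', 'FQGR')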
print_version_info()
if args_list is None:
    args_list = sys.argv[1:]
args = arg_parser.parse_args(args_list)
variants = variant_collection_from_args(args)
effects = variants.effects()
if args.only_coding:
    effects = effects.drop_silent_and_noncoding()
if args.one_per_variant:
    variant_to_effect_dict = effects.top_priority_effect_per_variant()
    effects = effects.clone_with_new_elements(
        list(variant_to_effect_dict.values()))
effects_dataframe = effects.to_dataframe()
logger.info('\n%s', effects)
if args.output_csv:
    effects_dataframe.to_csv(args.output_csv, index=False)
def main(args_list=None)
Script which loads variants and annotates them with overlapping genes and
predicted coding effects.

Example usage:
    varcode --vcf mutect.vcf \
        --vcf strelka.vcf \
        --maf tcga_brca.maf \
        --variant chr1 498584 C G \
        --json-variants more_variants.json
3.590238
3.740912
0.959723
# index (starting from 0) of first affected reference codon
ref_codon_start_offset = cds_offset // 3
# which nucleotide of the first codon got changed?
nucleotide_offset_into_first_ref_codon = cds_offset % 3

n_ref_nucleotides = len(trimmed_cdna_ref)
if n_ref_nucleotides == 0:
    if nucleotide_offset_into_first_ref_codon == 2:
        # if we're inserting between codons
        ref_codon_end_offset = ref_codon_start_offset
    else:
        # inserting inside a reference codon
        ref_codon_end_offset = ref_codon_start_offset + 1
    ref_codons = sequence_from_start_codon[
        ref_codon_start_offset * 3:ref_codon_end_offset * 3]
    # split the reference codon into nucleotides before/after insertion
    prefix = ref_codons[:nucleotide_offset_into_first_ref_codon + 1]
    suffix = ref_codons[nucleotide_offset_into_first_ref_codon + 1:]
else:
    ref_codon_end_offset = (cds_offset + n_ref_nucleotides - 1) // 3 + 1
    # codons in the reference sequence
    ref_codons = sequence_from_start_codon[
        ref_codon_start_offset * 3:ref_codon_end_offset * 3]

    # We construct the new codons by taking the unmodified prefix
    # of the first ref codon, the unmodified suffix of the last ref codon
    # and sticking the alt nucleotides in between.
    # Since this is supposed to be an in-frame mutation, the concatenated
    # nucleotide string is expected to have a length that is a multiple of
    # three.
    prefix = ref_codons[:nucleotide_offset_into_first_ref_codon]

    offset_in_last_ref_codon = (cds_offset + n_ref_nucleotides - 1) % 3

    if offset_in_last_ref_codon == 0:
        suffix = ref_codons[-2:]
    elif offset_in_last_ref_codon == 1:
        suffix = ref_codons[-1:]
    else:
        suffix = ""

mutant_codons = prefix + trimmed_cdna_alt + suffix
assert len(mutant_codons) % 3 == 0, \
    "Expected in-frame mutation but got %s (length = %d)" % (
        mutant_codons, len(mutant_codons))
return ref_codon_start_offset, ref_codon_end_offset, mutant_codons
def get_codons( variant, trimmed_cdna_ref, trimmed_cdna_alt, sequence_from_start_codon, cds_offset)
Returns indices of first and last reference codons affected by the variant,
as well as the actual sequence of the mutated codons which replace those
reference codons.

Parameters
----------
variant : Variant

trimmed_cdna_ref : str
    Trimmed reference cDNA nucleotides affected by the variant

trimmed_cdna_alt : str
    Trimmed alternate cDNA nucleotides which replace the reference

sequence_from_start_codon : str
    cDNA nucleotide coding sequence

cds_offset : int
    Integer offset into the coding sequence where ref is replaced with alt
2.51836
2.488416
1.012033
variant_arg_group = arg_parser.add_argument_group(
    title="Variants",
    description="Genomic variant files")

variant_arg_group.add_argument(
    "--vcf",
    default=[],
    action="append",
    help="Genomic variants in VCF format")

variant_arg_group.add_argument(
    "--maf",
    default=[],
    action="append",
    help="Genomic variants in TCGA's MAF format",)

variant_arg_group.add_argument(
    "--variant",
    default=[],
    action="append",
    nargs=4,
    metavar=("CHR", "POS", "REF", "ALT"),
    help=(
        "Individual variant as 4 arguments giving chromosome, position, "
        "ref, and alt. Example: chr1 3848 C G. Use '.' to indicate empty "
        "alleles for insertions or deletions."))

variant_arg_group.add_argument(
    "--genome",
    type=str,
    help=(
        "What reference assembly your variant coordinates are using. "
        "Examples: 'hg19', 'GRCh38', or 'mm9'. "
        "This argument is ignored for MAF files, since each row includes "
        "the reference. "
        "For VCF files, this is used if specified, and otherwise is "
        "guessed from the header. For variants specified on the "
        "commandline with --variant, this option is required."))

variant_arg_group.add_argument(
    "--download-reference-genome-data",
    action="store_true",
    default=False,
    help=(
        "Automatically download genome reference data required for "
        "annotation using PyEnsembl. Otherwise you must first run "
        "'pyensembl install' for the release/species corresponding "
        "to the genome used in your VCF."))

variant_arg_group.add_argument(
    "--json-variants",
    default=[],
    action="append",
    help="Path to Varcode.VariantCollection object serialized as a JSON file.")

return variant_arg_group
def add_variant_args(arg_parser)
Extends an ArgumentParser instance with the following commandline arguments:
    --vcf
    --genome
    --maf
    --variant
    --json-variants
4.347223
4.177251
1.04069
assert 0 < offset <= len(sequence), \
    "Invalid position %d for sequence of length %d" % (
        offset, len(sequence))
prefix = sequence[:offset]
suffix = sequence[offset:]
return prefix + new_residues + suffix
def insert_before(sequence, offset, new_residues)
Mutate the given sequence by inserting the string `new_residues` before
`offset`.

Parameters
----------
sequence : sequence
    String of amino acids or DNA bases

offset : int
    Base 0 offset from start of sequence, before which we should insert
    `new_residues`.

new_residues : sequence
2.673968
3.819971
0.699997
assert 0 <= offset < len(sequence), \
    "Invalid position %d for sequence of length %d" % (
        offset, len(sequence))
prefix = sequence[:offset + 1]
suffix = sequence[offset + 1:]
return prefix + new_residues + suffix
def insert_after(sequence, offset, new_residues)
Mutate the given sequence by inserting the string `new_residues` after
`offset`.

Parameters
----------
sequence : sequence
    String of amino acids or DNA bases

offset : int
    Base 0 offset from start of sequence, after which we should insert
    `new_residues`.

new_residues : sequence
2.501841
3.3636
0.743799
n_ref = len(ref)
sequence_ref = sequence[offset:offset + n_ref]
assert str(sequence_ref) == str(ref), \
    "Reference %s at offset %d != expected reference %s" % \
    (sequence_ref, offset, ref)
prefix = sequence[:offset]
suffix = sequence[offset + n_ref:]
return prefix + alt + suffix
def substitute(sequence, offset, ref, alt)
Mutate a sequence by substituting `alt` for `ref` at the given `offset`.

Parameters
----------
sequence : sequence
    String of amino acids or DNA bases

offset : int
    Base 0 offset from start of `sequence`

ref : sequence or str
    What do we expect to find at the position?

alt : sequence or str
    Alternate sequence to insert
2.811156
3.62152
0.776236
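Worked examples for the three sequence-editing helpers above (inputs are arbitrary illustrative strings):

insert_before("ACGT", 2, "TT")    # -> "ACTTGT"
insert_after("ACGT", 1, "TT")     # -> "ACTTGT"
substitute("ACGT", 1, "CG", "T")  # -> "ATT"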
# raw string so that '\d' is a regex digit class, not a string escape
match_recency = [
    int(re.search(r'\d+', assembly_name).group())
    for assembly_name in assembly_names
]
most_recent = [
    x for (y, x) in sorted(zip(match_recency, assembly_names), reverse=True)
][0]
return most_recent
def _most_recent_assembly(assembly_names)
Given a list of (in this case, matched) assemblies, identify the most
recent one ("recency" here is determined by sorting based on the numeric
element of the assembly name).
3.134605
2.593793
1.208502
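An illustrative call; the numeric part of each name determines recency:

_most_recent_assembly(["GRCh37", "GRCh38"])  # -> "GRCh38"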
# identify all cases where reference name or path matches candidate aliases
reference_file_name = os.path.basename(reference_name_or_path)
matches = {'file_name': list(), 'full_path': list()}
for assembly_name in reference_alias_dict.keys():
    candidate_list = [assembly_name] + reference_alias_dict[assembly_name]
    for candidate in candidate_list:
        if candidate.lower() in reference_file_name.lower():
            matches['file_name'].append(assembly_name)
        elif candidate.lower() in reference_name_or_path.lower():
            matches['full_path'].append(assembly_name)

# remove duplicate matches (happens due to overlapping aliases)
matches['file_name'] = list(set(matches['file_name']))
matches['full_path'] = list(set(matches['full_path']))

# given the set of existing matches, choose one to return
# (first select based on file_name, then full path; if multiple, use the
# most recent)
if len(matches['file_name']) == 1:
    match = matches['file_name'][0]
elif len(matches['file_name']) > 1:
    # separate logic for >1 vs 1 to give an informative warning
    match = _most_recent_assembly(matches['file_name'])
    warn(
        ('More than one reference ({}) matches path in header ({}); '
         'the most recent one ({}) was used.').format(
            ','.join(matches['file_name']), reference_file_name, match))
elif len(matches['full_path']) >= 1:
    # combine full-path logic since the warning is the same
    match = _most_recent_assembly(matches['full_path'])
    warn((
        'Reference could not be matched against filename ({}); '
        'using best match against full path ({}).').format(
            reference_name_or_path, match))
else:
    raise ValueError(
        "Failed to infer genome assembly name for %s" % reference_name_or_path)

return match
def infer_reference_name(reference_name_or_path)
Given a string containing a reference name (such as a path to that reference's FASTA file), return its canonical name as used by Ensembl.
3.500279
3.528256
0.992071
if isinstance(genome_object_string_or_int, Genome):
    return genome_object_string_or_int
if is_integer(genome_object_string_or_int):
    return cached_release(genome_object_string_or_int)
elif is_string(genome_object_string_or_int):
    # first infer the canonical reference name, e.g. mapping hg19 -> GRCh37
    # and then get the associated PyEnsembl Genome object
    reference_name = infer_reference_name(genome_object_string_or_int)
    return genome_for_reference_name(reference_name)
else:
    raise TypeError(
        ("Expected genome to be an int, string, or pyensembl.Genome "
         "instance, got %s : %s") % (
            str(genome_object_string_or_int),
            type(genome_object_string_or_int)))
def infer_genome(genome_object_string_or_int)
If given an integer, return associated human EnsemblRelease for that Ensembl version. If given a string, return latest EnsemblRelease which has a reference of the same name. If given a PyEnsembl Genome, simply return it.
2.875619
2.618043
1.098385
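A quick illustration of the three accepted input types (the release number and alias are illustrative assumptions):

infer_genome(75)              # -> cached human EnsemblRelease 75
infer_genome("hg19")          # -> alias resolved to GRCh37, then a Genome for that reference
infer_genome(genome_object)   # -> a pyensembl.Genome instance is returned unchanged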
return dict( contig=self.original_contig, start=self.original_start, ref=self.original_ref, alt=self.original_alt, ensembl=self.ensembl, allow_extended_nucleotides=self.allow_extended_nucleotides, normalize_contig_name=self.normalize_contig_name)
def to_dict(self)
We want the original (un-normalized) field values when serializing, since normalization will happen again in __init__ when the Variant is reconstructed from this dict.
3.446173
3.162999
1.089527
if self.is_insertion: return "chr%s g.%d_%dins%s" % ( self.contig, self.start, self.start + 1, self.alt) elif self.is_deletion: return "chr%s g.%d_%ddel%s" % ( self.contig, self.start, self.end, self.ref) elif self.ref == self.alt: return "chr%s g.%d%s" % (self.contig, self.start, self.ref) else: # substitution return "chr%s g.%d%s>%s" % ( self.contig, self.start, self.ref, self.alt)
def short_description(self)
Short description of the variant using HGVS nomenclature for genomic variants. More info: http://www.hgvs.org/mutnomen/
2.217043
1.869974
1.185601
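For example, the substitution branch (a hypothetical variant; assumes Variant's contig/start/ref/alt constructor and that short_description is exposed as a property, judging by its self-only signature):

Variant("1", 1000, "A", "G", ensembl=75).short_description   # -> "chr1 g.1000A>G"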
if self._genes is None: self._genes = self.ensembl.genes_at_locus( self.contig, self.start, self.end) return self._genes
def genes(self)
Return Gene objects for all genes which overlap this variant.
3.663118
3.098627
1.182175
return self.ensembl.gene_ids_at_locus( self.contig, self.start, self.end)
def gene_ids(self)
Return IDs of all genes which overlap this variant. Calling this method is significantly cheaper than calling `Variant.genes()`, which has to issue many more queries to construct each Gene object.
6.782856
5.683954
1.193334
return self.ensembl.gene_names_at_locus( self.contig, self.start, self.end)
def gene_names(self)
Return names of all genes which overlap this variant. Calling this method is significantly cheaper than calling `Variant.genes()`, which has to issue many more queries to construct each Gene object.
6.710006
5.841373
1.148704
# An insertion would appear in a VCF like C>CT, so that the # alternate allele starts with the reference nucleotides. # Since the nucleotide strings may be normalized in the constructor, # it's worth noting that the normalized form of this variant would be # ''>'T', so that 'T'.startswith('') still holds. return (len(self.ref) < len(self.alt)) and self.alt.startswith(self.ref)
def is_insertion(self)
Does this variant represent the insertion of nucleotides into the reference genome?
12.624583
10.387208
1.215397
# A deletion would appear in a VCF like CT>C, so that the # reference allele starts with the alternate nucleotides. # This is true even in the normalized case, where the alternate # nucleotides are an empty string. return (len(self.ref) > len(self.alt)) and self.ref.startswith(self.alt)
def is_deletion(self)
Does this variant represent the deletion of nucleotides from the reference genome?
9.683304
7.895689
1.226404
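Illustration of how VCF-style alleles classify once normalization trims the shared leading "C" (hypothetical variants; assumes these predicates are exposed as properties):

Variant("1", 1000, "C", "CT", ensembl=75).is_insertion   # -> True  (C>CT inserts "T")
Variant("1", 1000, "CT", "C", ensembl=75).is_deletion    # -> True  (CT>C deletes "T")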
return (len(self.ref) == len(self.alt) == 1) and (self.ref != self.alt)
def is_snv(self)
Is the variant a single nucleotide variant, i.e. a substitution of one base for another?
3.489986
2.959089
1.179412
return self.is_snv and is_purine(self.ref) == is_purine(self.alt)
def is_transition(self)
Is this variant a pyrimidine to pyrimidine change or a purine to purine change?
11.084412
4.763504
2.326945
return self.is_snv and is_purine(self.ref) != is_purine(self.alt)
def is_transversion(self)
Is this variant a pyrimidine to purine change or vice versa?
7.166682
4.452477
1.609595
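For example (A and G are purines, C and T are pyrimidines; hypothetical variants):

Variant("1", 1000, "A", "G", ensembl=75).is_transition     # -> True  (purine -> purine)
Variant("1", 1000, "A", "C", ensembl=75).is_transversion   # -> True  (purine -> pyrimidine)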
if n_ref_bases == 0: # insertions only overlap intervals which start before and # end after the insertion point; i.e. the insertion point must be # contained within the other interval return interval_start <= variant_start and interval_end >= variant_start variant_end = variant_start + n_ref_bases # overlap means the other interval starts before this variant ends # and ends after this variant starts return interval_start <= variant_end and interval_end >= variant_start
def variant_overlaps_interval( variant_start, n_ref_bases, interval_start, interval_end)
Does a variant overlap a given interval on the same chromosome? Parameters ---------- variant_start : int Inclusive base-1 position of the variant's starting location (or the location before an insertion) n_ref_bases : int Number of reference bases affected by the variant (used to compute the end coordinate and to determine whether the variant is an insertion) interval_start : int Interval's inclusive base-1 start position interval_end : int Interval's inclusive base-1 end position
4.116658
4.417597
0.931877
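Worked examples of the overlap logic (positions are illustrative):

variant_overlaps_interval(10, 2, 11, 20)   # -> True: the ref bases reach into [11, 20]
variant_overlaps_interval(10, 0, 11, 20)   # -> False: insertion point 10 not inside [11, 20]
variant_overlaps_interval(10, 0, 9, 12)    # -> True: insertion point contained in [9, 12]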
# first we're going to make sure the variant doesn't disrupt the # splicing sequences we got from Divina et al.'s # Ab initio prediction of mutation-induced cryptic # splice-site activation and exon skipping # (http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/) # # 5' splice site: MAG|GURAGU consensus # M is A or C; R is purine; | is the exon-intron boundary # # 3' splice site: YAG|R # if exon_number > 1 and transcript_offset == exon_start_offset: # if this is any exon past the first, check to see if it lost # the purine on its left side # # the 3' splice site sequence has just a single purine on # the exon side if len(transcript_ref) > 0 and transcript_ref[0] in PURINE_NUCLEOTIDES: if len(transcript_alt) > 0: if transcript_alt[0] not in PURINE_NUCLEOTIDES: return True else: # if the mutation is a deletion, are there ref nucleotides # afterward? offset_after_deletion = transcript_offset + len(transcript_ref) if len(transcript.sequence) > offset_after_deletion: next_base = transcript.sequence[offset_after_deletion] if next_base not in PURINE_NUCLEOTIDES: return True if exon_number < len(transcript.exons): # if the mutation affects an exon whose right end gets spliced # to a next exon, check if the variant alters the exon side of # 5' consensus splicing sequence # # splicing sequence: # MAG|GURAGU # M is A or C; R is purine; | is the exon-intron boundary # # TODO: check for overlap of two intervals instead of just # seeing if the mutation starts inside the exonic splice site if variant_overlaps_interval( variant_start=transcript_offset, n_ref_bases=len(transcript_ref), interval_start=exon_end_offset - 2, interval_end=exon_end_offset): end_of_reference_exon = transcript.sequence[ exon_end_offset - 2:exon_end_offset + 1] if matches_exon_end_pattern(end_of_reference_exon): # the last three reference nucleotides conform to the # consensus sequence, so treat any overlapping variant # as changing the exonic splice site return True return False
def changes_exonic_splice_site( transcript_offset, transcript, transcript_ref, transcript_alt, exon_start_offset, exon_end_offset, exon_number)
Does the given exonic mutation of a particular transcript change a splice site? Parameters ---------- transcript_offset : int Offset from start of transcript of first reference nucleotide (or the last nucleotide before an insertion) transcript : pyensembl.Transcript transcript_ref : str Reference nucleotides transcript_alt : str Alternate nucleotides introduced by the mutation exon_start_offset : int Start offset of exon relative to beginning of transcript exon_end_offset : int End offset of exon relative to beginning of transcript exon_number : int Position of this exon in the transcript's ordered list of exons (starting from 1)
5.240089
5.268132
0.994677
if not allow_extended_nucleotides and nucleotide not in STANDARD_NUCLEOTIDES: raise ValueError( "{} is a non-standard nucleotide, neither purine nor pyrimidine".format(nucleotide)) return nucleotide in PURINE_NUCLEOTIDES
def is_purine(nucleotide, allow_extended_nucleotides=False)
Is the nucleotide a purine?
3.180152
3.051248
1.042247
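Example behavior, given that adenine and guanine are the purines:

is_purine("A")   # -> True
is_purine("C")   # -> False (a pyrimidine)
is_purine("X")   # raises ValueError unless allow_extended_nucleotides=True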
if treat_nan_as_empty and isinstance(nucleotides, float) and np.isnan(nucleotides): # check for NaN first, since a float can't be tested for membership # in the empty_chars string without raising a TypeError return "" elif nucleotides in empty_chars: return "" require_string(nucleotides, name="nucleotide string") nucleotides = nucleotides.upper() if allow_extended_nucleotides: valid_nucleotides = EXTENDED_NUCLEOTIDES else: valid_nucleotides = STANDARD_NUCLEOTIDES if not set(nucleotides) <= valid_nucleotides: raise ValueError( "Invalid character(s) in nucleotide string: %s" % ( ",".join(set(nucleotides) - valid_nucleotides),)) return nucleotides
def normalize_nucleotide_string( nucleotides, allow_extended_nucleotides=False, empty_chars=".-", treat_nan_as_empty=True)
Normalizes a nucleotide string by converting various ways of encoding empty strings into "", making all letters upper case, and checking to make sure all letters in the string are actually nucleotides. Parameters ---------- nucleotides : str Sequence of nucleotides, e.g. "ACCTG" allow_extended_nucleotides : bool Allow non-canonical nucleotide characters like 'X' for unknown base empty_chars : str Characters which encode empty strings, such as "." used in VCF format or "-" used in MAF format treat_nan_as_empty : bool Some MAF files represent deletions/insertions with NaN ref/alt values
2.073395
2.661783
0.77895
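Example behavior (the NaN case assumes the reordered NaN check above):

normalize_nucleotide_string("acct")          # -> "ACCT"
normalize_nucleotide_string(".")             # -> "" (VCF-style empty allele)
normalize_nucleotide_string(float("nan"))    # -> "" (MAF-style missing allele)
normalize_nucleotide_string("ACQT")          # raises ValueError: invalid character Q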
return OrderedDict( (call.sample, call.data._asdict()) for call in calls)
def pyvcf_calls_to_sample_info_list(calls)
Given a list of pyvcf.model._Call instances, return an OrderedDict mapping each sample name to its per-sample info: sample name -> field -> value
7.782516
9.313284
0.835636
expected_columns = ( ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"] + (["INFO"] if info_parser else [])) if info_parser and sample_names: if sample_info_parser is None: raise TypeError( "Must specify sample_info_parser if specifying sample_names") expected_columns.append("FORMAT") expected_columns.extend(sample_names) variants = [] metadata = {} try: for chunk in dataframes: assert chunk.columns.tolist() == expected_columns,\ "dataframe columns (%s) do not match expected columns (%s)" % ( chunk.columns, expected_columns) for tpl in chunk.itertuples(): (i, chrom, pos, id_, ref, alts, qual, flter) = tpl[:8] if flter == ".": flter = None elif flter == "PASS": flter = [] elif only_passing: continue else: flter = flter.split(';') if id_ == ".": id_ = None qual = float(qual) if qual != "." else None alt_num = 0 info = sample_info = None for alt in alts.split(","): if alt != ".": if info_parser is not None and info is None: info = info_parser(tpl[8]) # INFO column if sample_names: # Sample name -> field -> value dict. sample_info = sample_info_parser( list(tpl[10:]), # sample info columns tpl[9], # FORMAT column ) variant = Variant( chrom, int(pos), # want a Python int not numpy.int64 ref, alt, **variant_kwargs) variants.append(variant) metadata[variant] = { 'id': id_, 'qual': qual, 'filter': flter, 'info': info, 'sample_info': sample_info, 'alt_allele_index': alt_num, } if max_variants and len(variants) >= max_variants: # stop once we've collected exactly max_variants variants raise StopIteration alt_num += 1 except StopIteration: pass return VariantCollection( variants=variants, source_to_metadata_dict={source_path: metadata}, **variant_collection_kwargs)
def dataframes_to_variant_collection( dataframes, source_path, info_parser=None, only_passing=True, max_variants=None, sample_names=None, sample_info_parser=None, variant_kwargs={}, variant_collection_kwargs={})
Load a VariantCollection from an iterable of pandas dataframes. This takes an iterable of dataframes instead of a single dataframe to avoid having to load huge dataframes at once into memory. If you have a single dataframe, just pass it in a single-element list. Parameters ---------- dataframes Iterable of dataframes (e.g. a generator). Expected columns are: ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"] and 'INFO' if `info_parser` is not None. Columns must be in this order. source_path : str Path of VCF file from which DataFrame chunks were generated. info_parser : string -> object, optional Callable to parse INFO strings. only_passing : boolean, optional If true, any entries whose FILTER field is not one of "." or "PASS" are dropped. max_variants : int, optional If specified, return only the first max_variants variants. sample_names : list of strings, optional Sample names. The final columns of the dataframe should match these. If specified, the per-sample info columns will be parsed. You must also specify sample_info_parser. sample_info_parser : string list * string -> dict, optional Callable to parse per-sample info columns. variant_kwargs : dict, optional Additional keyword parameters to pass to Variant.__init__ variant_collection_kwargs : dict, optional Additional keyword parameters to pass to VariantCollection.__init__.
2.838182
2.702109
1.050358
vcf_field_types = OrderedDict() vcf_field_types['CHROM'] = str vcf_field_types['POS'] = int vcf_field_types['ID'] = str vcf_field_types['REF'] = str vcf_field_types['ALT'] = str vcf_field_types['QUAL'] = str vcf_field_types['FILTER'] = str if include_info: vcf_field_types['INFO'] = str if sample_names: vcf_field_types['FORMAT'] = str for name in sample_names: vcf_field_types[name] = str parsed_path = parse_url_or_path(path) if not parsed_path.scheme or parsed_path.scheme.lower() == "file": path = parsed_path.path else: raise NotImplementedError("Only local files are supported.") compression = None if path.endswith(".gz"): compression = "gzip" elif path.endswith(".bz2"): compression = "bz2" reader = pandas.read_table( path, compression=compression, comment="#", chunksize=chunk_size, dtype=vcf_field_types, names=list(vcf_field_types), usecols=range(len(vcf_field_types))) return reader
def read_vcf_into_dataframe( path, include_info=False, sample_names=None, chunk_size=None)
Load the data of a VCF into a pandas dataframe. All headers are ignored. Parameters ---------- path : str Path to local file. HTTP and other protocols are not implemented. include_info : boolean, default False If true, the INFO field is not parsed, but is included as a string in the resulting data frame. If false, the INFO field is omitted. sample_names: string list, optional Sample names. The final columns of the dataframe should match these. If specified (and include_info is also specified), the FORMAT and per-sample info columns will be included in the result dataframe. chunk_size : int, optional If buffering is desired, the number of rows per chunk. Returns --------- If chunk_size is None (the default), a dataframe with the contents of the VCF file. Otherwise, an iterable of dataframes, each with chunk_size rows.
1.804107
1.86092
0.96947
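Hypothetical usage (the file path and the per-chunk handler are assumptions):

df = read_vcf_into_dataframe("somatic.vcf.gz", include_info=True)
for chunk in read_vcf_into_dataframe("somatic.vcf.gz", chunk_size=10 ** 4):
    handle(chunk)   # hypothetical per-chunk handler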
dec = zlib.decompressobj(zlib.MAX_WBITS | 16) previous = "" for compressed_chunk in stream: chunk = dec.decompress(compressed_chunk).decode() if chunk: lines = (previous + chunk).split("\n") previous = lines.pop() for line in lines: yield line yield previous
def stream_gzip_decompress_lines(stream)
Uncompress a gzip stream into lines of text. Parameters ---------- stream : iterable Generator of chunks of gzip compressed text. Returns ------- Generator of uncompressed lines.
2.756079
2.9537
0.933094
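A sketch of streaming a remote gzipped VCF line by line (assumes the requests library; the URL is hypothetical):

import requests
response = requests.get("https://example.org/variants.vcf.gz", stream=True)
for line in stream_gzip_decompress_lines(response.iter_content(chunk_size=65536)):
    if not line.startswith("#"):
        pass   # parse the VCF record here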
if genome: return infer_genome(genome) elif reference_vcf_key not in vcf_reader.metadata: raise ValueError("Unable to infer reference genome for %s" % ( vcf_reader.filename,)) else: reference_path = vcf_reader.metadata[reference_vcf_key] return infer_genome(reference_path)
def infer_genome_from_vcf(genome, vcf_reader, reference_vcf_key)
Helper function to make a pyensembl.Genome instance: uses the explicitly supplied genome if given, otherwise infers one from the VCF header's reference metadata.
2.653653
2.676615
0.991421
assert transcript.protein_sequence is not None, \ "Expect transcript %s to have protein sequence" % transcript original_protein_sequence = transcript.protein_sequence original_protein_length = len(original_protein_sequence) mutant_protein_suffix = translate( nucleotide_sequence=sequence_from_mutated_codon, first_codon_is_start=False, to_stop=True, truncate=True) if mutated_codon_index == 0: # TODO: scan through sequence_from_mutated_codon for # Kozak sequence + start codon to choose the new start return StartLoss(variant=variant, transcript=transcript) # the frameshifted sequence may contain some amino acids which are # the same as the original protein! _, mutant_protein_suffix, unchanged_amino_acids = trim_shared_prefix( ref=original_protein_sequence[mutated_codon_index:], alt=mutant_protein_suffix) n_unchanged_amino_acids = len(unchanged_amino_acids) offset_to_first_different_amino_acid = mutated_codon_index + n_unchanged_amino_acids # miraculously, this frameshift left the protein unchanged, # most likely by turning one stop codon into another stop codon if n_unchanged_amino_acids == 0: aa_ref = "" else: aa_ref = original_protein_sequence[-n_unchanged_amino_acids:] if offset_to_first_different_amino_acid >= original_protein_length: # frameshift is either extending the protein or leaving it unchanged if len(mutant_protein_suffix) == 0: return Silent( variant=variant, transcript=transcript, aa_pos=mutated_codon_index, aa_ref=aa_ref) else: # When all the amino acids are the same as the original, we either # have the original protein or we've extended it. # If we've extended it, it means we must have lost our stop codon. return StopLoss( variant=variant, transcript=transcript, aa_ref=aa_ref, aa_alt=mutant_protein_suffix) # original amino acid at the mutated codon before the frameshift occurred aa_ref = original_protein_sequence[offset_to_first_different_amino_acid] # TODO: what if all the shifted amino acids were the same and the protein # ended up the same length? Add a Silent case? if len(mutant_protein_suffix) == 0: # if a frameshift doesn't create any new amino acids, then # it must immediately have hit a stop codon return FrameShiftTruncation( variant=variant, transcript=transcript, stop_codon_offset=offset_to_first_different_amino_acid) return FrameShift( variant=variant, transcript=transcript, aa_mutation_start_offset=offset_to_first_different_amino_acid, shifted_sequence=str(mutant_protein_suffix))
def create_frameshift_effect( mutated_codon_index, sequence_from_mutated_codon, variant, transcript)
Determine frameshift effect within a coding sequence (possibly affecting either the start or stop codons, or anything in between) Parameters ---------- mutated_codon_index : int Codon offset (starting from 0 = start codon) of first non-reference amino acid in the variant protein sequence_from_mutated_codon: Bio.Seq Sequence of mutated cDNA, starting from first mutated codon, until the end of the transcript variant : Variant transcript : pyensembl.Transcript
3.621545
3.554702
1.018804
# special logic for insertions coding_sequence_after_insertion = \ sequence_from_start_codon[cds_offset_before_insertion + 1:] if cds_offset_before_insertion % 3 == 2: # insertion happens after last nucleotide in a codon, # doesn't disrupt the existing codon from cds_offset-2 to cds_offset mutated_codon_index = cds_offset_before_insertion // 3 + 1 nucleotides_before = "" elif cds_offset_before_insertion % 3 == 1: # insertion happens after 2nd nucleotide of a codon # codon positions: # 1) cds_offset - 1 # 2) cds_offset # <----- Insertion # 3) cds_offset + 1 mutated_codon_index = cds_offset_before_insertion // 3 # the first codon in the returned sequence will contain two reference # nucleotides before the insertion nucleotides_before = sequence_from_start_codon[ cds_offset_before_insertion - 1:cds_offset_before_insertion + 1] elif cds_offset_before_insertion % 3 == 0: # insertion happens after 1st nucleotide of a codon # codon positions: # 1) cds_offset # <----- Insertion # 2) cds_offset + 1 # 3) cds_offset + 2 mutated_codon_index = cds_offset_before_insertion // 3 # the first codon in the returned sequence will contain one reference # nucleotide before the insertion nucleotides_before = sequence_from_start_codon[cds_offset_before_insertion] sequence_from_mutated_codon = ( nucleotides_before + inserted_nucleotides + coding_sequence_after_insertion) return mutated_codon_index, sequence_from_mutated_codon
def cdna_codon_sequence_after_insertion_frameshift( sequence_from_start_codon, cds_offset_before_insertion, inserted_nucleotides)
Returns index of mutated codon and nucleotide sequence starting at the first mutated codon.
2.424917
2.337307
1.037483
mutated_codon_index = cds_offset // 3 # get the sequence starting from the first modified codon until the end # of the transcript. sequence_after_mutated_codon = \ sequence_from_start_codon[mutated_codon_index * 3:] # the variant's ref nucleotides should start either 0, 1, or 2 nucleotides # into `sequence_after_mutated_codon` offset_into_mutated_codon = cds_offset % 3 sequence_from_mutated_codon = substitute( sequence=sequence_after_mutated_codon, offset=offset_into_mutated_codon, ref=trimmed_cdna_ref, alt=trimmed_cdna_alt) return mutated_codon_index, sequence_from_mutated_codon
def cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon, cds_offset, trimmed_cdna_ref, trimmed_cdna_alt)
Logic for any frameshift which isn't an insertion. Insertions are handled as a special case since our base-inclusive indexing means something different for them: cds_offset = base before insertion Whereas in this case: cds_offset = first reference base affected by a variant Returns index of first modified codon and sequence from that codon onward.
3.259797
3.076562
1.059558
if len(trimmed_cdna_ref) != 0: mutated_codon_index, sequence_from_mutated_codon = \ cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset=cds_offset, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt) else: mutated_codon_index, sequence_from_mutated_codon = \ cdna_codon_sequence_after_insertion_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset_before_insertion=cds_offset, inserted_nucleotides=trimmed_cdna_alt) return create_frameshift_effect( mutated_codon_index=mutated_codon_index, sequence_from_mutated_codon=sequence_from_mutated_codon, variant=variant, transcript=transcript)
def predict_frameshift_coding_effect( variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, cds_offset, sequence_from_start_codon)
Coding effect of a frameshift mutation. Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : nucleotide sequence Reference nucleotides in the coding sequence of the given transcript. trimmed_cdna_alt : nucleotide sequence Alternate nucleotides introduced by mutation cds_offset : int Offset into the CDS of first ref nucleotide. For insertions, this is the offset of the last ref nucleotide before the insertion. sequence_from_start_codon : nucleotide sequence Nucleotides of the coding sequence and 3' UTR
1.929478
2.010917
0.959502
# if this variant doesn't overlap any genes, return an # Intergenic effect # TODO: look for nearby genes and mark those as Upstream and Downstream # effects if len(variant.gene_ids) == 0: effects = [Intergenic(variant)] else: # list of all MutationEffects for all genes & transcripts effects = [] # group transcripts by their gene ID transcripts_grouped_by_gene = groupby_field(variant.transcripts, 'gene_id') # want effects in the list grouped by the gene they come from for gene_id in sorted(variant.gene_ids): if gene_id not in transcripts_grouped_by_gene: # intragenic variant overlaps a gene but not any transcripts gene = variant.ensembl.gene_by_id(gene_id) effects.append(Intragenic(variant, gene)) else: # gene ID has transcripts overlapped by this variant for transcript in transcripts_grouped_by_gene[gene_id]: if raise_on_error: effect = predict_variant_effect_on_transcript( variant=variant, transcript=transcript) else: effect = predict_variant_effect_on_transcript_or_failure( variant=variant, transcript=transcript) effects.append(effect) return EffectCollection(effects)
def predict_variant_effects(variant, raise_on_error=False)
Determine the effects of a variant on any transcripts it overlaps. Returns an EffectCollection object. Parameters ---------- variant : Variant raise_on_error : bool Raise an exception if we encounter an error while trying to determine the effect of this variant on a transcript, or simply log the error and continue.
4.034931
3.89439
1.036088
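Hypothetical usage (the coordinates are illustrative; assumes EffectCollection exposes a top_priority_effect helper):

from varcode import Variant
variant = Variant("7", 55259515, "T", "G", ensembl=75)
effects = predict_variant_effects(variant)
print(effects.top_priority_effect())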
try: return predict_variant_effect_on_transcript( variant=variant, transcript=transcript) except (AssertionError, ValueError) as error: logger.warning( "Encountered error annotating %s for %s: %s", variant, transcript, error) return Failure(variant, transcript)
def predict_variant_effect_on_transcript_or_failure(variant, transcript)
Try predicting the effect of a variant on a particular transcript but suppress raised exceptions by converting them into `Failure` effect values.
3.410405
3.186476
1.070275
assert distance_to_exon > 0, \ "Expected intronic effect to have distance_to_exon > 0, got %d" % ( distance_to_exon,) if nearest_exon.strand == "+": # if exon on positive strand start_before = variant.trimmed_base1_start < nearest_exon.start start_same = variant.trimmed_base1_start == nearest_exon.start before_exon = start_before or (variant.is_insertion and start_same) else: # if exon on negative strand end_after = variant.trimmed_base1_end > nearest_exon.end end_same = variant.trimmed_base1_end == nearest_exon.end before_exon = end_after or (variant.is_insertion and end_same) # distance cutoffs based on consensus splice sequences from # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/ # 5' splice site: MAG|GURAGU consensus # M is A or C; R is purine; | is the exon-intron boundary # 3' splice site: YAG|R if distance_to_exon <= 2: if before_exon: # 2 last nucleotides of intron before exon are the splice # acceptor site, typically "AG" return SpliceAcceptor else: # 2 first nucleotides of intron after exon are the splice donor # site, typically "GT" return SpliceDonor elif not before_exon and distance_to_exon <= 6: # variants in nucleotides 3-6 at start of intron aren't as certain # to cause problems as nucleotides 1-2 but still implicated in # alternative splicing return IntronicSpliceSite elif before_exon and distance_to_exon <= 3: # nucleotide -3 before exon is part of the 3' splicing # motif but allows for more degeneracy than the -2, -1 nucleotides return IntronicSpliceSite else: # intronic mutation unrelated to splicing return Intronic
def choose_intronic_effect_class( variant, nearest_exon, distance_to_exon)
Infer the effect class of a variant which does not overlap any exon of the given transcript, based on its distance from (and position relative to) the nearest exon.
4.50369
4.599055
0.979264
# create an empty list for every new key groups = defaultdict(list) for record in records: value = fn(record) if value is not None or not skip_none: groups[value].append(record) return dict(groups)
def apply_groupby(records, fn, skip_none=False)
Given a list of objects, group them into a dictionary by applying fn to each one and using returned values as a dictionary key. Parameters ---------- records : list fn : function skip_none : bool If False, then None can be a key in the returned dictionary, otherwise records whose key value is None get skipped. Returns dict.
3.408764
3.423527
0.995688
return apply_groupby( records, lambda obj: getattr(obj, field_name), skip_none=skip_none)
def groupby_field(records, field_name, skip_none=True)
Given a list of objects, group them into a dictionary by the unique values of a given field name.
4.24786
5.460448
0.777932
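Illustration of the two grouping helpers with a hypothetical record type:

from collections import namedtuple
Row = namedtuple("Row", ["gene_id", "score"])
rows = [Row("ENSG1", 1.0), Row("ENSG1", 2.0), Row("ENSG2", 0.5)]
apply_groupby(rows, lambda r: r.gene_id)
# -> {"ENSG1": [Row("ENSG1", 1.0), Row("ENSG1", 2.0)], "ENSG2": [Row("ENSG2", 0.5)]}
groupby_field(rows, "gene_id")   # same result via attribute lookup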
memoized_values = {} @wraps(fn) def wrapped_fn(*args, **kwargs): key = (args, tuple(sorted(kwargs.items()))) try: return memoized_values[key] except KeyError: memoized_values[key] = fn(*args, **kwargs) return memoized_values[key] return wrapped_fn
def memoize(fn)
Simple memoization decorator for functions and methods, assumes that all arguments to the function can be hashed and compared.
1.830761
2.070088
0.884388
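Example usage; without caching this recursion would take exponential time:

@memoize
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(200)   # fast, since each fib(k) is computed only once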
# ensure that start:end overlaps with the transcript's positions if start > end: raise ValueError( "start %d shouldn't be greater than end %d" % ( start, end)) if start > transcript.end: raise ValueError( "Range %d:%d starts after transcript %s (%d:%d)" % ( start, end, transcript, transcript.start, transcript.end)) if end < transcript.start: raise ValueError( "Range %d:%d ends before transcript %s (%d:%d)" % ( start, end, transcript, transcript.start, transcript.end)) # trim the start position to the beginning of the transcript if start < transcript.start: start = transcript.start # trim the end position to the end of the transcript if end > transcript.end: end = transcript.end # return earliest offset into the spliced transcript return min( transcript.spliced_offset(start), transcript.spliced_offset(end))
def interval_offset_on_transcript(start, end, transcript)
Given a chromosomal interval [start:end] and a particular transcript, return the smallest offset of that interval relative to the spliced transcript, after trimming the interval to the transcript's boundaries.
2.423134
2.445152
0.990995
return self.filter_above_threshold( key_fn=lambda effect: effect.transcript_id, value_dict=transcript_expression_dict, threshold=min_expression_value)
def filter_by_transcript_expression( self, transcript_expression_dict, min_expression_value=0.0)
Filters effects to those which have an associated transcript whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- transcript_expression_dict : dict Dictionary mapping Ensembl transcript IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection
5.512553
5.794988
0.951262
return self.filter_above_threshold( key_fn=lambda effect: effect.gene_id, value_dict=gene_expression_dict, threshold=min_expression_value)
def filter_by_gene_expression( self, gene_expression_dict, min_expression_value=0.0)
Filters effects to those which have an associated gene whose expression value in the gene_expression_dict argument is greater than min_expression_value. Parameters ---------- gene_expression_dict : dict Dictionary mapping Ensembl gene IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection
5.17275
5.7394
0.90127
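Hypothetical usage of the two expression filters (the gene IDs and FPKM values are illustrative):

expressed_effects = effects.filter_by_gene_expression(
    gene_expression_dict={"ENSG00000141510": 12.3, "ENSG00000146648": 0.2},
    min_expression_value=1.0)   # drops effects on the second, lowly-expressed gene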