source_code
stringlengths
45
7.24k
function_name
stringlengths
1
111
messages
list
def x(effects, filters): for filter_fn in filters: filtered_effects = filter_fn(effects) if len(effects) == 1: return effects elif len(filtered_effects) > 1: effects = filtered_effects return effects
filter_pipeline
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(effects, filters):\n for filter_fn in filters:\n filtered_effects = filter_fn(effects)\n if len(effects) == 1:\n return effects\n elif len(filtered_effects) > 1:\n effects = filtered_effects\n return effects\n", "role": "user" }, { "content": "filter_pipeline", "role": "assistant" } ]
def x(effects): effects = filter_pipeline(effects=effects, filters=[ keep_max_priority_effects, keep_effects_on_protein_coding_genes, keep_effects_on_protein_coding_transcripts, keep_effects_on_complete_transcripts]) if len(effects) == 1: return effects[0] cds_lengths = [cds_length_of_associated_transcript(e) for e in effects] max_cds_length = max(cds_lengths) max_cds_length_indices = {i for i, l in enumerate(cds_lengths) if l == max_cds_length} seq_lengths = [length_of_associated_transcript(e) for e in effects] max_seq_length = max(seq_lengths) max_seq_length_indices = {i for i, l in enumerate(seq_lengths) if l == max_seq_length} intersection_of_indices = max_cds_length_indices.intersection( max_seq_length_indices) n_candidates = len(intersection_of_indices) if n_candidates == 1: best_index = intersection_of_indices.pop() return effects[best_index] elif n_candidates == 0: union_of_indices = max_cds_length_indices.union(max_seq_length_indices) candidate_effects = [effects[i] for i in union_of_indices] else: candidate_effects = [effects[i] for i in intersection_of_indices] return max(candidate_effects, key= tie_breaking_sort_key_for_single_gene_effects)
top_priority_effect_for_single_gene
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(effects):\n effects = filter_pipeline(effects=effects, filters=[\n keep_max_priority_effects, keep_effects_on_protein_coding_genes,\n keep_effects_on_protein_coding_transcripts,\n keep_effects_on_complete_transcripts])\n if len(effects) == 1:\n return effects[0]\n cds_lengths = [cds_length_of_associated_transcript(e) for e in effects]\n max_cds_length = max(cds_lengths)\n max_cds_length_indices = {i for i, l in enumerate(cds_lengths) if l ==\n max_cds_length}\n seq_lengths = [length_of_associated_transcript(e) for e in effects]\n max_seq_length = max(seq_lengths)\n max_seq_length_indices = {i for i, l in enumerate(seq_lengths) if l ==\n max_seq_length}\n intersection_of_indices = max_cds_length_indices.intersection(\n max_seq_length_indices)\n n_candidates = len(intersection_of_indices)\n if n_candidates == 1:\n best_index = intersection_of_indices.pop()\n return effects[best_index]\n elif n_candidates == 0:\n union_of_indices = max_cds_length_indices.union(max_seq_length_indices)\n candidate_effects = [effects[i] for i in union_of_indices]\n else:\n candidate_effects = [effects[i] for i in intersection_of_indices]\n return max(candidate_effects, key=\n tie_breaking_sort_key_for_single_gene_effects)\n", "role": "user" }, { "content": "top_priority_effect_for_single_gene", "role": "assistant" } ]
def x(effects): if len(effects) == 0: raise ValueError('List of effects cannot be empty') effects = map(select_between_exonic_splice_site_and_alternate_effect, effects) effects_grouped_by_gene = apply_groupby(effects, fn= gene_id_of_associated_transcript, skip_none=False) if None in effects_grouped_by_gene: effects_without_genes = effects_grouped_by_gene.pop(None) else: effects_without_genes = [] if len(effects_grouped_by_gene) > 0: effects_with_genes = [top_priority_effect_for_single_gene( gene_effects) for gene_effects in effects_grouped_by_gene.values()] return max(effects_with_genes, key=multi_gene_effect_sort_key) else: assert len(effects_without_genes) > 0 return max(effects_without_genes, key=multi_gene_effect_sort_key)
top_priority_effect
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(effects):\n if len(effects) == 0:\n raise ValueError('List of effects cannot be empty')\n effects = map(select_between_exonic_splice_site_and_alternate_effect,\n effects)\n effects_grouped_by_gene = apply_groupby(effects, fn=\n gene_id_of_associated_transcript, skip_none=False)\n if None in effects_grouped_by_gene:\n effects_without_genes = effects_grouped_by_gene.pop(None)\n else:\n effects_without_genes = []\n if len(effects_grouped_by_gene) > 0:\n effects_with_genes = [top_priority_effect_for_single_gene(\n gene_effects) for gene_effects in effects_grouped_by_gene.values()]\n return max(effects_with_genes, key=multi_gene_effect_sort_key)\n else:\n assert len(effects_without_genes) > 0\n return max(effects_without_genes, key=multi_gene_effect_sort_key)\n", "role": "user" }, { "content": "top_priority_effect", "role": "assistant" } ]
def x(self): return dict(variants=self.variants, distinct=self.distinct, sort_key= self.sort_key, sources=self.sources, source_to_metadata_dict=self. source_to_metadata_dict)
to_dict
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return dict(variants=self.variants, distinct=self.distinct, sort_key=\n self.sort_key, sources=self.sources, source_to_metadata_dict=self.\n source_to_metadata_dict)\n", "role": "user" }, { "content": "to_dict", "role": "assistant" } ]
def x(self, new_elements): kwargs = self.to_dict() kwargs['variants'] = new_elements return self.from_dict(kwargs)
clone_with_new_elements
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, new_elements):\n kwargs = self.to_dict()\n kwargs['variants'] = new_elements\n return self.from_dict(kwargs)\n", "role": "user" }, { "content": "clone_with_new_elements", "role": "assistant" } ]
def x(self, raise_on_error=True): return EffectCollection([effect for variant in self for effect in variant.effects(raise_on_error=raise_on_error)])
effects
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, raise_on_error=True):\n return EffectCollection([effect for variant in self for effect in\n variant.effects(raise_on_error=raise_on_error)])\n", "role": "user" }, { "content": "effects", "role": "assistant" } ]
def x(self): return {gene_name: len(group) for gene_name, group in self. groupby_gene_name().items()}
gene_counts
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return {gene_name: len(group) for gene_name, group in self.\n groupby_gene_name().items()}\n", "role": "user" }, { "content": "gene_counts", "role": "assistant" } ]
def x(self, transcript_expression_dict, min_expression_value=0.0): return self.filter_any_above_threshold(multi_key_fn=lambda variant: variant.transcript_ids, value_dict=transcript_expression_dict, threshold=min_expression_value)
filter_by_transcript_expression
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, transcript_expression_dict, min_expression_value=0.0):\n return self.filter_any_above_threshold(multi_key_fn=lambda variant:\n variant.transcript_ids, value_dict=transcript_expression_dict,\n threshold=min_expression_value)\n", "role": "user" }, { "content": "filter_by_transcript_expression", "role": "assistant" } ]
def x(self, gene_expression_dict, min_expression_value=0.0): return self.filter_any_above_threshold(multi_key_fn=lambda effect: effect.gene_ids, value_dict=gene_expression_dict, threshold= min_expression_value)
filter_by_gene_expression
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, gene_expression_dict, min_expression_value=0.0):\n return self.filter_any_above_threshold(multi_key_fn=lambda effect:\n effect.gene_ids, value_dict=gene_expression_dict, threshold=\n min_expression_value)\n", "role": "user" }, { "content": "filter_by_gene_expression", "role": "assistant" } ]
def x(self, other): return self.__class__ == other.__class__ and len(self) == len(other ) and all(x.exactly_equal(y) for x, y in zip(self, other))
exactly_equal
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, other):\n return self.__class__ == other.__class__ and len(self) == len(other\n ) and all(x.exactly_equal(y) for x, y in zip(self, other))\n", "role": "user" }, { "content": "exactly_equal", "role": "assistant" } ]
def x(cls, dictionaries): combined_dictionary = {} for source_to_metadata_dict in dictionaries: for source_name, variant_to_metadata_dict in source_to_metadata_dict.items( ): combined_dictionary.setdefault(source_name, {}) combined_source_dict = combined_dictionary[source_name] for variant, metadata_dict in variant_to_metadata_dict.items(): combined_source_dict.setdefault(variant, {}) combined_source_dict[variant].update(metadata_dict) return combined_dictionary
merge_metadata_dictionaries
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, dictionaries):\n combined_dictionary = {}\n for source_to_metadata_dict in dictionaries:\n for source_name, variant_to_metadata_dict in source_to_metadata_dict.items(\n ):\n combined_dictionary.setdefault(source_name, {})\n combined_source_dict = combined_dictionary[source_name]\n for variant, metadata_dict in variant_to_metadata_dict.items():\n combined_source_dict.setdefault(variant, {})\n combined_source_dict[variant].update(metadata_dict)\n return combined_dictionary\n", "role": "user" }, { "content": "merge_metadata_dictionaries", "role": "assistant" } ]
def x(cls, combine_fn, variant_collections, kwargs): kwargs['variants'] = combine_fn(*[set(vc) for vc in variant_collections]) kwargs['source_to_metadata_dict'] = cls._merge_metadata_dictionaries([ vc.source_to_metadata_dict for vc in variant_collections]) kwargs['sources'] = set.union(*[vc.sources for vc in variant_collections]) for key, value in variant_collections[0].to_dict().items(): if key not in kwargs: kwargs[key] = value return cls(**kwargs)
combine_variant_collections
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(cls, combine_fn, variant_collections, kwargs):\n kwargs['variants'] = combine_fn(*[set(vc) for vc in variant_collections])\n kwargs['source_to_metadata_dict'] = cls._merge_metadata_dictionaries([\n vc.source_to_metadata_dict for vc in variant_collections])\n kwargs['sources'] = set.union(*[vc.sources for vc in variant_collections])\n for key, value in variant_collections[0].to_dict().items():\n if key not in kwargs:\n kwargs[key] = value\n return cls(**kwargs)\n", "role": "user" }, { "content": "combine_variant_collections", "role": "assistant" } ]
def x(self, *others, **kwargs): return self._combine_variant_collections(combine_fn=set.union, variant_collections=(self,) + others, kwargs=kwargs)
union
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, *others, **kwargs):\n return self._combine_variant_collections(combine_fn=set.union,\n variant_collections=(self,) + others, kwargs=kwargs)\n", "role": "user" }, { "content": "union", "role": "assistant" } ]
def x(self, *others, **kwargs): return self._combine_variant_collections(combine_fn=set.intersection, variant_collections=(self,) + others, kwargs=kwargs)
intersection
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, *others, **kwargs):\n return self._combine_variant_collections(combine_fn=set.intersection,\n variant_collections=(self,) + others, kwargs=kwargs)\n", "role": "user" }, { "content": "intersection", "role": "assistant" } ]
def x(self): def x(variant): return OrderedDict([('chr', variant.contig), ('start', variant. original_start), ('ref', variant.original_ref), ('alt', variant .original_alt), ('gene_name', ';'.join(variant.gene_names)), ( 'gene_id', ';'.join(variant.gene_ids))]) rows = [row_from_variant(v) for v in self] if len(rows) == 0: return pd.DataFrame() return pd.DataFrame.from_records(rows, columns=rows[0].keys())
to_dataframe
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n\n def x(variant):\n return OrderedDict([('chr', variant.contig), ('start', variant.\n original_start), ('ref', variant.original_ref), ('alt', variant\n .original_alt), ('gene_name', ';'.join(variant.gene_names)), (\n 'gene_id', ';'.join(variant.gene_ids))])\n rows = [row_from_variant(v) for v in self]\n if len(rows) == 0:\n return pd.DataFrame()\n return pd.DataFrame.from_records(rows, columns=rows[0].keys())\n", "role": "user" }, { "content": "to_dataframe", "role": "assistant" } ]
def x(ref, alt): n_ref = len(ref) n_alt = len(alt) n_min = min(n_ref, n_alt) i = 0 while i < n_min and ref[i] == alt[i]: i += 1 prefix = ref[:i] ref_suffix = ref[i:] alt_suffix = alt[i:] return ref_suffix, alt_suffix, prefix
trim_shared_prefix
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(ref, alt):\n n_ref = len(ref)\n n_alt = len(alt)\n n_min = min(n_ref, n_alt)\n i = 0\n while i < n_min and ref[i] == alt[i]:\n i += 1\n prefix = ref[:i]\n ref_suffix = ref[i:]\n alt_suffix = alt[i:]\n return ref_suffix, alt_suffix, prefix\n", "role": "user" }, { "content": "trim_shared_prefix", "role": "assistant" } ]
def x(ref, alt): n_ref = len(ref) n_alt = len(alt) n_min = min(n_ref, n_alt) i = 0 while i < n_min and ref[-i - 1] == alt[-i - 1]: i += 1 if i == 0: return ref, alt, '' return ref[:-i], alt[:-i], ref[-i:]
trim_shared_suffix
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(ref, alt):\n n_ref = len(ref)\n n_alt = len(alt)\n n_min = min(n_ref, n_alt)\n i = 0\n while i < n_min and ref[-i - 1] == alt[-i - 1]:\n i += 1\n if i == 0:\n return ref, alt, ''\n return ref[:-i], alt[:-i], ref[-i:]\n", "role": "user" }, { "content": "trim_shared_suffix", "role": "assistant" } ]
def x(ref, alt): ref, alt, prefix = trim_shared_prefix(ref, alt) ref, alt, suffix = trim_shared_suffix(ref, alt) return ref, alt, prefix, suffix
trim_shared_flanking_strings
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(ref, alt):\n ref, alt, prefix = trim_shared_prefix(ref, alt)\n ref, alt, suffix = trim_shared_suffix(ref, alt)\n return ref, alt, prefix, suffix\n", "role": "user" }, { "content": "trim_shared_flanking_strings", "role": "assistant" } ]
def x(args_list=None): print_version_info() if args_list is None: args_list = sys.argv[1:] args = arg_parser.parse_args(args_list) variants = variant_collection_from_args(args) effects = variants.effects() if args.only_coding: effects = effects.drop_silent_and_noncoding() if args.one_per_variant: variant_to_effect_dict = effects.top_priority_effect_per_variant() effects = effects.clone_with_new_elements(list( variant_to_effect_dict.values())) effects_dataframe = effects.to_dataframe() logger.info('\n%s', effects) if args.output_csv: effects_dataframe.to_csv(args.output_csv, index=False)
main
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(args_list=None):\n print_version_info()\n if args_list is None:\n args_list = sys.argv[1:]\n args = arg_parser.parse_args(args_list)\n variants = variant_collection_from_args(args)\n effects = variants.effects()\n if args.only_coding:\n effects = effects.drop_silent_and_noncoding()\n if args.one_per_variant:\n variant_to_effect_dict = effects.top_priority_effect_per_variant()\n effects = effects.clone_with_new_elements(list(\n variant_to_effect_dict.values()))\n effects_dataframe = effects.to_dataframe()\n logger.info('\\n%s', effects)\n if args.output_csv:\n effects_dataframe.to_csv(args.output_csv, index=False)\n", "role": "user" }, { "content": "main", "role": "assistant" } ]
def x(variant, trimmed_cdna_ref, trimmed_cdna_alt, sequence_from_start_codon, cds_offset): ref_codon_start_offset = cds_offset // 3 nucleotide_offset_into_first_ref_codon = cds_offset % 3 n_ref_nucleotides = len(trimmed_cdna_ref) if n_ref_nucleotides == 0: if nucleotide_offset_into_first_ref_codon == 2: ref_codon_end_offset = ref_codon_start_offset else: ref_codon_end_offset = ref_codon_start_offset + 1 ref_codons = sequence_from_start_codon[ref_codon_start_offset * 3: ref_codon_end_offset * 3] prefix = ref_codons[:nucleotide_offset_into_first_ref_codon + 1] suffix = ref_codons[nucleotide_offset_into_first_ref_codon + 1:] else: ref_codon_end_offset = (cds_offset + n_ref_nucleotides - 1) // 3 + 1 ref_codons = sequence_from_start_codon[ref_codon_start_offset * 3: ref_codon_end_offset * 3] prefix = ref_codons[:nucleotide_offset_into_first_ref_codon] offset_in_last_ref_codon = (cds_offset + n_ref_nucleotides - 1) % 3 if offset_in_last_ref_codon == 0: suffix = ref_codons[-2:] elif offset_in_last_ref_codon == 1: suffix = ref_codons[-1:] else: suffix = '' mutant_codons = prefix + trimmed_cdna_alt + suffix assert len(mutant_codons ) % 3 == 0, 'Expected in-frame mutation but got %s (length = %d)' % ( mutant_codons, len(mutant_codons)) return ref_codon_start_offset, ref_codon_end_offset, mutant_codons
get_codons
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant, trimmed_cdna_ref, trimmed_cdna_alt,\n sequence_from_start_codon, cds_offset):\n ref_codon_start_offset = cds_offset // 3\n nucleotide_offset_into_first_ref_codon = cds_offset % 3\n n_ref_nucleotides = len(trimmed_cdna_ref)\n if n_ref_nucleotides == 0:\n if nucleotide_offset_into_first_ref_codon == 2:\n ref_codon_end_offset = ref_codon_start_offset\n else:\n ref_codon_end_offset = ref_codon_start_offset + 1\n ref_codons = sequence_from_start_codon[ref_codon_start_offset * 3:\n ref_codon_end_offset * 3]\n prefix = ref_codons[:nucleotide_offset_into_first_ref_codon + 1]\n suffix = ref_codons[nucleotide_offset_into_first_ref_codon + 1:]\n else:\n ref_codon_end_offset = (cds_offset + n_ref_nucleotides - 1) // 3 + 1\n ref_codons = sequence_from_start_codon[ref_codon_start_offset * 3:\n ref_codon_end_offset * 3]\n prefix = ref_codons[:nucleotide_offset_into_first_ref_codon]\n offset_in_last_ref_codon = (cds_offset + n_ref_nucleotides - 1) % 3\n if offset_in_last_ref_codon == 0:\n suffix = ref_codons[-2:]\n elif offset_in_last_ref_codon == 1:\n suffix = ref_codons[-1:]\n else:\n suffix = ''\n mutant_codons = prefix + trimmed_cdna_alt + suffix\n assert len(mutant_codons\n ) % 3 == 0, 'Expected in-frame mutation but got %s (length = %d)' % (\n mutant_codons, len(mutant_codons))\n return ref_codon_start_offset, ref_codon_end_offset, mutant_codons\n", "role": "user" }, { "content": "get_codons", "role": "assistant" } ]
def x(variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, sequence_from_start_codon, cds_offset): ref_codon_start_offset, ref_codon_end_offset, mutant_codons = get_codons( variant=variant, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt, sequence_from_start_codon= sequence_from_start_codon, cds_offset=cds_offset) mutation_affects_start_codon = ref_codon_start_offset == 0 if mutation_affects_start_codon and mutant_codons[:3] not in START_CODONS: return StartLoss(variant=variant, transcript=transcript) aa_ref = transcript.protein_sequence[ref_codon_start_offset: ref_codon_end_offset] reference_protein_length = len(transcript.protein_sequence) aa_alt, mutant_stop_codon_index, using_three_prime_utr = ( translate_in_frame_mutation(transcript=transcript, ref_codon_start_offset=ref_codon_start_offset, ref_codon_end_offset =ref_codon_end_offset, mutant_codons=mutant_codons)) mutant_codons_contain_stop = mutant_stop_codon_index != -1 aa_ref, aa_alt, shared_prefix, shared_suffix = ( trim_shared_flanking_strings(aa_ref, aa_alt)) n_aa_ref = len(aa_ref) n_aa_alt = len(aa_alt) n_aa_shared = len(shared_prefix) is_insertion = ref_codon_start_offset == ref_codon_end_offset aa_mutation_start_offset = (ref_codon_start_offset + n_aa_shared + is_insertion) if mutant_codons_contain_stop: mutant_stop_codon_index += n_aa_shared if mutation_affects_start_codon and aa_ref == aa_alt: return AlternateStartCodon(variant=variant, transcript=transcript, ref_codon=transcript.sequence[:3], alt_codon=mutant_codons[:3]) n_ref_amino_acids_after_mutated_site = (reference_protein_length - aa_mutation_start_offset - 1) if (mutant_codons_contain_stop and n_aa_alt <= n_ref_amino_acids_after_mutated_site): return PrematureStop(variant=variant, transcript=transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref= aa_ref, aa_alt=aa_alt) if (aa_mutation_start_offset > reference_protein_length or n_aa_ref == n_aa_alt == 0): return Silent(variant=variant, transcript=transcript, aa_pos= aa_mutation_start_offset, aa_ref=shared_prefix + shared_suffix) elif using_three_prime_utr: return StopLoss(variant, transcript, aa_ref=aa_ref, aa_alt=aa_alt) elif n_aa_alt == 0: return Deletion(variant, transcript, aa_mutation_start_offset= aa_mutation_start_offset, aa_ref=aa_ref) elif n_aa_ref == 0: return Insertion(variant, transcript, aa_mutation_start_offset= aa_mutation_start_offset, aa_alt=aa_alt) elif n_aa_ref == n_aa_alt == 1: return Substitution(variant, transcript, aa_mutation_start_offset= aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt) else: return ComplexSubstitution(variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref= aa_ref, aa_alt=aa_alt)
predict_in_frame_coding_effect
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt,\n sequence_from_start_codon, cds_offset):\n ref_codon_start_offset, ref_codon_end_offset, mutant_codons = get_codons(\n variant=variant, trimmed_cdna_ref=trimmed_cdna_ref,\n trimmed_cdna_alt=trimmed_cdna_alt, sequence_from_start_codon=\n sequence_from_start_codon, cds_offset=cds_offset)\n mutation_affects_start_codon = ref_codon_start_offset == 0\n if mutation_affects_start_codon and mutant_codons[:3] not in START_CODONS:\n return StartLoss(variant=variant, transcript=transcript)\n aa_ref = transcript.protein_sequence[ref_codon_start_offset:\n ref_codon_end_offset]\n reference_protein_length = len(transcript.protein_sequence)\n aa_alt, mutant_stop_codon_index, using_three_prime_utr = (\n translate_in_frame_mutation(transcript=transcript,\n ref_codon_start_offset=ref_codon_start_offset, ref_codon_end_offset\n =ref_codon_end_offset, mutant_codons=mutant_codons))\n mutant_codons_contain_stop = mutant_stop_codon_index != -1\n aa_ref, aa_alt, shared_prefix, shared_suffix = (\n trim_shared_flanking_strings(aa_ref, aa_alt))\n n_aa_ref = len(aa_ref)\n n_aa_alt = len(aa_alt)\n n_aa_shared = len(shared_prefix)\n is_insertion = ref_codon_start_offset == ref_codon_end_offset\n aa_mutation_start_offset = (ref_codon_start_offset + n_aa_shared +\n is_insertion)\n if mutant_codons_contain_stop:\n mutant_stop_codon_index += n_aa_shared\n if mutation_affects_start_codon and aa_ref == aa_alt:\n return AlternateStartCodon(variant=variant, transcript=transcript,\n ref_codon=transcript.sequence[:3], alt_codon=mutant_codons[:3])\n n_ref_amino_acids_after_mutated_site = (reference_protein_length -\n aa_mutation_start_offset - 1)\n if (mutant_codons_contain_stop and n_aa_alt <=\n n_ref_amino_acids_after_mutated_site):\n return PrematureStop(variant=variant, transcript=transcript,\n aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=\n aa_ref, aa_alt=aa_alt)\n if (aa_mutation_start_offset > reference_protein_length or n_aa_ref ==\n n_aa_alt == 0):\n return Silent(variant=variant, transcript=transcript, aa_pos=\n aa_mutation_start_offset, aa_ref=shared_prefix + shared_suffix)\n elif using_three_prime_utr:\n return StopLoss(variant, transcript, aa_ref=aa_ref, aa_alt=aa_alt)\n elif n_aa_alt == 0:\n return Deletion(variant, transcript, aa_mutation_start_offset=\n aa_mutation_start_offset, aa_ref=aa_ref)\n elif n_aa_ref == 0:\n return Insertion(variant, transcript, aa_mutation_start_offset=\n aa_mutation_start_offset, aa_alt=aa_alt)\n elif n_aa_ref == n_aa_alt == 1:\n return Substitution(variant, transcript, aa_mutation_start_offset=\n aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt)\n else:\n return ComplexSubstitution(variant, transcript,\n aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=\n aa_ref, aa_alt=aa_alt)\n", "role": "user" }, { "content": "predict_in_frame_coding_effect", "role": "assistant" } ]
def x(arg_parser): variant_arg_group = arg_parser.add_argument_group(title='Variants', description='Genomic variant files') variant_arg_group.add_argument('--vcf', default=[], action='append', help='Genomic variants in VCF format') variant_arg_group.add_argument('--maf', default=[], action='append', help="Genomic variants in TCGA's MAF format") variant_arg_group.add_argument('--variant', default=[], action='append', nargs=4, metavar=('CHR', 'POS', 'REF', 'ALT'), help= "Individual variant as 4 arguments giving chromsome, position, ref, and alt. Example: chr1 3848 C G. Use '.' to indicate empty alleles for insertions or deletions." ) variant_arg_group.add_argument('--genome', type=str, help= "What reference assembly your variant coordinates are using. Examples: 'hg19', 'GRCh38', or 'mm9'. This argument is ignored for MAF files, since each row includes the reference. For VCF files, this is used if specified, and otherwise is guessed from the header. For variants specfied on the commandline with --variant, this option is required." ) variant_arg_group.add_argument('--download-reference-genome-data', action='store_true', default=False, help= "Automatically download genome reference data required for annotation using PyEnsembl. Otherwise you must first run 'pyensembl install' for the release/species corresponding to the genome used in your VCF." ) variant_arg_group.add_argument('--json-variants', default=[], action= 'append', help= 'Path to Varcode.VariantCollection object serialized as a JSON file.') return variant_arg_group
add_variant_args
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(arg_parser):\n variant_arg_group = arg_parser.add_argument_group(title='Variants',\n description='Genomic variant files')\n variant_arg_group.add_argument('--vcf', default=[], action='append',\n help='Genomic variants in VCF format')\n variant_arg_group.add_argument('--maf', default=[], action='append',\n help=\"Genomic variants in TCGA's MAF format\")\n variant_arg_group.add_argument('--variant', default=[], action='append',\n nargs=4, metavar=('CHR', 'POS', 'REF', 'ALT'), help=\n \"Individual variant as 4 arguments giving chromsome, position, ref, and alt. Example: chr1 3848 C G. Use '.' to indicate empty alleles for insertions or deletions.\"\n )\n variant_arg_group.add_argument('--genome', type=str, help=\n \"What reference assembly your variant coordinates are using. Examples: 'hg19', 'GRCh38', or 'mm9'. This argument is ignored for MAF files, since each row includes the reference. For VCF files, this is used if specified, and otherwise is guessed from the header. For variants specfied on the commandline with --variant, this option is required.\"\n )\n variant_arg_group.add_argument('--download-reference-genome-data',\n action='store_true', default=False, help=\n \"Automatically download genome reference data required for annotation using PyEnsembl. Otherwise you must first run 'pyensembl install' for the release/species corresponding to the genome used in your VCF.\"\n )\n variant_arg_group.add_argument('--json-variants', default=[], action=\n 'append', help=\n 'Path to Varcode.VariantCollection object serialized as a JSON file.')\n return variant_arg_group\n", "role": "user" }, { "content": "add_variant_args", "role": "assistant" } ]
def x(sequence, offset, new_residues): assert 0 < offset <= len(sequence ), 'Invalid position %d for sequence of length %d' % (offset, len( sequence)) prefix = sequence[:offset] suffix = sequence[offset:] return prefix + new_residues + suffix
insert_before
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sequence, offset, new_residues):\n assert 0 < offset <= len(sequence\n ), 'Invalid position %d for sequence of length %d' % (offset, len(\n sequence))\n prefix = sequence[:offset]\n suffix = sequence[offset:]\n return prefix + new_residues + suffix\n", "role": "user" }, { "content": "insert_before", "role": "assistant" } ]
def x(sequence, offset, new_residues): assert 0 <= offset < len(sequence ), 'Invalid position %d for sequence of length %d' % (offset, len( sequence)) prefix = sequence[:offset + 1] suffix = sequence[offset + 1:] return prefix + new_residues + suffix
insert_after
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sequence, offset, new_residues):\n assert 0 <= offset < len(sequence\n ), 'Invalid position %d for sequence of length %d' % (offset, len(\n sequence))\n prefix = sequence[:offset + 1]\n suffix = sequence[offset + 1:]\n return prefix + new_residues + suffix\n", "role": "user" }, { "content": "insert_after", "role": "assistant" } ]
def x(sequence, offset, ref, alt): n_ref = len(ref) sequence_ref = sequence[offset:offset + n_ref] assert str(sequence_ref) == str(ref ), 'Reference %s at offset %d != expected reference %s' % (sequence_ref , offset, ref) prefix = sequence[:offset] suffix = sequence[offset + n_ref:] return prefix + alt + suffix
substitute
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sequence, offset, ref, alt):\n n_ref = len(ref)\n sequence_ref = sequence[offset:offset + n_ref]\n assert str(sequence_ref) == str(ref\n ), 'Reference %s at offset %d != expected reference %s' % (sequence_ref\n , offset, ref)\n prefix = sequence[:offset]\n suffix = sequence[offset + n_ref:]\n return prefix + alt + suffix\n", "role": "user" }, { "content": "substitute", "role": "assistant" } ]
def x(assembly_names): match_recency = [int(re.search('\\d+', assembly_name).group()) for assembly_name in assembly_names] most_recent = [x for y, x in sorted(zip(match_recency, assembly_names), reverse=True)][0] return most_recent
most_recent_assembly
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(assembly_names):\n match_recency = [int(re.search('\\\\d+', assembly_name).group()) for\n assembly_name in assembly_names]\n most_recent = [x for y, x in sorted(zip(match_recency, assembly_names),\n reverse=True)][0]\n return most_recent\n", "role": "user" }, { "content": "most_recent_assembly", "role": "assistant" } ]
def x(reference_name_or_path): reference_file_name = os.path.basename(reference_name_or_path) matches = {'file_name': list(), 'full_path': list()} for assembly_name in reference_alias_dict.keys(): candidate_list = [assembly_name] + reference_alias_dict[assembly_name] for candidate in candidate_list: if candidate.lower() in reference_file_name.lower(): matches['file_name'].append(assembly_name) elif candidate.lower() in reference_name_or_path.lower(): matches['full_path'].append(assembly_name) matches['file_name'] = list(set(matches['file_name'])) matches['full_path'] = list(set(matches['full_path'])) if len(matches['file_name']) == 1: match = matches['file_name'][0] elif len(matches['file_name']) > 1: match = _most_recent_assembly(matches['file_name']) warn( 'More than one reference ({}) matches path in header ({}); the most recent one ({}) was used.' .format(','.join(matches['file_name']), reference_file_name, match) ) elif len(matches['full_path']) >= 1: match = _most_recent_assembly(matches['full_path']) warn( 'Reference could not be matched against filename ({}); using best match against full path ({}).' .format(reference_name_or_path, match)) else: raise ValueError('Failed to infer genome assembly name for %s' % reference_name_or_path) return match
infer_reference_name
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(reference_name_or_path):\n reference_file_name = os.path.basename(reference_name_or_path)\n matches = {'file_name': list(), 'full_path': list()}\n for assembly_name in reference_alias_dict.keys():\n candidate_list = [assembly_name] + reference_alias_dict[assembly_name]\n for candidate in candidate_list:\n if candidate.lower() in reference_file_name.lower():\n matches['file_name'].append(assembly_name)\n elif candidate.lower() in reference_name_or_path.lower():\n matches['full_path'].append(assembly_name)\n matches['file_name'] = list(set(matches['file_name']))\n matches['full_path'] = list(set(matches['full_path']))\n if len(matches['file_name']) == 1:\n match = matches['file_name'][0]\n elif len(matches['file_name']) > 1:\n match = _most_recent_assembly(matches['file_name'])\n warn(\n 'More than one reference ({}) matches path in header ({}); the most recent one ({}) was used.'\n .format(','.join(matches['file_name']), reference_file_name, match)\n )\n elif len(matches['full_path']) >= 1:\n match = _most_recent_assembly(matches['full_path'])\n warn(\n 'Reference could not be matched against filename ({}); using best match against full path ({}).'\n .format(reference_name_or_path, match))\n else:\n raise ValueError('Failed to infer genome assembly name for %s' %\n reference_name_or_path)\n return match\n", "role": "user" }, { "content": "infer_reference_name", "role": "assistant" } ]
def x(genome_object_string_or_int): if isinstance(genome_object_string_or_int, Genome): return genome_object_string_or_int if is_integer(genome_object_string_or_int): return cached_release(genome_object_string_or_int) elif is_string(genome_object_string_or_int): reference_name = infer_reference_name(genome_object_string_or_int) return genome_for_reference_name(reference_name) else: raise TypeError( 'Expected genome to be an int, string, or pyensembl.Genome instance, got %s : %s' % (str(genome_object_string_or_int), type( genome_object_string_or_int)))
infer_genome
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(genome_object_string_or_int):\n if isinstance(genome_object_string_or_int, Genome):\n return genome_object_string_or_int\n if is_integer(genome_object_string_or_int):\n return cached_release(genome_object_string_or_int)\n elif is_string(genome_object_string_or_int):\n reference_name = infer_reference_name(genome_object_string_or_int)\n return genome_for_reference_name(reference_name)\n else:\n raise TypeError(\n 'Expected genome to be an int, string, or pyensembl.Genome instance, got %s : %s'\n % (str(genome_object_string_or_int), type(\n genome_object_string_or_int)))\n", "role": "user" }, { "content": "infer_genome", "role": "assistant" } ]
def x(self): return dict(contig=self.original_contig, start=self.original_start, ref =self.original_ref, alt=self.original_alt, ensembl=self.ensembl, allow_extended_nucleotides=self.allow_extended_nucleotides, normalize_contig_name=self.normalize_contig_name)
to_dict
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return dict(contig=self.original_contig, start=self.original_start, ref\n =self.original_ref, alt=self.original_alt, ensembl=self.ensembl,\n allow_extended_nucleotides=self.allow_extended_nucleotides,\n normalize_contig_name=self.normalize_contig_name)\n", "role": "user" }, { "content": "to_dict", "role": "assistant" } ]
def x(self): if self.is_insertion: return 'chr%s g.%d_%dins%s' % (self.contig, self.start, self.start + 1, self.alt) elif self.is_deletion: return 'chr%s g.%d_%ddel%s' % (self.contig, self.start, self.end, self.ref) elif self.ref == self.alt: return 'chr%s g.%d%s' % (self.contig, self.start, self.ref) else: return 'chr%s g.%d%s>%s' % (self.contig, self.start, self.ref, self.alt )
short_description
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n if self.is_insertion:\n return 'chr%s g.%d_%dins%s' % (self.contig, self.start, self.start +\n 1, self.alt)\n elif self.is_deletion:\n return 'chr%s g.%d_%ddel%s' % (self.contig, self.start, self.end,\n self.ref)\n elif self.ref == self.alt:\n return 'chr%s g.%d%s' % (self.contig, self.start, self.ref)\n else:\n return 'chr%s g.%d%s>%s' % (self.contig, self.start, self.ref, self.alt\n )\n", "role": "user" }, { "content": "short_description", "role": "assistant" } ]
def x(self): if self._genes is None: self._genes = self.ensembl.genes_at_locus(self.contig, self.start, self.end) return self._genes
genes
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n if self._genes is None:\n self._genes = self.ensembl.genes_at_locus(self.contig, self.start,\n self.end)\n return self._genes\n", "role": "user" }, { "content": "genes", "role": "assistant" } ]
def x(self): return self.ensembl.gene_ids_at_locus(self.contig, self.start, self.end)
gene_ids
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return self.ensembl.gene_ids_at_locus(self.contig, self.start, self.end)\n", "role": "user" }, { "content": "gene_ids", "role": "assistant" } ]
def x(self): return self.ensembl.gene_names_at_locus(self.contig, self.start, self.end)
gene_names
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return self.ensembl.gene_names_at_locus(self.contig, self.start, self.end)\n", "role": "user" }, { "content": "gene_names", "role": "assistant" } ]
def x(self): return len(self.ref) < len(self.alt) and self.alt.startswith(self.ref)
is_insertion
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return len(self.ref) < len(self.alt) and self.alt.startswith(self.ref)\n", "role": "user" }, { "content": "is_insertion", "role": "assistant" } ]
def x(self): return len(self.ref) > len(self.alt) and self.ref.startswith(self.alt)
is_deletion
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return len(self.ref) > len(self.alt) and self.ref.startswith(self.alt)\n", "role": "user" }, { "content": "is_deletion", "role": "assistant" } ]
def x(self): return len(self.ref) == len(self.alt) == 1 and self.ref != self.alt
is_snv
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return len(self.ref) == len(self.alt) == 1 and self.ref != self.alt\n", "role": "user" }, { "content": "is_snv", "role": "assistant" } ]
def x(self): return self.is_snv and is_purine(self.ref) == is_purine(self.alt)
is_transition
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return self.is_snv and is_purine(self.ref) == is_purine(self.alt)\n", "role": "user" }, { "content": "is_transition", "role": "assistant" } ]
def x(self): return self.is_snv and is_purine(self.ref) != is_purine(self.alt)
is_transversion
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return self.is_snv and is_purine(self.ref) != is_purine(self.alt)\n", "role": "user" }, { "content": "is_transversion", "role": "assistant" } ]
def x(variant_start, n_ref_bases, interval_start, interval_end): if n_ref_bases == 0: return (interval_start <= variant_start and interval_end >= variant_start) variant_end = variant_start + n_ref_bases """ if self._changes_exonic_splice_site( strand_ref, strand_alt,) """ return interval_start <= variant_end and interval_end >= variant_start
variant_overlaps_interval
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant_start, n_ref_bases, interval_start, interval_end):\n if n_ref_bases == 0:\n return (interval_start <= variant_start and interval_end >=\n variant_start)\n variant_end = variant_start + n_ref_bases\n \"\"\"\n if self._changes_exonic_splice_site(\n strand_ref,\n strand_alt,)\n \"\"\"\n return interval_start <= variant_end and interval_end >= variant_start\n", "role": "user" }, { "content": "variant_overlaps_interval", "role": "assistant" } ]
def x(transcript_offset, transcript, transcript_ref, transcript_alt, exon_start_offset, exon_end_offset, exon_number): if exon_number > 1 and transcript_offset == exon_start_offset: if len(transcript_ref) > 0 and transcript_ref[0] in PURINE_NUCLEOTIDES: if len(transcript_alt) > 0: if transcript_alt[0] not in PURINE_NUCLEOTIDES: return True else: offset_after_deletion = transcript_offset + len(transcript_ref) if len(transcript.sequence) > offset_after_deletion: next_base = transcript.sequence[offset_after_deletion] if next_base not in PURINE_NUCLEOTIDES: return True if exon_number < len(transcript.exons): if variant_overlaps_interval(variant_start=transcript_offset, n_ref_bases=len(transcript_ref), interval_start=exon_end_offset - 2, interval_end=exon_end_offset): end_of_reference_exon = transcript.sequence[exon_end_offset - 2 :exon_end_offset + 1] if matches_exon_end_pattern(end_of_reference_exon): end_of_variant_exon = end_of_reference_exon if matches_exon_end_pattern(end_of_variant_exon): return True
changes_exonic_splice_site
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(transcript_offset, transcript, transcript_ref, transcript_alt,\n exon_start_offset, exon_end_offset, exon_number):\n if exon_number > 1 and transcript_offset == exon_start_offset:\n if len(transcript_ref) > 0 and transcript_ref[0] in PURINE_NUCLEOTIDES:\n if len(transcript_alt) > 0:\n if transcript_alt[0] not in PURINE_NUCLEOTIDES:\n return True\n else:\n offset_after_deletion = transcript_offset + len(transcript_ref)\n if len(transcript.sequence) > offset_after_deletion:\n next_base = transcript.sequence[offset_after_deletion]\n if next_base not in PURINE_NUCLEOTIDES:\n return True\n if exon_number < len(transcript.exons):\n if variant_overlaps_interval(variant_start=transcript_offset,\n n_ref_bases=len(transcript_ref), interval_start=exon_end_offset -\n 2, interval_end=exon_end_offset):\n end_of_reference_exon = transcript.sequence[exon_end_offset - 2\n :exon_end_offset + 1]\n if matches_exon_end_pattern(end_of_reference_exon):\n end_of_variant_exon = end_of_reference_exon\n if matches_exon_end_pattern(end_of_variant_exon):\n return True\n", "role": "user" }, { "content": "changes_exonic_splice_site", "role": "assistant" } ]
def x(nucleotide, allow_extended_nucleotides=False): if (not allow_extended_nucleotides and nucleotide not in STANDARD_NUCLEOTIDES): raise ValueError( '{} is a non-standard nucleotide, neither purine or pyrimidine' .format(nucleotide)) return nucleotide in PURINE_NUCLEOTIDES
is_purine
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(nucleotide, allow_extended_nucleotides=False):\n if (not allow_extended_nucleotides and nucleotide not in\n STANDARD_NUCLEOTIDES):\n raise ValueError(\n '{} is a non-standard nucleotide, neither purine or pyrimidine'\n .format(nucleotide))\n return nucleotide in PURINE_NUCLEOTIDES\n", "role": "user" }, { "content": "is_purine", "role": "assistant" } ]
def x(nucleotides, allow_extended_nucleotides=False, empty_chars='.-', treat_nan_as_empty=True): if nucleotides in empty_chars: return '' elif treat_nan_as_empty and isinstance(nucleotides, float) and np.isnan( nucleotides): return '' require_string(nucleotides, name='nucleotide string') nucleotides = nucleotides.upper() if allow_extended_nucleotides: valid_nucleotides = EXTENDED_NUCLEOTIDES else: valid_nucleotides = STANDARD_NUCLEOTIDES if not set(nucleotides) <= valid_nucleotides: raise ValueError('Invalid character(s) in nucleotide string: %s' % (','.join(set(nucleotides) - valid_nucleotides),)) return nucleotides
normalize_nucleotide_string
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(nucleotides, allow_extended_nucleotides=False, empty_chars='.-',\n treat_nan_as_empty=True):\n if nucleotides in empty_chars:\n return ''\n elif treat_nan_as_empty and isinstance(nucleotides, float) and np.isnan(\n nucleotides):\n return ''\n require_string(nucleotides, name='nucleotide string')\n nucleotides = nucleotides.upper()\n if allow_extended_nucleotides:\n valid_nucleotides = EXTENDED_NUCLEOTIDES\n else:\n valid_nucleotides = STANDARD_NUCLEOTIDES\n if not set(nucleotides) <= valid_nucleotides:\n raise ValueError('Invalid character(s) in nucleotide string: %s' %\n (','.join(set(nucleotides) - valid_nucleotides),))\n return nucleotides\n", "role": "user" }, { "content": "normalize_nucleotide_string", "role": "assistant" } ]
def x(path, genome=None, reference_vcf_key='reference', only_passing=True, allow_extended_nucleotides=False, include_info=True, chunk_size=10 ** 5, max_variants=None, sort_key=variant_ascending_position_sort_key, distinct=True): require_string(path, 'Path or URL to VCF') parsed_path = parse_url_or_path(path) if parsed_path.scheme and parsed_path.scheme.lower() != 'file': filename, headers = urllib.request.urlretrieve(path) try: new_filename = '%s.%s' % (filename, parsed_path.path.split('.')[-1] ) os.rename(filename, new_filename) filename = new_filename return load_vcf(filename, genome=genome, reference_vcf_key= reference_vcf_key, only_passing=only_passing, allow_extended_nucleotides=allow_extended_nucleotides, include_info=include_info, chunk_size=chunk_size, max_variants=max_variants, sort_key=sort_key, distinct=distinct ) finally: logger.info('Removing temporary file: %s', filename) os.unlink(filename) handle = PyVCFReaderFromPathOrURL(path) handle.close() genome = infer_genome_from_vcf(genome, handle.vcf_reader, reference_vcf_key ) df_iterator = read_vcf_into_dataframe(path, include_info=include_info, sample_names=handle.vcf_reader.samples if include_info else None, chunk_size=chunk_size) if include_info: def x(unparsed_sample_info_strings, format_string): return pyvcf_calls_to_sample_info_list(handle.vcf_reader. _parse_samples(unparsed_sample_info_strings, format_string, None)) else: sample_info_parser = None return dataframes_to_variant_collection(df_iterator, source_path=path, info_parser=handle.vcf_reader._parse_info if include_info else None, only_passing=only_passing, max_variants=max_variants, sample_names= handle.vcf_reader.samples if include_info else None, sample_info_parser=sample_info_parser, variant_kwargs={'ensembl': genome, 'allow_extended_nucleotides': allow_extended_nucleotides}, variant_collection_kwargs={'sort_key': sort_key, 'distinct': distinct})
load_vcf
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(path, genome=None, reference_vcf_key='reference', only_passing=True,\n allow_extended_nucleotides=False, include_info=True, chunk_size=10 ** 5,\n max_variants=None, sort_key=variant_ascending_position_sort_key,\n distinct=True):\n require_string(path, 'Path or URL to VCF')\n parsed_path = parse_url_or_path(path)\n if parsed_path.scheme and parsed_path.scheme.lower() != 'file':\n filename, headers = urllib.request.urlretrieve(path)\n try:\n new_filename = '%s.%s' % (filename, parsed_path.path.split('.')[-1]\n )\n os.rename(filename, new_filename)\n filename = new_filename\n return load_vcf(filename, genome=genome, reference_vcf_key=\n reference_vcf_key, only_passing=only_passing,\n allow_extended_nucleotides=allow_extended_nucleotides,\n include_info=include_info, chunk_size=chunk_size,\n max_variants=max_variants, sort_key=sort_key, distinct=distinct\n )\n finally:\n logger.info('Removing temporary file: %s', filename)\n os.unlink(filename)\n handle = PyVCFReaderFromPathOrURL(path)\n handle.close()\n genome = infer_genome_from_vcf(genome, handle.vcf_reader, reference_vcf_key\n )\n df_iterator = read_vcf_into_dataframe(path, include_info=include_info,\n sample_names=handle.vcf_reader.samples if include_info else None,\n chunk_size=chunk_size)\n if include_info:\n\n def x(unparsed_sample_info_strings, format_string):\n return pyvcf_calls_to_sample_info_list(handle.vcf_reader.\n _parse_samples(unparsed_sample_info_strings, format_string,\n None))\n else:\n sample_info_parser = None\n return dataframes_to_variant_collection(df_iterator, source_path=path,\n info_parser=handle.vcf_reader._parse_info if include_info else None,\n only_passing=only_passing, max_variants=max_variants, sample_names=\n handle.vcf_reader.samples if include_info else None,\n sample_info_parser=sample_info_parser, variant_kwargs={'ensembl':\n genome, 'allow_extended_nucleotides': allow_extended_nucleotides},\n variant_collection_kwargs={'sort_key': sort_key, 'distinct': distinct})\n", "role": "user" }, { "content": "load_vcf", "role": "assistant" } ]
def x(calls): return OrderedDict((call.sample, call.data._asdict()) for call in calls)
pyvcf_calls_to_sample_info_list
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(calls):\n return OrderedDict((call.sample, call.data._asdict()) for call in calls)\n", "role": "user" }, { "content": "pyvcf_calls_to_sample_info_list", "role": "assistant" } ]
def x(dataframes, source_path, info_parser=None, only_passing=True, max_variants=None, sample_names=None, sample_info_parser=None, variant_kwargs={}, variant_collection_kwargs={}): expected_columns = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER' ] + (['INFO'] if info_parser else []) if info_parser and sample_names: if sample_info_parser is None: raise TypeError( 'Must specify sample_info_parser if specifying sample_names') expected_columns.append('FORMAT') expected_columns.extend(sample_names) variants = [] metadata = {} try: for chunk in dataframes: assert chunk.columns.tolist( ) == expected_columns, 'dataframe columns (%s) do not match expected columns (%s)' % ( chunk.columns, expected_columns) for tpl in chunk.itertuples(): i, chrom, pos, id_, ref, alts, qual, flter = tpl[:8] if flter == '.': flter = None elif flter == 'PASS': flter = [] elif only_passing: continue else: flter = flter.split(';') if id_ == '.': id_ = None qual = float(qual) if qual != '.' else None alt_num = 0 info = sample_info = None for alt in alts.split(','): if alt != '.': if info_parser is not None and info is None: info = info_parser(tpl[8]) if sample_names: sample_info = sample_info_parser(list(tpl[ 10:]), tpl[9]) variant = Variant(chrom, int(pos), ref, alt, ** variant_kwargs) variants.append(variant) metadata[variant] = {'id': id_, 'qual': qual, 'filter': flter, 'info': info, 'sample_info': sample_info, 'alt_allele_index': alt_num} if max_variants and len(variants) > max_variants: raise StopIteration alt_num += 1 except StopIteration: pass return VariantCollection(variants=variants, source_to_metadata_dict={ source_path: metadata}, **variant_collection_kwargs)
dataframes_to_variant_collection
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(dataframes, source_path, info_parser=None, only_passing=True,\n max_variants=None, sample_names=None, sample_info_parser=None,\n variant_kwargs={}, variant_collection_kwargs={}):\n expected_columns = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER'\n ] + (['INFO'] if info_parser else [])\n if info_parser and sample_names:\n if sample_info_parser is None:\n raise TypeError(\n 'Must specify sample_info_parser if specifying sample_names')\n expected_columns.append('FORMAT')\n expected_columns.extend(sample_names)\n variants = []\n metadata = {}\n try:\n for chunk in dataframes:\n assert chunk.columns.tolist(\n ) == expected_columns, 'dataframe columns (%s) do not match expected columns (%s)' % (\n chunk.columns, expected_columns)\n for tpl in chunk.itertuples():\n i, chrom, pos, id_, ref, alts, qual, flter = tpl[:8]\n if flter == '.':\n flter = None\n elif flter == 'PASS':\n flter = []\n elif only_passing:\n continue\n else:\n flter = flter.split(';')\n if id_ == '.':\n id_ = None\n qual = float(qual) if qual != '.' else None\n alt_num = 0\n info = sample_info = None\n for alt in alts.split(','):\n if alt != '.':\n if info_parser is not None and info is None:\n info = info_parser(tpl[8])\n if sample_names:\n sample_info = sample_info_parser(list(tpl[\n 10:]), tpl[9])\n variant = Variant(chrom, int(pos), ref, alt, **\n variant_kwargs)\n variants.append(variant)\n metadata[variant] = {'id': id_, 'qual': qual,\n 'filter': flter, 'info': info, 'sample_info':\n sample_info, 'alt_allele_index': alt_num}\n if max_variants and len(variants) > max_variants:\n raise StopIteration\n alt_num += 1\n except StopIteration:\n pass\n return VariantCollection(variants=variants, source_to_metadata_dict={\n source_path: metadata}, **variant_collection_kwargs)\n", "role": "user" }, { "content": "dataframes_to_variant_collection", "role": "assistant" } ]
def x(path, include_info=False, sample_names=None, chunk_size=None): vcf_field_types = OrderedDict() vcf_field_types['CHROM'] = str vcf_field_types['POS'] = int vcf_field_types['ID'] = str vcf_field_types['REF'] = str vcf_field_types['ALT'] = str vcf_field_types['QUAL'] = str vcf_field_types['FILTER'] = str if include_info: vcf_field_types['INFO'] = str if sample_names: vcf_field_types['FORMAT'] = str for name in sample_names: vcf_field_types[name] = str parsed_path = parse_url_or_path(path) if not parsed_path.scheme or parsed_path.scheme.lower() == 'file': path = parsed_path.path else: raise NotImplementedError('Only local files are supported.') compression = None if path.endswith('.gz'): compression = 'gzip' elif path.endswith('.bz2'): compression = 'bz2' reader = pandas.read_table(path, compression=compression, comment='#', chunksize=chunk_size, dtype=vcf_field_types, names=list( vcf_field_types), usecols=range(len(vcf_field_types))) return reader
read_vcf_into_dataframe
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(path, include_info=False, sample_names=None, chunk_size=None):\n vcf_field_types = OrderedDict()\n vcf_field_types['CHROM'] = str\n vcf_field_types['POS'] = int\n vcf_field_types['ID'] = str\n vcf_field_types['REF'] = str\n vcf_field_types['ALT'] = str\n vcf_field_types['QUAL'] = str\n vcf_field_types['FILTER'] = str\n if include_info:\n vcf_field_types['INFO'] = str\n if sample_names:\n vcf_field_types['FORMAT'] = str\n for name in sample_names:\n vcf_field_types[name] = str\n parsed_path = parse_url_or_path(path)\n if not parsed_path.scheme or parsed_path.scheme.lower() == 'file':\n path = parsed_path.path\n else:\n raise NotImplementedError('Only local files are supported.')\n compression = None\n if path.endswith('.gz'):\n compression = 'gzip'\n elif path.endswith('.bz2'):\n compression = 'bz2'\n reader = pandas.read_table(path, compression=compression, comment='#',\n chunksize=chunk_size, dtype=vcf_field_types, names=list(\n vcf_field_types), usecols=range(len(vcf_field_types)))\n return reader\n", "role": "user" }, { "content": "read_vcf_into_dataframe", "role": "assistant" } ]
def x(stream): dec = zlib.decompressobj(zlib.MAX_WBITS | 16) previous = '' for compressed_chunk in stream: chunk = dec.decompress(compressed_chunk).decode() if chunk: lines = (previous + chunk).split('\n') previous = lines.pop() for line in lines: yield line yield previous
stream_gzip_decompress_lines
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(stream):\n dec = zlib.decompressobj(zlib.MAX_WBITS | 16)\n previous = ''\n for compressed_chunk in stream:\n chunk = dec.decompress(compressed_chunk).decode()\n if chunk:\n lines = (previous + chunk).split('\\n')\n previous = lines.pop()\n for line in lines:\n yield line\n yield previous\n", "role": "user" }, { "content": "stream_gzip_decompress_lines", "role": "assistant" } ]
def x(genome, vcf_reader, reference_vcf_key): if genome: return infer_genome(genome) elif reference_vcf_key not in vcf_reader.metadata: raise ValueError('Unable to infer reference genome for %s' % ( vcf_reader.filename,)) else: reference_path = vcf_reader.metadata[reference_vcf_key] return infer_genome(reference_path)
infer_genome_from_vcf
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(genome, vcf_reader, reference_vcf_key):\n if genome:\n return infer_genome(genome)\n elif reference_vcf_key not in vcf_reader.metadata:\n raise ValueError('Unable to infer reference genome for %s' % (\n vcf_reader.filename,))\n else:\n reference_path = vcf_reader.metadata[reference_vcf_key]\n return infer_genome(reference_path)\n", "role": "user" }, { "content": "infer_genome_from_vcf", "role": "assistant" } ]
def x(mutated_codon_index, sequence_from_mutated_codon, variant, transcript): assert transcript.protein_sequence is not None, 'Expect transcript %s to have protein sequence' % transcript original_protein_sequence = transcript.protein_sequence original_protein_length = len(original_protein_sequence) mutant_protein_suffix = translate(nucleotide_sequence= sequence_from_mutated_codon, first_codon_is_start=False, to_stop= True, truncate=True) if mutated_codon_index == 0: return StartLoss(variant=variant, transcript=transcript) _, mutant_protein_suffix, unchanged_amino_acids = trim_shared_prefix(ref =original_protein_sequence[mutated_codon_index:], alt= mutant_protein_suffix) n_unchanged_amino_acids = len(unchanged_amino_acids) offset_to_first_different_amino_acid = (mutated_codon_index + n_unchanged_amino_acids) if n_unchanged_amino_acids == 0: aa_ref = '' else: aa_ref = original_protein_sequence[-n_unchanged_amino_acids:] if offset_to_first_different_amino_acid >= original_protein_length: if len(mutant_protein_suffix) == 0: return Silent(variant=variant, transcript=transcript, aa_pos= mutated_codon_index, aa_ref=aa_ref) else: return StopLoss(variant=variant, transcript=transcript, aa_ref= aa_ref, aa_alt=mutant_protein_suffix) aa_ref = original_protein_sequence[offset_to_first_different_amino_acid] if len(mutant_protein_suffix) == 0: return FrameShiftTruncation(variant=variant, transcript=transcript, stop_codon_offset=offset_to_first_different_amino_acid) return FrameShift(variant=variant, transcript=transcript, aa_mutation_start_offset=offset_to_first_different_amino_acid, shifted_sequence=str(mutant_protein_suffix))
create_frameshift_effect
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(mutated_codon_index, sequence_from_mutated_codon, variant, transcript):\n assert transcript.protein_sequence is not None, 'Expect transcript %s to have protein sequence' % transcript\n original_protein_sequence = transcript.protein_sequence\n original_protein_length = len(original_protein_sequence)\n mutant_protein_suffix = translate(nucleotide_sequence=\n sequence_from_mutated_codon, first_codon_is_start=False, to_stop=\n True, truncate=True)\n if mutated_codon_index == 0:\n return StartLoss(variant=variant, transcript=transcript)\n _, mutant_protein_suffix, unchanged_amino_acids = trim_shared_prefix(ref\n =original_protein_sequence[mutated_codon_index:], alt=\n mutant_protein_suffix)\n n_unchanged_amino_acids = len(unchanged_amino_acids)\n offset_to_first_different_amino_acid = (mutated_codon_index +\n n_unchanged_amino_acids)\n if n_unchanged_amino_acids == 0:\n aa_ref = ''\n else:\n aa_ref = original_protein_sequence[-n_unchanged_amino_acids:]\n if offset_to_first_different_amino_acid >= original_protein_length:\n if len(mutant_protein_suffix) == 0:\n return Silent(variant=variant, transcript=transcript, aa_pos=\n mutated_codon_index, aa_ref=aa_ref)\n else:\n return StopLoss(variant=variant, transcript=transcript, aa_ref=\n aa_ref, aa_alt=mutant_protein_suffix)\n aa_ref = original_protein_sequence[offset_to_first_different_amino_acid]\n if len(mutant_protein_suffix) == 0:\n return FrameShiftTruncation(variant=variant, transcript=transcript,\n stop_codon_offset=offset_to_first_different_amino_acid)\n return FrameShift(variant=variant, transcript=transcript,\n aa_mutation_start_offset=offset_to_first_different_amino_acid,\n shifted_sequence=str(mutant_protein_suffix))\n", "role": "user" }, { "content": "create_frameshift_effect", "role": "assistant" } ]
def x(sequence_from_start_codon, cds_offset_before_insertion, inserted_nucleotides): coding_sequence_after_insertion = sequence_from_start_codon[ cds_offset_before_insertion + 1:] if cds_offset_before_insertion % 3 == 2: mutated_codon_index = cds_offset_before_insertion // 3 + 1 nucleotides_before = '' elif cds_offset_before_insertion % 3 == 1: mutated_codon_index = cds_offset_before_insertion // 3 nucleotides_before = sequence_from_start_codon[ cds_offset_before_insertion - 1:cds_offset_before_insertion + 1] elif cds_offset_before_insertion % 3 == 0: mutated_codon_index = cds_offset_before_insertion // 3 nucleotides_before = sequence_from_start_codon[ cds_offset_before_insertion] sequence_from_mutated_codon = (nucleotides_before + inserted_nucleotides + coding_sequence_after_insertion) return mutated_codon_index, sequence_from_mutated_codon
cdna_codon_sequence_after_insertion_frameshift
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sequence_from_start_codon, cds_offset_before_insertion,\n inserted_nucleotides):\n coding_sequence_after_insertion = sequence_from_start_codon[\n cds_offset_before_insertion + 1:]\n if cds_offset_before_insertion % 3 == 2:\n mutated_codon_index = cds_offset_before_insertion // 3 + 1\n nucleotides_before = ''\n elif cds_offset_before_insertion % 3 == 1:\n mutated_codon_index = cds_offset_before_insertion // 3\n nucleotides_before = sequence_from_start_codon[\n cds_offset_before_insertion - 1:cds_offset_before_insertion + 1]\n elif cds_offset_before_insertion % 3 == 0:\n mutated_codon_index = cds_offset_before_insertion // 3\n nucleotides_before = sequence_from_start_codon[\n cds_offset_before_insertion]\n sequence_from_mutated_codon = (nucleotides_before +\n inserted_nucleotides + coding_sequence_after_insertion)\n return mutated_codon_index, sequence_from_mutated_codon\n", "role": "user" }, { "content": "cdna_codon_sequence_after_insertion_frameshift", "role": "assistant" } ]
def x(sequence_from_start_codon, cds_offset, trimmed_cdna_ref, trimmed_cdna_alt ): mutated_codon_index = cds_offset // 3 sequence_after_mutated_codon = sequence_from_start_codon[ mutated_codon_index * 3:] offset_into_mutated_codon = cds_offset % 3 sequence_from_mutated_codon = substitute(sequence= sequence_after_mutated_codon, offset=offset_into_mutated_codon, ref =trimmed_cdna_ref, alt=trimmed_cdna_alt) return mutated_codon_index, sequence_from_mutated_codon
cdna_codon_sequence_after_deletion_or_substitution_frameshift
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sequence_from_start_codon, cds_offset, trimmed_cdna_ref, trimmed_cdna_alt\n ):\n mutated_codon_index = cds_offset // 3\n sequence_after_mutated_codon = sequence_from_start_codon[\n mutated_codon_index * 3:]\n offset_into_mutated_codon = cds_offset % 3\n sequence_from_mutated_codon = substitute(sequence=\n sequence_after_mutated_codon, offset=offset_into_mutated_codon, ref\n =trimmed_cdna_ref, alt=trimmed_cdna_alt)\n return mutated_codon_index, sequence_from_mutated_codon\n", "role": "user" }, { "content": "cdna_codon_sequence_after_deletion_or_substitution_frameshift", "role": "assistant" } ]
def x(variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, cds_offset, sequence_from_start_codon): if len(trimmed_cdna_ref) != 0: mutated_codon_index, sequence_from_mutated_codon = ( cdna_codon_sequence_after_deletion_or_substitution_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset =cds_offset, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt)) else: mutated_codon_index, sequence_from_mutated_codon = ( cdna_codon_sequence_after_insertion_frameshift( sequence_from_start_codon=sequence_from_start_codon, cds_offset_before_insertion=cds_offset, inserted_nucleotides= trimmed_cdna_alt)) return create_frameshift_effect(mutated_codon_index=mutated_codon_index, sequence_from_mutated_codon=sequence_from_mutated_codon, variant= variant, transcript=transcript)
predict_frameshift_coding_effect
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, cds_offset,\n sequence_from_start_codon):\n if len(trimmed_cdna_ref) != 0:\n mutated_codon_index, sequence_from_mutated_codon = (\n cdna_codon_sequence_after_deletion_or_substitution_frameshift(\n sequence_from_start_codon=sequence_from_start_codon, cds_offset\n =cds_offset, trimmed_cdna_ref=trimmed_cdna_ref,\n trimmed_cdna_alt=trimmed_cdna_alt))\n else:\n mutated_codon_index, sequence_from_mutated_codon = (\n cdna_codon_sequence_after_insertion_frameshift(\n sequence_from_start_codon=sequence_from_start_codon,\n cds_offset_before_insertion=cds_offset, inserted_nucleotides=\n trimmed_cdna_alt))\n return create_frameshift_effect(mutated_codon_index=mutated_codon_index,\n sequence_from_mutated_codon=sequence_from_mutated_codon, variant=\n variant, transcript=transcript)\n", "role": "user" }, { "content": "predict_frameshift_coding_effect", "role": "assistant" } ]
def x(variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, transcript_offset): if not transcript.complete: raise ValueError( "Can't annotate coding effect for %s on incomplete transcript %s" % (variant, transcript)) sequence = transcript.sequence n_ref = len(trimmed_cdna_ref) n_alt = len(trimmed_cdna_alt) ref_nucleotides_from_transcript = str(sequence[transcript_offset: transcript_offset + n_ref]) assert ref_nucleotides_from_transcript == trimmed_cdna_ref, "%s: expected ref '%s' at offset %d of %s, transcript has '%s'" % ( variant, trimmed_cdna_ref, transcript_offset, transcript, ref_nucleotides_from_transcript) start_codon_offset = transcript.first_start_codon_spliced_offset stop_codon_offset = transcript.last_stop_codon_spliced_offset cds_len = stop_codon_offset - start_codon_offset + 1 if cds_len < 3: raise ValueError("Coding sequence for %s is too short: '%s'" % ( transcript, transcript.sequence[start_codon_offset: stop_codon_offset + 1])) if n_ref == 0 and transcript.strand == '-': cds_offset = transcript_offset - start_codon_offset - 1 else: cds_offset = transcript_offset - start_codon_offset assert cds_offset < cds_len, 'Expected CDS offset (%d) < |CDS| (%d) for %s on %s' % ( cds_offset, cds_len, variant, transcript) sequence_from_start_codon = str(sequence[start_codon_offset:]) if (n_ref - n_alt) % 3 == 0: return predict_in_frame_coding_effect(variant=variant, transcript= transcript, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt =trimmed_cdna_alt, cds_offset=cds_offset, sequence_from_start_codon=sequence_from_start_codon) else: return predict_frameshift_coding_effect(variant=variant, transcript =transcript, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt, cds_offset=cds_offset, sequence_from_start_codon=sequence_from_start_codon)
predict_variant_coding_effect_on_transcript
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt,\n transcript_offset):\n if not transcript.complete:\n raise ValueError(\n \"Can't annotate coding effect for %s on incomplete transcript %s\" %\n (variant, transcript))\n sequence = transcript.sequence\n n_ref = len(trimmed_cdna_ref)\n n_alt = len(trimmed_cdna_alt)\n ref_nucleotides_from_transcript = str(sequence[transcript_offset:\n transcript_offset + n_ref])\n assert ref_nucleotides_from_transcript == trimmed_cdna_ref, \"%s: expected ref '%s' at offset %d of %s, transcript has '%s'\" % (\n variant, trimmed_cdna_ref, transcript_offset, transcript,\n ref_nucleotides_from_transcript)\n start_codon_offset = transcript.first_start_codon_spliced_offset\n stop_codon_offset = transcript.last_stop_codon_spliced_offset\n cds_len = stop_codon_offset - start_codon_offset + 1\n if cds_len < 3:\n raise ValueError(\"Coding sequence for %s is too short: '%s'\" % (\n transcript, transcript.sequence[start_codon_offset:\n stop_codon_offset + 1]))\n if n_ref == 0 and transcript.strand == '-':\n cds_offset = transcript_offset - start_codon_offset - 1\n else:\n cds_offset = transcript_offset - start_codon_offset\n assert cds_offset < cds_len, 'Expected CDS offset (%d) < |CDS| (%d) for %s on %s' % (\n cds_offset, cds_len, variant, transcript)\n sequence_from_start_codon = str(sequence[start_codon_offset:])\n if (n_ref - n_alt) % 3 == 0:\n return predict_in_frame_coding_effect(variant=variant, transcript=\n transcript, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt\n =trimmed_cdna_alt, cds_offset=cds_offset,\n sequence_from_start_codon=sequence_from_start_codon)\n else:\n return predict_frameshift_coding_effect(variant=variant, transcript\n =transcript, trimmed_cdna_ref=trimmed_cdna_ref,\n trimmed_cdna_alt=trimmed_cdna_alt, cds_offset=cds_offset,\n sequence_from_start_codon=sequence_from_start_codon)\n", "role": "user" }, { "content": "predict_variant_coding_effect_on_transcript", "role": "assistant" } ]
def x(variant, raise_on_error=False): if len(variant.gene_ids) == 0: effects = [Intergenic(variant)] else: effects = [] transcripts_grouped_by_gene = groupby_field(variant.transcripts, 'gene_id') for gene_id in sorted(variant.gene_ids): if gene_id not in transcripts_grouped_by_gene: gene = variant.ensembl.gene_by_id(gene_id) effects.append(Intragenic(variant, gene)) else: for transcript in transcripts_grouped_by_gene[gene_id]: if raise_on_error: effect = predict_variant_effect_on_transcript(variant =variant, transcript=transcript) else: effect = ( predict_variant_effect_on_transcript_or_failure (variant=variant, transcript=transcript)) effects.append(effect) return EffectCollection(effects)
predict_variant_effects
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant, raise_on_error=False):\n if len(variant.gene_ids) == 0:\n effects = [Intergenic(variant)]\n else:\n effects = []\n transcripts_grouped_by_gene = groupby_field(variant.transcripts,\n 'gene_id')\n for gene_id in sorted(variant.gene_ids):\n if gene_id not in transcripts_grouped_by_gene:\n gene = variant.ensembl.gene_by_id(gene_id)\n effects.append(Intragenic(variant, gene))\n else:\n for transcript in transcripts_grouped_by_gene[gene_id]:\n if raise_on_error:\n effect = predict_variant_effect_on_transcript(variant\n =variant, transcript=transcript)\n else:\n effect = (\n predict_variant_effect_on_transcript_or_failure\n (variant=variant, transcript=transcript))\n effects.append(effect)\n return EffectCollection(effects)\n", "role": "user" }, { "content": "predict_variant_effects", "role": "assistant" } ]
def x(variant, transcript): try: return predict_variant_effect_on_transcript(variant=variant, transcript=transcript) except (AssertionError, ValueError) as error: logger.warn('Encountered error annotating %s for %s: %s', variant, transcript, error) return Failure(variant, transcript)
predict_variant_effect_on_transcript_or_failure
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant, transcript):\n try:\n return predict_variant_effect_on_transcript(variant=variant,\n transcript=transcript)\n except (AssertionError, ValueError) as error:\n logger.warn('Encountered error annotating %s for %s: %s', variant,\n transcript, error)\n return Failure(variant, transcript)\n", "role": "user" }, { "content": "predict_variant_effect_on_transcript_or_failure", "role": "assistant" } ]
def x(variant, transcript): if transcript.__class__ is not Transcript: raise TypeError('Expected %s : %s to have type Transcript' % ( transcript, type(transcript))) if not transcript.is_protein_coding: return NoncodingTranscript(variant, transcript) if not transcript.complete: return IncompleteTranscript(variant, transcript) is_insertion = variant.is_insertion completely_lost_exons = [] overlapping_exon_numbers_and_exons = [] distance_to_nearest_exon = float('inf') start_in_exon = False end_in_exon = False nearest_exon = None variant_start = variant.trimmed_base1_start variant_end = variant.trimmed_base1_end for i, exon in enumerate(transcript.exons): if variant_start <= exon.start and variant_end >= exon.end: completely_lost_exons.append(exon) if is_insertion and exon.strand == '+' and variant_end == exon.end: distance = 1 elif is_insertion and exon.strand == '-' and variant_start == exon.start: distance = 1 else: distance = exon.distance_to_interval(variant_start, variant_end) if distance == 0: overlapping_exon_numbers_and_exons.append((i + 1, exon)) if exon.start <= variant_start <= exon.end: start_in_exon = True if exon.end >= variant_end >= exon.start: end_in_exon = True elif distance < distance_to_nearest_exon: distance_to_nearest_exon = distance nearest_exon = exon if len(overlapping_exon_numbers_and_exons) == 0: intronic_effect_class = choose_intronic_effect_class(variant= variant, nearest_exon=nearest_exon, distance_to_exon= distance_to_nearest_exon) return intronic_effect_class(variant=variant, transcript=transcript, nearest_exon=nearest_exon, distance_to_exon= distance_to_nearest_exon) elif len(completely_lost_exons) > 0 or len( overlapping_exon_numbers_and_exons) > 1: exons = [exon for _, exon in overlapping_exon_numbers_and_exons] return ExonLoss(variant, transcript, exons) assert len(overlapping_exon_numbers_and_exons) == 1 exon_number, exon = overlapping_exon_numbers_and_exons[0] exonic_effect_annotation = exonic_transcript_effect(variant, exon, exon_number, transcript) if start_in_exon and end_in_exon: return exonic_effect_annotation elif isinstance(exonic_effect_annotation, ExonicSpliceSite): return exonic_effect_annotation return ExonicSpliceSite(variant=variant, transcript=transcript, exon= exon, alternate_effect=exonic_effect_annotation)
predict_variant_effect_on_transcript
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant, transcript):\n if transcript.__class__ is not Transcript:\n raise TypeError('Expected %s : %s to have type Transcript' % (\n transcript, type(transcript)))\n if not transcript.is_protein_coding:\n return NoncodingTranscript(variant, transcript)\n if not transcript.complete:\n return IncompleteTranscript(variant, transcript)\n is_insertion = variant.is_insertion\n completely_lost_exons = []\n overlapping_exon_numbers_and_exons = []\n distance_to_nearest_exon = float('inf')\n start_in_exon = False\n end_in_exon = False\n nearest_exon = None\n variant_start = variant.trimmed_base1_start\n variant_end = variant.trimmed_base1_end\n for i, exon in enumerate(transcript.exons):\n if variant_start <= exon.start and variant_end >= exon.end:\n completely_lost_exons.append(exon)\n if is_insertion and exon.strand == '+' and variant_end == exon.end:\n distance = 1\n elif is_insertion and exon.strand == '-' and variant_start == exon.start:\n distance = 1\n else:\n distance = exon.distance_to_interval(variant_start, variant_end)\n if distance == 0:\n overlapping_exon_numbers_and_exons.append((i + 1, exon))\n if exon.start <= variant_start <= exon.end:\n start_in_exon = True\n if exon.end >= variant_end >= exon.start:\n end_in_exon = True\n elif distance < distance_to_nearest_exon:\n distance_to_nearest_exon = distance\n nearest_exon = exon\n if len(overlapping_exon_numbers_and_exons) == 0:\n intronic_effect_class = choose_intronic_effect_class(variant=\n variant, nearest_exon=nearest_exon, distance_to_exon=\n distance_to_nearest_exon)\n return intronic_effect_class(variant=variant, transcript=transcript,\n nearest_exon=nearest_exon, distance_to_exon=\n distance_to_nearest_exon)\n elif len(completely_lost_exons) > 0 or len(\n overlapping_exon_numbers_and_exons) > 1:\n exons = [exon for _, exon in overlapping_exon_numbers_and_exons]\n return ExonLoss(variant, transcript, exons)\n assert len(overlapping_exon_numbers_and_exons) == 1\n exon_number, exon = overlapping_exon_numbers_and_exons[0]\n exonic_effect_annotation = exonic_transcript_effect(variant, exon,\n exon_number, transcript)\n if start_in_exon and end_in_exon:\n return exonic_effect_annotation\n elif isinstance(exonic_effect_annotation, ExonicSpliceSite):\n return exonic_effect_annotation\n return ExonicSpliceSite(variant=variant, transcript=transcript, exon=\n exon, alternate_effect=exonic_effect_annotation)\n", "role": "user" }, { "content": "predict_variant_effect_on_transcript", "role": "assistant" } ]
def x(variant, nearest_exon, distance_to_exon): assert distance_to_exon > 0, 'Expected intronic effect to have distance_to_exon > 0, got %d' % ( distance_to_exon,) if nearest_exon.strand == '+': start_before = variant.trimmed_base1_start < nearest_exon.start start_same = variant.trimmed_base1_start == nearest_exon.start before_exon = start_before or variant.is_insertion and start_same else: end_after = variant.trimmed_base1_end > nearest_exon.end end_same = variant.trimmed_base1_end == nearest_exon.end before_exon = end_after or variant.is_insertion and end_same if distance_to_exon <= 2: if before_exon: return SpliceAcceptor else: return SpliceDonor elif not before_exon and distance_to_exon <= 6: return IntronicSpliceSite elif before_exon and distance_to_exon <= 3: return IntronicSpliceSite else: return Intronic
choose_intronic_effect_class
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant, nearest_exon, distance_to_exon):\n assert distance_to_exon > 0, 'Expected intronic effect to have distance_to_exon > 0, got %d' % (\n distance_to_exon,)\n if nearest_exon.strand == '+':\n start_before = variant.trimmed_base1_start < nearest_exon.start\n start_same = variant.trimmed_base1_start == nearest_exon.start\n before_exon = start_before or variant.is_insertion and start_same\n else:\n end_after = variant.trimmed_base1_end > nearest_exon.end\n end_same = variant.trimmed_base1_end == nearest_exon.end\n before_exon = end_after or variant.is_insertion and end_same\n if distance_to_exon <= 2:\n if before_exon:\n return SpliceAcceptor\n else:\n return SpliceDonor\n elif not before_exon and distance_to_exon <= 6:\n return IntronicSpliceSite\n elif before_exon and distance_to_exon <= 3:\n return IntronicSpliceSite\n else:\n return Intronic\n", "role": "user" }, { "content": "choose_intronic_effect_class", "role": "assistant" } ]
def x(variant, exon, exon_number, transcript): genome_ref = variant.trimmed_ref genome_alt = variant.trimmed_alt variant_start = variant.trimmed_base1_start variant_end = variant.trimmed_base1_end if variant_start < exon.start: logger.info('Mutation in variant %s starts before exon %s', variant, exon) assert len(genome_ref) > 0, 'Unexpected insertion into intron' n_skip_start = exon.start - variant_start genome_ref = genome_ref[n_skip_start:] genome_alt = genome_alt[n_skip_start:] genome_start = exon.start else: genome_start = variant_start if variant_end > exon.end: logger.info('Mutation in variant %s ends after exon %s', variant, exon) n_skip_end = variant_end - exon.end genome_ref = genome_ref[:-n_skip_end] genome_alt = genome_alt[:len(genome_ref)] genome_end = exon.end else: genome_end = variant_end transcript_offset = interval_offset_on_transcript(genome_start, genome_end, transcript) if transcript.on_backward_strand: cdna_ref = reverse_complement(genome_ref) cdna_alt = reverse_complement(genome_alt) else: cdna_ref = genome_ref cdna_alt = genome_alt n_ref = len(cdna_ref) expected_ref = str(transcript.sequence[transcript_offset: transcript_offset + n_ref]) if cdna_ref != expected_ref: raise ValueError( "Found ref nucleotides '%s' in sequence of %s at offset %d (chromosome positions %d:%d) but variant %s has '%s'" % (expected_ref, transcript, transcript_offset, genome_start, genome_end, variant, cdna_ref)) utr5_length = min(transcript.start_codon_spliced_offsets) if utr5_length > transcript_offset: if utr5_length < transcript_offset + n_ref: return StartLoss(variant, transcript) else: return FivePrimeUTR(variant, transcript) utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1 if transcript_offset >= utr3_offset: return ThreePrimeUTR(variant, transcript) exon_start_offset = interval_offset_on_transcript(exon.start, exon.end, transcript) exon_end_offset = exon_start_offset + len(exon) - 1 coding_effect_annotation = predict_variant_coding_effect_on_transcript( variant=variant, transcript=transcript, trimmed_cdna_ref=cdna_ref, trimmed_cdna_alt=cdna_alt, transcript_offset=transcript_offset) if changes_exonic_splice_site(transcript=transcript, transcript_ref= cdna_ref, transcript_alt=cdna_alt, transcript_offset= transcript_offset, exon_start_offset=exon_start_offset, exon_end_offset=exon_end_offset, exon_number=exon_number): return ExonicSpliceSite(variant=variant, transcript=transcript, exon=exon, alternate_effect=coding_effect_annotation) return coding_effect_annotation
exonic_transcript_effect
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variant, exon, exon_number, transcript):\n genome_ref = variant.trimmed_ref\n genome_alt = variant.trimmed_alt\n variant_start = variant.trimmed_base1_start\n variant_end = variant.trimmed_base1_end\n if variant_start < exon.start:\n logger.info('Mutation in variant %s starts before exon %s', variant,\n exon)\n assert len(genome_ref) > 0, 'Unexpected insertion into intron'\n n_skip_start = exon.start - variant_start\n genome_ref = genome_ref[n_skip_start:]\n genome_alt = genome_alt[n_skip_start:]\n genome_start = exon.start\n else:\n genome_start = variant_start\n if variant_end > exon.end:\n logger.info('Mutation in variant %s ends after exon %s', variant, exon)\n n_skip_end = variant_end - exon.end\n genome_ref = genome_ref[:-n_skip_end]\n genome_alt = genome_alt[:len(genome_ref)]\n genome_end = exon.end\n else:\n genome_end = variant_end\n transcript_offset = interval_offset_on_transcript(genome_start,\n genome_end, transcript)\n if transcript.on_backward_strand:\n cdna_ref = reverse_complement(genome_ref)\n cdna_alt = reverse_complement(genome_alt)\n else:\n cdna_ref = genome_ref\n cdna_alt = genome_alt\n n_ref = len(cdna_ref)\n expected_ref = str(transcript.sequence[transcript_offset:\n transcript_offset + n_ref])\n if cdna_ref != expected_ref:\n raise ValueError(\n \"Found ref nucleotides '%s' in sequence of %s at offset %d (chromosome positions %d:%d) but variant %s has '%s'\"\n % (expected_ref, transcript, transcript_offset, genome_start,\n genome_end, variant, cdna_ref))\n utr5_length = min(transcript.start_codon_spliced_offsets)\n if utr5_length > transcript_offset:\n if utr5_length < transcript_offset + n_ref:\n return StartLoss(variant, transcript)\n else:\n return FivePrimeUTR(variant, transcript)\n utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1\n if transcript_offset >= utr3_offset:\n return ThreePrimeUTR(variant, transcript)\n exon_start_offset = interval_offset_on_transcript(exon.start, exon.end,\n transcript)\n exon_end_offset = exon_start_offset + len(exon) - 1\n coding_effect_annotation = predict_variant_coding_effect_on_transcript(\n variant=variant, transcript=transcript, trimmed_cdna_ref=cdna_ref,\n trimmed_cdna_alt=cdna_alt, transcript_offset=transcript_offset)\n if changes_exonic_splice_site(transcript=transcript, transcript_ref=\n cdna_ref, transcript_alt=cdna_alt, transcript_offset=\n transcript_offset, exon_start_offset=exon_start_offset,\n exon_end_offset=exon_end_offset, exon_number=exon_number):\n return ExonicSpliceSite(variant=variant, transcript=transcript,\n exon=exon, alternate_effect=coding_effect_annotation)\n return coding_effect_annotation\n", "role": "user" }, { "content": "exonic_transcript_effect", "role": "assistant" } ]
def x(records, fn, skip_none=False): groups = defaultdict(list) for record in records: value = fn(record) if value is not None or not skip_none: groups[value].append(record) return dict(groups)
apply_groupby
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(records, fn, skip_none=False):\n groups = defaultdict(list)\n for record in records:\n value = fn(record)\n if value is not None or not skip_none:\n groups[value].append(record)\n return dict(groups)\n", "role": "user" }, { "content": "apply_groupby", "role": "assistant" } ]
def x(records, field_name, skip_none=True): return apply_groupby(records, lambda obj: getattr(obj, field_name), skip_none=skip_none)
groupby_field
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(records, field_name, skip_none=True):\n return apply_groupby(records, lambda obj: getattr(obj, field_name),\n skip_none=skip_none)\n", "role": "user" }, { "content": "groupby_field", "role": "assistant" } ]
def x(fn): memoized_values = {} @wraps(fn) def x(*args, **kwargs): key = args, tuple(sorted(kwargs.items())) try: return memoized_values[key] except KeyError: memoized_values[key] = fn(*args, **kwargs) return memoized_values[key] return wrapped_fn
memoize
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(fn):\n memoized_values = {}\n\n @wraps(fn)\n def x(*args, **kwargs):\n key = args, tuple(sorted(kwargs.items()))\n try:\n return memoized_values[key]\n except KeyError:\n memoized_values[key] = fn(*args, **kwargs)\n return memoized_values[key]\n return wrapped_fn\n", "role": "user" }, { "content": "memoize", "role": "assistant" } ]
def x(start, end, transcript): if start > end: raise ValueError( "start_pos %d shouldn't be greater than end_pos %d" % (start, end)) if start > transcript.end: raise ValueError('Range %d:%d starts after transcript %s (%d:%d)' % (start, end, transcript, transcript.start, transcript.end)) if end < transcript.start: raise ValueError('Range %d:%d ends before transcript %s (%d:%d)' % (start, end, transcript, transcript.start, transcript.end)) if start < transcript.start: start = transcript.start if end > transcript.end: end = transcript.end return min(transcript.spliced_offset(start), transcript.spliced_offset(end) )
interval_offset_on_transcript
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(start, end, transcript):\n if start > end:\n raise ValueError(\n \"start_pos %d shouldn't be greater than end_pos %d\" % (start, end))\n if start > transcript.end:\n raise ValueError('Range %d:%d starts after transcript %s (%d:%d)' %\n (start, end, transcript, transcript.start, transcript.end))\n if end < transcript.start:\n raise ValueError('Range %d:%d ends before transcript %s (%d:%d)' %\n (start, end, transcript, transcript.start, transcript.end))\n if start < transcript.start:\n start = transcript.start\n if end > transcript.end:\n end = transcript.end\n return min(transcript.spliced_offset(start), transcript.spliced_offset(end)\n )\n", "role": "user" }, { "content": "interval_offset_on_transcript", "role": "assistant" } ]
def x(variants, variant_to_metadata, out=sys.stdout): def x(key, variant, default='.'): val = variant_to_metadata[variant].get(key) if val is None: return default return val def x(variant): filter_metadata = get_metadata_field('filter', variant) if type(filter_metadata) == list: return 'PASS' if filter_metadata == [] else ';'.join( filter_metadata) else: bad_value_msg = ( 'Filter metadata field took on unexpected value `{}`. Update code to account for this value.' .format(str(filter_metadata))) assert filter_metadata == '.', bad_value_msg return filter_metadata def x(variant): def x(key, val): if val is True: return key if type(val) == list: val = ','.join(map(str, val)) else: val = str(val) return '{}={}'.format(key, val) info_dict = get_metadata_field('info', variant, default={}) if not info_dict: return '.' return ';'.join(build_info_pair(k, v) for k, v in info_dict.items()) def x(variant): sample_info = get_metadata_field('sample_info', variant, default={}) return ':'.join(list(sample_info.values())[0].keys() ) if sample_info else '.' def x(variant): def x(sample): sample_vals = sample_info[sample].values() return ':'.join(build_sample_val(val) for val in sample_vals) def x(sample_val): if type(sample_val) is list: return ','.join(map(str, sample_val)) elif sample_val is not None: return str(sample_val) else: return '.' sample_info = get_metadata_field('sample_info', variant, default={}) return list(build_sample_field(sample) for sample in sample_info) def x(variant, add_sample_info): record = [str(variant.original_contig), str(variant.original_start), get_metadata_field('id', variant), variant.original_ref, variant.original_alt, str(get_metadata_field('qual', variant)), build_filter_field(variant), build_info_field(variant)] if add_sample_info: record.append(build_format_field(variant)) record.extend(build_sample_fields(variant)) return record def x(): def x(): id2variants = defaultdict(list) for variant in variants: variant_id = get_metadata_field('id', variant) id2variants[variant_id].append(variant) return id2variants def x(duplicates): assert len(duplicates) > 0 assert all(v.original_contig == duplicates[0].original_contig and v.original_start == duplicates[0].original_start and v. 
original_ref == duplicates[0].original_ref for v in duplicates) import copy merged = copy.copy(duplicates[0]) merged.original_alt = ','.join(duplicate.original_alt for duplicate in duplicates) return merged id2variants = construct_id2variants() variants_no_id = id2variants.pop('.', []) merged_variants = list(map(merge_variant_list, id2variants.values()) ) + variants_no_id return sorted(merged_variants, key=lambda v: (str(v.original_contig ), str(v.original_start))) def x(): sample_names = set() for variant in variants: sample_info = get_metadata_field('sample_info', variant, default={} ) addl_sample_names = set(sample_info.keys()) if sample_names and sample_names != addl_sample_names: return [] sample_names.update(addl_sample_names) return list(sample_names) headers = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO'] sample_names = get_sample_names() if sample_names: headers += ['FORMAT'] + sample_names unique_variants_list = merge_duplicate_variants() genome_names = list({v.ensembl.reference_name for v in unique_variants_list}) if len(genome_names) > 1: raise ValueError( 'Cannot create VCF for variants with multiple reference genomes: %s' % (genome_names,)) genome_name = genome_names[0] print('##fileformat=VCFv4.2', file=out) print('##reference=%s' % genome_name, file=out) print('\t'.join(headers), file=out) for variant in unique_variants_list: record = build_vcf_record(variant, add_sample_info=bool(sample_names)) print('\t'.join(record), file=out)
variants_to_vcf
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(variants, variant_to_metadata, out=sys.stdout):\n\n def x(key, variant, default='.'):\n val = variant_to_metadata[variant].get(key)\n if val is None:\n return default\n return val\n\n def x(variant):\n filter_metadata = get_metadata_field('filter', variant)\n if type(filter_metadata) == list:\n return 'PASS' if filter_metadata == [] else ';'.join(\n filter_metadata)\n else:\n bad_value_msg = (\n 'Filter metadata field took on unexpected value `{}`. Update code to account for this value.'\n .format(str(filter_metadata)))\n assert filter_metadata == '.', bad_value_msg\n return filter_metadata\n\n def x(variant):\n\n def x(key, val):\n if val is True:\n return key\n if type(val) == list:\n val = ','.join(map(str, val))\n else:\n val = str(val)\n return '{}={}'.format(key, val)\n info_dict = get_metadata_field('info', variant, default={})\n if not info_dict:\n return '.'\n return ';'.join(build_info_pair(k, v) for k, v in info_dict.items())\n\n def x(variant):\n sample_info = get_metadata_field('sample_info', variant, default={})\n return ':'.join(list(sample_info.values())[0].keys()\n ) if sample_info else '.'\n\n def x(variant):\n\n def x(sample):\n sample_vals = sample_info[sample].values()\n return ':'.join(build_sample_val(val) for val in sample_vals)\n\n def x(sample_val):\n if type(sample_val) is list:\n return ','.join(map(str, sample_val))\n elif sample_val is not None:\n return str(sample_val)\n else:\n return '.'\n sample_info = get_metadata_field('sample_info', variant, default={})\n return list(build_sample_field(sample) for sample in sample_info)\n\n def x(variant, add_sample_info):\n record = [str(variant.original_contig), str(variant.original_start),\n get_metadata_field('id', variant), variant.original_ref,\n variant.original_alt, str(get_metadata_field('qual', variant)),\n build_filter_field(variant), build_info_field(variant)]\n if add_sample_info:\n record.append(build_format_field(variant))\n record.extend(build_sample_fields(variant))\n return record\n\n def x():\n\n def x():\n id2variants = defaultdict(list)\n for variant in variants:\n variant_id = get_metadata_field('id', variant)\n id2variants[variant_id].append(variant)\n return id2variants\n\n def x(duplicates):\n assert len(duplicates) > 0\n assert all(v.original_contig == duplicates[0].original_contig and\n v.original_start == duplicates[0].original_start and v.\n original_ref == duplicates[0].original_ref for v in duplicates)\n import copy\n merged = copy.copy(duplicates[0])\n merged.original_alt = ','.join(duplicate.original_alt for\n duplicate in duplicates)\n return merged\n id2variants = construct_id2variants()\n variants_no_id = id2variants.pop('.', [])\n merged_variants = list(map(merge_variant_list, id2variants.values())\n ) + variants_no_id\n return sorted(merged_variants, key=lambda v: (str(v.original_contig\n ), str(v.original_start)))\n\n def x():\n sample_names = set()\n for variant in variants:\n sample_info = get_metadata_field('sample_info', variant, default={}\n )\n addl_sample_names = set(sample_info.keys())\n if sample_names and sample_names != addl_sample_names:\n return []\n sample_names.update(addl_sample_names)\n return list(sample_names)\n headers = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']\n sample_names = get_sample_names()\n if sample_names:\n headers += ['FORMAT'] + sample_names\n unique_variants_list = 
merge_duplicate_variants()\n genome_names = list({v.ensembl.reference_name for v in\n unique_variants_list})\n if len(genome_names) > 1:\n raise ValueError(\n 'Cannot create VCF for variants with multiple reference genomes: %s'\n % (genome_names,))\n genome_name = genome_names[0]\n print('##fileformat=VCFv4.2', file=out)\n print('##reference=%s' % genome_name, file=out)\n print('\\t'.join(headers), file=out)\n for variant in unique_variants_list:\n record = build_vcf_record(variant, add_sample_info=bool(sample_names))\n print('\\t'.join(record), file=out)\n", "role": "user" }, { "content": "variants_to_vcf", "role": "assistant" } ]
def x(self, transcript_expression_dict, min_expression_value=0.0): return self.filter_above_threshold(key_fn=lambda effect: effect. transcript_id, value_dict=transcript_expression_dict, threshold= min_expression_value)
filter_by_transcript_expression
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, transcript_expression_dict, min_expression_value=0.0):\n return self.filter_above_threshold(key_fn=lambda effect: effect.\n transcript_id, value_dict=transcript_expression_dict, threshold=\n min_expression_value)\n", "role": "user" }, { "content": "filter_by_transcript_expression", "role": "assistant" } ]
def x(self, gene_expression_dict, min_expression_value=0.0): return self.filter_above_threshold(key_fn=lambda effect: effect.gene_id, value_dict=gene_expression_dict, threshold=min_expression_value)
filter_by_gene_expression
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, gene_expression_dict, min_expression_value=0.0):\n return self.filter_above_threshold(key_fn=lambda effect: effect.gene_id,\n value_dict=gene_expression_dict, threshold=min_expression_value)\n", "role": "user" }, { "content": "filter_by_gene_expression", "role": "assistant" } ]
def x(self, min_priority_class): min_priority = transcript_effect_priority_dict[min_priority_class] return self.filter(lambda effect: effect_priority(effect) >= min_priority)
filter_by_effect_priority
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, min_priority_class):\n min_priority = transcript_effect_priority_dict[min_priority_class]\n return self.filter(lambda effect: effect_priority(effect) >= min_priority)\n", "role": "user" }, { "content": "filter_by_effect_priority", "role": "assistant" } ]
def x(self): lines = [] for variant, variant_effects in self.groupby_variant().items(): lines.append('\n%s' % variant) gene_effects_groups = variant_effects.groupby_gene_id() for gene_id, gene_effects in gene_effects_groups.items(): if gene_id: gene_name = variant.ensembl.gene_name_of_gene_id(gene_id) lines.append(' Gene: %s (%s)' % (gene_name, gene_id)) for effect in sorted(gene_effects, key=effect_priority, reverse =True): lines.append(' -- %s' % effect) if len(variant_effects) > 1: best = variant_effects.top_priority_effect() lines.append(' Highest Priority Effect: %s' % best) return '\n'.join(lines)
detailed_string
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n lines = []\n for variant, variant_effects in self.groupby_variant().items():\n lines.append('\\n%s' % variant)\n gene_effects_groups = variant_effects.groupby_gene_id()\n for gene_id, gene_effects in gene_effects_groups.items():\n if gene_id:\n gene_name = variant.ensembl.gene_name_of_gene_id(gene_id)\n lines.append(' Gene: %s (%s)' % (gene_name, gene_id))\n for effect in sorted(gene_effects, key=effect_priority, reverse\n =True):\n lines.append(' -- %s' % effect)\n if len(variant_effects) > 1:\n best = variant_effects.top_priority_effect()\n lines.append(' Highest Priority Effect: %s' % best)\n return '\\n'.join(lines)\n", "role": "user" }, { "content": "detailed_string", "role": "assistant" } ]
def x(self): return OrderedDict((variant, top_priority_effect(variant_effects)) for variant, variant_effects in self.groupby_variant().items())
top_priority_effect_per_variant
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return OrderedDict((variant, top_priority_effect(variant_effects)) for \n variant, variant_effects in self.groupby_variant().items())\n", "role": "user" }, { "content": "top_priority_effect_per_variant", "role": "assistant" } ]
def x(self): return OrderedDict((transcript_id, top_priority_effect(variant_effects) ) for transcript_id, variant_effects in self.groupby_transcript_id( ).items())
top_priority_effect_per_transcript_id
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return OrderedDict((transcript_id, top_priority_effect(variant_effects)\n ) for transcript_id, variant_effects in self.groupby_transcript_id(\n ).items())\n", "role": "user" }, { "content": "top_priority_effect_per_transcript_id", "role": "assistant" } ]
def x(self): return OrderedDict((gene_id, top_priority_effect(variant_effects)) for gene_id, variant_effects in self.groupby_gene_id().items())
top_priority_effect_per_gene_id
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return OrderedDict((gene_id, top_priority_effect(variant_effects)) for \n gene_id, variant_effects in self.groupby_gene_id().items())\n", "role": "user" }, { "content": "top_priority_effect_per_gene_id", "role": "assistant" } ]
def x(self, expression_levels): return OrderedDict((effect, expression_levels.get(effect.transcript.id, 0.0)) for effect in self if effect.transcript is not None)
effect_expression
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, expression_levels):\n return OrderedDict((effect, expression_levels.get(effect.transcript.id,\n 0.0)) for effect in self if effect.transcript is not None)\n", "role": "user" }, { "content": "effect_expression", "role": "assistant" } ]
def x(self, expression_levels): effect_expression_dict = self.effect_expression(expression_levels) if len(effect_expression_dict) == 0: return None def x(effect_fpkm_pair): effect, fpkm = effect_fpkm_pair return fpkm, multi_gene_effect_sort_key(effect) return max(effect_expression_dict.items(), key=key_fn)[0]
top_expression_effect
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, expression_levels):\n effect_expression_dict = self.effect_expression(expression_levels)\n if len(effect_expression_dict) == 0:\n return None\n\n def x(effect_fpkm_pair):\n effect, fpkm = effect_fpkm_pair\n return fpkm, multi_gene_effect_sort_key(effect)\n return max(effect_expression_dict.items(), key=key_fn)[0]\n", "role": "user" }, { "content": "top_expression_effect", "role": "assistant" } ]
def x(self): variant_properties = ['contig', 'start', 'ref', 'alt', 'is_snv', 'is_transversion', 'is_transition'] def x(effect): row = OrderedDict() row['variant'] = str(effect.variant.short_description) for field_name in variant_properties: row[field_name] = getattr(effect.variant, field_name, None) row['gene_id'] = effect.gene_id row['gene_name'] = effect.gene_name row['transcript_id'] = effect.transcript_id row['transcript_name'] = effect.transcript_name row['effect_type'] = effect.__class__.__name__ row['effect'] = effect.short_description return row return pd.DataFrame.from_records([row_from_effect(effect) for effect in self])
to_dataframe
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n variant_properties = ['contig', 'start', 'ref', 'alt', 'is_snv',\n 'is_transversion', 'is_transition']\n\n def x(effect):\n row = OrderedDict()\n row['variant'] = str(effect.variant.short_description)\n for field_name in variant_properties:\n row[field_name] = getattr(effect.variant, field_name, None)\n row['gene_id'] = effect.gene_id\n row['gene_name'] = effect.gene_name\n row['transcript_id'] = effect.transcript_id\n row['transcript_name'] = effect.transcript_name\n row['effect_type'] = effect.__class__.__name__\n row['effect'] = effect.short_description\n return row\n return pd.DataFrame.from_records([row_from_effect(effect) for effect in\n self])\n", "role": "user" }, { "content": "to_dataframe", "role": "assistant" } ]
def x(count, genome_name='GRCh38', deletions=True, insertions=True, random_seed=None): rng = random.Random(random_seed) ensembl = genome_for_reference_name(genome_name) if ensembl in _transcript_ids_cache: transcript_ids = _transcript_ids_cache[ensembl] else: transcript_ids = ensembl.transcript_ids() _transcript_ids_cache[ensembl] = transcript_ids variants = [] for _ in range(count * 100): if len(variants) < count: transcript_id = rng.choice(transcript_ids) transcript = ensembl.transcript_by_id(transcript_id) if not transcript.complete: continue exon = rng.choice(transcript.exons) base1_genomic_position = rng.randint(exon.start, exon.end) transcript_offset = transcript.spliced_offset( base1_genomic_position) seq = transcript.sequence ref = str(seq[transcript_offset]) if transcript.on_backward_strand: ref = reverse_complement(ref) alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref] if insertions: nucleotide_pairs = [(x + y) for x in STANDARD_NUCLEOTIDES for y in STANDARD_NUCLEOTIDES] alt_nucleotides.extend(nucleotide_pairs) if deletions: alt_nucleotides.append('') alt = rng.choice(alt_nucleotides) variant = Variant(transcript.contig, base1_genomic_position, ref=ref, alt=alt, ensembl=ensembl) variants.append(variant) else: return VariantCollection(variants) raise ValueError( 'Unable to generate %d random variants, there may be a problem with PyEnsembl' % count)
random_variants
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(count, genome_name='GRCh38', deletions=True, insertions=True,\n random_seed=None):\n rng = random.Random(random_seed)\n ensembl = genome_for_reference_name(genome_name)\n if ensembl in _transcript_ids_cache:\n transcript_ids = _transcript_ids_cache[ensembl]\n else:\n transcript_ids = ensembl.transcript_ids()\n _transcript_ids_cache[ensembl] = transcript_ids\n variants = []\n for _ in range(count * 100):\n if len(variants) < count:\n transcript_id = rng.choice(transcript_ids)\n transcript = ensembl.transcript_by_id(transcript_id)\n if not transcript.complete:\n continue\n exon = rng.choice(transcript.exons)\n base1_genomic_position = rng.randint(exon.start, exon.end)\n transcript_offset = transcript.spliced_offset(\n base1_genomic_position)\n seq = transcript.sequence\n ref = str(seq[transcript_offset])\n if transcript.on_backward_strand:\n ref = reverse_complement(ref)\n alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref]\n if insertions:\n nucleotide_pairs = [(x + y) for x in STANDARD_NUCLEOTIDES for\n y in STANDARD_NUCLEOTIDES]\n alt_nucleotides.extend(nucleotide_pairs)\n if deletions:\n alt_nucleotides.append('')\n alt = rng.choice(alt_nucleotides)\n variant = Variant(transcript.contig, base1_genomic_position,\n ref=ref, alt=alt, ensembl=ensembl)\n variants.append(variant)\n else:\n return VariantCollection(variants)\n raise ValueError(\n 'Unable to generate %d random variants, there may be a problem with PyEnsembl'\n % count)\n", "role": "user" }, { "content": "random_variants", "role": "assistant" } ]
def x(self, anonymous=True): if anonymous == True: logger.debug('Getting server info via Anonymous BIND on server %s' % self.target_server.get_host()) server = Server(self.target_server.get_host(), use_ssl=self. target_server.is_ssl(), get_info=ALL) conn = Connection(server, auto_bind=True) logger.debug('Got server info') else: logger.debug( 'Getting server info via credentials supplied on server %s' % self.target_server.get_host()) server = Server(self.target_server.get_host(), use_ssl=self. target_server.is_ssl(), get_info=ALL) if self.use_sspi == True: conn = self.monkeypatch() else: conn = Connection(self._srv, user=self.login_credential. get_msuser(), password=self.login_credential.get_password(), authentication=self.login_credential.get_authmethod()) logger.debug('Performing BIND to server %s' % self.target_server. get_host()) if not self._con.bind(): if 'description' in self._con.result: raise Exception('Failed to bind to server! Reason: %s' % conn.result['description']) raise Exception('Failed to bind to server! Reason: %s' % conn. result) logger.debug('Connected to server!') return server.info
get_server_info
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, anonymous=True):\n if anonymous == True:\n logger.debug('Getting server info via Anonymous BIND on server %s' %\n self.target_server.get_host())\n server = Server(self.target_server.get_host(), use_ssl=self.\n target_server.is_ssl(), get_info=ALL)\n conn = Connection(server, auto_bind=True)\n logger.debug('Got server info')\n else:\n logger.debug(\n 'Getting server info via credentials supplied on server %s' %\n self.target_server.get_host())\n server = Server(self.target_server.get_host(), use_ssl=self.\n target_server.is_ssl(), get_info=ALL)\n if self.use_sspi == True:\n conn = self.monkeypatch()\n else:\n conn = Connection(self._srv, user=self.login_credential.\n get_msuser(), password=self.login_credential.get_password(),\n authentication=self.login_credential.get_authmethod())\n logger.debug('Performing BIND to server %s' % self.target_server.\n get_host())\n if not self._con.bind():\n if 'description' in self._con.result:\n raise Exception('Failed to bind to server! Reason: %s' %\n conn.result['description'])\n raise Exception('Failed to bind to server! Reason: %s' % conn.\n result)\n logger.debug('Connected to server!')\n return server.info\n", "role": "user" }, { "content": "get_server_info", "role": "assistant" } ]
def x(self, ldap_filter, attributes): logger.debug('Paged search, filter: %s attributes: %s' % (ldap_filter, ','.join(attributes))) ctr = 0 entries = self._con.extend.standard.paged_search(self._tree, ldap_filter, attributes=attributes, paged_size=self. ldap_query_page_size) for entry in entries: if 'raw_attributes' in entry and 'attributes' in entry: ctr += 1 if ctr % self.ldap_query_page_size == 0: logger.info('New page requested. Result count: %d' % ctr) yield entry
pagedsearch
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, ldap_filter, attributes):\n logger.debug('Paged search, filter: %s attributes: %s' % (ldap_filter,\n ','.join(attributes)))\n ctr = 0\n entries = self._con.extend.standard.paged_search(self._tree,\n ldap_filter, attributes=attributes, paged_size=self.\n ldap_query_page_size)\n for entry in entries:\n if 'raw_attributes' in entry and 'attributes' in entry:\n ctr += 1\n if ctr % self.ldap_query_page_size == 0:\n logger.info('New page requested. Result count: %d' % ctr)\n yield entry\n", "role": "user" }, { "content": "pagedsearch", "role": "assistant" } ]
def x(self): logger.debug('Polling AD for all user objects') ldap_filter = '(objectClass=user)' attributes = MSADUser.ATTRS for entry in self.pagedsearch(ldap_filter, attributes): yield MSADUser.from_ldap(entry, self._ldapinfo) logger.debug('Finished polling for entries!')
get_all_user_objects
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n logger.debug('Polling AD for all user objects')\n ldap_filter = '(objectClass=user)'\n attributes = MSADUser.ATTRS\n for entry in self.pagedsearch(ldap_filter, attributes):\n yield MSADUser.from_ldap(entry, self._ldapinfo)\n logger.debug('Finished polling for entries!')\n", "role": "user" }, { "content": "get_all_user_objects", "role": "assistant" } ]
def x(self, sAMAccountName):
    logger.debug('Polling AD for user %s' % sAMAccountName)
    ldap_filter = '(&(objectClass=user)(sAMAccountName=%s))' % sAMAccountName
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
get_user
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, sAMAccountName):\n logger.debug('Polling AD for user %s' % sAMAccountName)\n ldap_filter = '(&(objectClass=user)(sAMAccountName=%s)' % sAMAccountName\n attributes = MSADUser.ATTRS\n for entry in self.pagedsearch(ldap_filter, attributes):\n yield MSADUser.from_ldap(entry, self._ldapinfo)\n logger.debug('Finished polling for entries!')\n", "role": "user" }, { "content": "get_user", "role": "assistant" } ]
def x(self): logger.debug('Polling AD for basic info') ldap_filter = '(distinguishedName=%s)' % self._tree attributes = MSADInfo.ATTRS for entry in self.pagedsearch(ldap_filter, attributes): self._ldapinfo = MSADInfo.from_ldap(entry) return self._ldapinfo logger.debug('Poll finished!')
get_ad_info
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n logger.debug('Polling AD for basic info')\n ldap_filter = '(distinguishedName=%s)' % self._tree\n attributes = MSADInfo.ATTRS\n for entry in self.pagedsearch(ldap_filter, attributes):\n self._ldapinfo = MSADInfo.from_ldap(entry)\n return self._ldapinfo\n logger.debug('Poll finished!')\n", "role": "user" }, { "content": "get_ad_info", "role": "assistant" } ]
def x(self, include_machine=False): logger.debug( 'Polling AD for all user objects, machine accounts included: %s' % include_machine) if include_machine == True: ldap_filter = '(servicePrincipalName=*)' else: ldap_filter = '(&(servicePrincipalName=*)(!(sAMAccountName = *$)))' attributes = MSADUser.ATTRS for entry in self.pagedsearch(ldap_filter, attributes): yield MSADUser.from_ldap(entry, self._ldapinfo) logger.debug('Finished polling for entries!')
get_all_service_user_objects
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, include_machine=False):\n logger.debug(\n 'Polling AD for all user objects, machine accounts included: %s' %\n include_machine)\n if include_machine == True:\n ldap_filter = '(servicePrincipalName=*)'\n else:\n ldap_filter = '(&(servicePrincipalName=*)(!(sAMAccountName = *$)))'\n attributes = MSADUser.ATTRS\n for entry in self.pagedsearch(ldap_filter, attributes):\n yield MSADUser.from_ldap(entry, self._ldapinfo)\n logger.debug('Finished polling for entries!')\n", "role": "user" }, { "content": "get_all_service_user_objects", "role": "assistant" } ]
def x(self, include_machine=False): logger.debug( 'Polling AD for all user objects, machine accounts included: %s' % include_machine) if include_machine == True: ldap_filter = '(userAccountControl:1.2.840.113556.1.4.803:=4194304)' else: ldap_filter = ( '(&(userAccountControl:1.2.840.113556.1.4.803:=4194304)(!(sAMAccountName = *$)))' ) attributes = MSADUser.ATTRS for entry in self.pagedsearch(ldap_filter, attributes): yield MSADUser.from_ldap(entry, self._ldapinfo) logger.debug('Finished polling for entries!')
get_all_knoreq_user_objects
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, include_machine=False):\n logger.debug(\n 'Polling AD for all user objects, machine accounts included: %s' %\n include_machine)\n if include_machine == True:\n ldap_filter = '(userAccountControl:1.2.840.113556.1.4.803:=4194304)'\n else:\n ldap_filter = (\n '(&(userAccountControl:1.2.840.113556.1.4.803:=4194304)(!(sAMAccountName = *$)))'\n )\n attributes = MSADUser.ATTRS\n for entry in self.pagedsearch(ldap_filter, attributes):\n yield MSADUser.from_ldap(entry, self._ldapinfo)\n logger.debug('Finished polling for entries!')\n", "role": "user" }, { "content": "get_all_knoreq_user_objects", "role": "assistant" } ]
def x(x): if x == []: return None if isinstance(x, list): return '|'.join(x) if isinstance(x, datetime): return x.isoformat() return x
vn
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(x):\n if x == []:\n return None\n if isinstance(x, list):\n return '|'.join(x)\n if isinstance(x, datetime):\n return x.isoformat()\n return x\n", "role": "user" }, { "content": "vn", "role": "assistant" } ]
def x(item): if hasattr(item, 'tags'): tags = item.tags if isinstance(tags, list): tags_dict = {} for kv_dict in tags: if isinstance(kv_dict, dict ) and 'Key' in kv_dict and 'Value' in kv_dict: tags_dict[kv_dict['Key']] = kv_dict['Value'] return ObjectProxy(item, tags=tags_dict) return item
convert_tags_to_dict
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(item):\n if hasattr(item, 'tags'):\n tags = item.tags\n if isinstance(tags, list):\n tags_dict = {}\n for kv_dict in tags:\n if isinstance(kv_dict, dict\n ) and 'Key' in kv_dict and 'Value' in kv_dict:\n tags_dict[kv_dict['Key']] = kv_dict['Value']\n return ObjectProxy(item, tags=tags_dict)\n return item\n", "role": "user" }, { "content": "convert_tags_to_dict", "role": "assistant" } ]
def x(self, query, meta): try: for table in meta.tables: self.load_table(table) except NoCredentialsError: help_link = ( 'http://boto3.readthedocs.io/en/latest/guide/configuration.html') raise QueryError( 'Unable to locate AWS credential. Please see {0} on how to configure AWS credential.' .format(help_link))
load_tables
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, query, meta):\n try:\n for table in meta.tables:\n self.load_table(table)\n except NoCredentialsError:\n help_link = (\n 'http://boto3.readthedocs.io/en/latest/guide/configuration.html')\n raise QueryError(\n 'Unable to locate AWS credential. Please see {0} on how to configure AWS credential.'\n .format(help_link))\n", "role": "user" }, { "content": "load_tables", "role": "assistant" } ]
def x(self, table): region = table.database if table.database else self.default_region resource_name, collection_name = table.table.split('_', 1) boto_region_name = region.replace('_', '-') resource = self.boto3_session.resource(resource_name, region_name= boto_region_name) if not hasattr(resource, collection_name): raise QueryError('Unknown collection <{0}> of resource <{1}>'. format(collection_name, resource_name)) self.attach_region(region) self.refresh_table(region, table.table, resource, getattr(resource, collection_name))
load_table
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, table):\n region = table.database if table.database else self.default_region\n resource_name, collection_name = table.table.split('_', 1)\n boto_region_name = region.replace('_', '-')\n resource = self.boto3_session.resource(resource_name, region_name=\n boto_region_name)\n if not hasattr(resource, collection_name):\n raise QueryError('Unknown collection <{0}> of resource <{1}>'.\n format(collection_name, resource_name))\n self.attach_region(region)\n self.refresh_table(region, table.table, resource, getattr(resource,\n collection_name))\n", "role": "user" }, { "content": "load_table", "role": "assistant" } ]
def x(obj): if isinstance(obj, datetime): return obj.isoformat() if hasattr(obj, 'id'): return jsonify(obj.id) if hasattr(obj, 'name'): return jsonify(obj.name) raise TypeError('{0} is not JSON serializable'.format(obj))
json_serialize
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(obj):\n if isinstance(obj, datetime):\n return obj.isoformat()\n if hasattr(obj, 'id'):\n return jsonify(obj.id)\n if hasattr(obj, 'name'):\n return jsonify(obj.name)\n raise TypeError('{0} is not JSON serializable'.format(obj))\n", "role": "user" }, { "content": "json_serialize", "role": "assistant" } ]
def x(serialized_object, field): if serialized_object is None: return None obj = json.loads(serialized_object) if obj is None: return None if isinstance(field, int): res = obj[field] if 0 <= field < len(obj) else None else: res = obj.get(field) if not isinstance(res, (int, float, string_types)): res = json.dumps(res) return res
json_get
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(serialized_object, field):\n if serialized_object is None:\n return None\n obj = json.loads(serialized_object)\n if obj is None:\n return None\n if isinstance(field, int):\n res = obj[field] if 0 <= field < len(obj) else None\n else:\n res = obj.get(field)\n if not isinstance(res, (int, float, string_types)):\n res = json.dumps(res)\n return res\n", "role": "user" }, { "content": "json_get", "role": "assistant" } ]
def x(db, schema_name, table_name, columns): table = '{0}.{1}'.format(schema_name, table_name ) if schema_name else table_name db.execute('DROP TABLE IF EXISTS {0}'.format(table)) columns_list = ', '.join(columns) db.execute('CREATE TABLE {0} ({1})'.format(table, columns_list))
create_table
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(db, schema_name, table_name, columns):\n table = '{0}.{1}'.format(schema_name, table_name\n ) if schema_name else table_name\n db.execute('DROP TABLE IF EXISTS {0}'.format(table))\n columns_list = ', '.join(columns)\n db.execute('CREATE TABLE {0} ({1})'.format(table, columns_list))\n", "role": "user" }, { "content": "create_table", "role": "assistant" } ]
def x(db, schema_name, table_name, columns, items): table = '{0}.{1}'.format(schema_name, table_name ) if schema_name else table_name columns_list = ', '.join(columns) values_list = ', '.join(['?'] * len(columns)) query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(table =table, columns=columns_list, values=values_list) for item in items: values = [getattr(item, col) for col in columns] db.execute(query, values)
insert_all
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(db, schema_name, table_name, columns, items):\n table = '{0}.{1}'.format(schema_name, table_name\n ) if schema_name else table_name\n columns_list = ', '.join(columns)\n values_list = ', '.join(['?'] * len(columns))\n query = 'INSERT INTO {table} ({columns}) VALUES ({values})'.format(table\n =table, columns=columns_list, values=values_list)\n for item in items:\n values = [getattr(item, col) for col in columns]\n db.execute(query, values)\n", "role": "user" }, { "content": "insert_all", "role": "assistant" } ]
def x(self, *args, **kwargs): from .comic import Comic, ComicDataWrapper return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
get_comics
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, *args, **kwargs):\n from .comic import Comic, ComicDataWrapper\n return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)\n", "role": "user" }, { "content": "get_comics", "role": "assistant" } ]
def x(self, *args, **kwargs): from .event import Event, EventDataWrapper return self.get_related_resource(Event, EventDataWrapper, args, kwargs)
get_events
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, *args, **kwargs):\n from .event import Event, EventDataWrapper\n return self.get_related_resource(Event, EventDataWrapper, args, kwargs)\n", "role": "user" }, { "content": "get_events", "role": "assistant" } ]
def x(self, *args, **kwargs): from .series import Series, SeriesDataWrapper return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs)
get_series
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, *args, **kwargs):\n from .series import Series, SeriesDataWrapper\n return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs)\n", "role": "user" }, { "content": "get_series", "role": "assistant" } ]
def x(self, *args, **kwargs): from .story import Story, StoryDataWrapper return self.get_related_resource(Story, StoryDataWrapper, args, kwargs)
get_stories
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, *args, **kwargs):\n from .story import Story, StoryDataWrapper\n return self.get_related_resource(Story, StoryDataWrapper, args, kwargs)\n", "role": "user" }, { "content": "get_stories", "role": "assistant" } ]
def x(_self, _list, _Class): items = [] for item in _list: items.append(_Class(_self.marvel, item)) return items
list_to_instance_list
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(_self, _list, _Class):\n items = []\n for item in _list:\n items.append(_Class(_self.marvel, item))\n return items\n", "role": "user" }, { "content": "list_to_instance_list", "role": "assistant" } ]
def x(_self, _Class, _ClassDataWrapper, *args, **kwargs): url = '%s/%s/%s' % (_self.resource_url(), _self.id, _Class.resource_url()) response = json.loads(_self.marvel._call(url, _self.marvel._params( kwargs)).text) return _ClassDataWrapper(_self.marvel, response)
get_related_resource
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(_self, _Class, _ClassDataWrapper, *args, **kwargs):\n url = '%s/%s/%s' % (_self.resource_url(), _self.id, _Class.resource_url())\n response = json.loads(_self.marvel._call(url, _self.marvel._params(\n kwargs)).text)\n return _ClassDataWrapper(_self.marvel, response)\n", "role": "user" }, { "content": "get_related_resource", "role": "assistant" } ]
def x(self): self.params['offset'] = str(int(self.params['offset']) + int(self. params['limit'])) return self.marvel.get_characters(self.marvel, (), **self.params)
next
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n self.params['offset'] = str(int(self.params['offset']) + int(self.\n params['limit']))\n return self.marvel.get_characters(self.marvel, (), **self.params)\n", "role": "user" }, { "content": "next", "role": "assistant" } ]
def x(self): self.params['offset'] = str(int(self.params['offset']) - int(self. params['limit'])) return self.marvel.get_characters(self.marvel, (), **self.params)
previous
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n self.params['offset'] = str(int(self.params['offset']) - int(self.\n params['limit']))\n return self.marvel.get_characters(self.marvel, (), **self.params)\n", "role": "user" }, { "content": "previous", "role": "assistant" } ]
def x(self, resource_url, params=None): url = '%s%s' % (self._endpoint(), resource_url) if params: url += '?%s&%s' % (params, self._auth()) else: url += '?%s' % self._auth() return requests.get(url)
call
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, resource_url, params=None):\n url = '%s%s' % (self._endpoint(), resource_url)\n if params:\n url += '?%s&%s' % (params, self._auth())\n else:\n url += '?%s' % self._auth()\n return requests.get(url)\n", "role": "user" }, { "content": "call", "role": "assistant" } ]
def x(self): ts = datetime.datetime.now().strftime('%Y-%m-%d%H:%M:%S') hash_string = hashlib.md5('%s%s%s' % (ts, self.private_key, self. public_key)).hexdigest() return 'ts=%s&apikey=%s&hash=%s' % (ts, self.public_key, hash_string)
auth
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n ts = datetime.datetime.now().strftime('%Y-%m-%d%H:%M:%S')\n hash_string = hashlib.md5('%s%s%s' % (ts, self.private_key, self.\n public_key)).hexdigest()\n return 'ts=%s&apikey=%s&hash=%s' % (ts, self.public_key, hash_string)\n", "role": "user" }, { "content": "auth", "role": "assistant" } ]
def x(self, id): url = '%s/%s' % (Character.resource_url(), id) response = json.loads(self._call(url).text) return CharacterDataWrapper(self, response)
get_character
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, id):\n url = '%s/%s' % (Character.resource_url(), id)\n response = json.loads(self._call(url).text)\n return CharacterDataWrapper(self, response)\n", "role": "user" }, { "content": "get_character", "role": "assistant" } ]