signature
stringlengths 8
3.44k
| body
stringlengths 0
1.41M
| docstring
stringlengths 1
122k
| id
stringlengths 5
17
|
|---|---|---|---|
def get_variant_by_name(self, name):
|
try:<EOL><INDENT>geno = self.df.loc[:, name].values<EOL>info = self.map_info.loc[name, :]<EOL><DEDENT>except KeyError:<EOL><INDENT>logging.variant_name_not_found(name)<EOL>return []<EOL><DEDENT>else:<EOL><INDENT>return [Genotypes(<EOL>Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]),<EOL>geno,<EOL>reference=info.a2,<EOL>coded=info.a1,<EOL>multiallelic=False,<EOL>)]<EOL><DEDENT>
|
Get the genotypes for a given variant (by name).
Args:
name (str): The name of the variant to retrieve the genotypes.
Returns:
list: A list of Genotypes. This is a list in order to keep the same
behaviour as the other functions.
|
f8380:c0:m2
|
def get_samples(self):
|
return self.df.index.tolist()<EOL>
|
Get an ordered collection of the samples in the genotype container.
|
f8380:c0:m3
|
def get_number_samples(self):
|
return self.df.shape[<NUM_LIT:0>]<EOL>
|
Return the number of samples.
|
f8380:c0:m4
|
def get_number_variants(self):
|
return self.df.shape[<NUM_LIT:1>]<EOL>
|
Return the number of variants in the file.
|
f8380:c0:m5
|
def iter_genotypes(self):
|
for v in self.get_vcf():<EOL><INDENT>alleles = {v.REF} | set(v.ALT)<EOL>if self.quality_field:<EOL><INDENT>variant = ImputedVariant(v.ID, v.CHROM, v.POS, alleles,<EOL>getattr(v, self.quality_field))<EOL><DEDENT>else:<EOL><INDENT>variant = Variant(v.ID, v.CHROM, v.POS, alleles)<EOL><DEDENT>for coded_allele, g in self._make_genotypes(v.ALT, v.genotypes):<EOL><INDENT>yield Genotypes(variant, g, v.REF, coded_allele,<EOL>multiallelic=len(v.ALT) > <NUM_LIT:1>)<EOL><DEDENT><DEDENT>
|
Iterates on available markers.
Returns:
Genotypes instances.
|
f8381:c0:m2
|
def iter_variants(self):
|
for v in self.get_vcf():<EOL><INDENT>yield Variant(v.ID, v.CHROM, v.POS, {v.REF} | set(v.ALT))<EOL><DEDENT>
|
Iterate over marker information.
|
f8381:c0:m4
|
def get_variants_in_region(self, chrom, start, end):
|
region = self.get_vcf()(<EOL>"<STR_LIT>".format(chrom, start, end)<EOL>)<EOL>for v in region:<EOL><INDENT>for coded_allele, g in self._make_genotypes(v.ALT, v.genotypes):<EOL><INDENT>variant = Variant(<EOL>v.ID, v.CHROM, v.POS, [v.REF, coded_allele]<EOL>)<EOL>yield Genotypes(variant, g, v.REF, coded_allele,<EOL>multiallelic=len(v.ALT) > <NUM_LIT:1>)<EOL><DEDENT><DEDENT>
|
Iterate over variants in a region.
|
f8381:c0:m6
|
def __init__(self, filename, sample_filename, probability_threshold=<NUM_LIT>):
|
<EOL>self.samples = pd.read_csv(sample_filename, sep="<STR_LIT:U+0020>", skiprows=<NUM_LIT:2>,<EOL>names=["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>"],<EOL>dtype=dict(fid=str, iid=str))<EOL>try:<EOL><INDENT>self.samples = self.samples.set_index("<STR_LIT>", verify_integrity=True)<EOL><DEDENT>except ValueError:<EOL><INDENT>logging.info(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>)<EOL>self.samples["<STR_LIT>"] = [<EOL>"<STR_LIT>".format(fid=fid, iid=iid)<EOL>for fid, iid in zip(self.samples.fid, self.samples.iid)<EOL>]<EOL>self.samples = self.samples.set_index(<EOL>"<STR_LIT>", verify_integrity=True,<EOL>)<EOL><DEDENT>self._impute2_file = get_open_func(filename)(filename, "<STR_LIT:r>")<EOL>self.has_index = path.isfile(filename + "<STR_LIT>")<EOL>self._impute2_index = None<EOL>self._index_has_location = False<EOL>if self.has_index:<EOL><INDENT>self._impute2_index = get_index(<EOL>filename,<EOL>cols=[<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>],<EOL>names=["<STR_LIT>", "<STR_LIT:name>", "<STR_LIT>"],<EOL>sep="<STR_LIT:U+0020>",<EOL>)<EOL>try:<EOL><INDENT>self._impute2_index = self._impute2_index.set_index(<EOL>"<STR_LIT:name>", verify_integrity=True,<EOL>)<EOL>self._has_duplicated = False<EOL><DEDENT>except ValueError as e:<EOL><INDENT>self._has_duplicated = True<EOL>duplicated = self._impute2_index.name.duplicated(keep=False)<EOL>duplicated_markers = self._impute2_index.loc[<EOL>duplicated, "<STR_LIT:name>"<EOL>]<EOL>duplicated_marker_counts = duplicated_markers.value_counts()<EOL>self._dup_markers = {<EOL>m: [] for m in duplicated_marker_counts.index<EOL>}<EOL>logging.found_duplicates(duplicated_marker_counts.iteritems())<EOL>counter = Counter()<EOL>for i, marker in duplicated_markers.iteritems():<EOL><INDENT>counter[marker] += <NUM_LIT:1><EOL>new_name = "<STR_LIT>".format(marker, counter[marker])<EOL>self._impute2_index.loc[i, "<STR_LIT:name>"] = 
new_name<EOL>self._dup_markers[marker].append(new_name)<EOL><DEDENT>self._impute2_index = self._impute2_index.set_index(<EOL>"<STR_LIT:name>", verify_integrity=True,<EOL>)<EOL><DEDENT>self._index_has_location = (<EOL>"<STR_LIT>" in self._impute2_index.columns and<EOL>"<STR_LIT>" in self._impute2_index.columns<EOL>)<EOL>if self._index_has_location:<EOL><INDENT>self._impute2_index["<STR_LIT>"] = False<EOL>self._impute2_index.loc[<EOL>self._impute2_index.duplicated(["<STR_LIT>", "<STR_LIT>"],<EOL>keep=False),<EOL>"<STR_LIT>"<EOL>] = True<EOL><DEDENT><DEDENT>self.prob_t = probability_threshold<EOL>
|
IMPUTE2 file reader.
Args:
filename (str): The name of the IMPUTE2 file.
sample_filename (str): The name of the SAMPLE file.
probability_threshold (float): The probability threshold.
Note
====
If the sample IDs are not unique, the index is changed to be the
sample family ID and individual ID (i.e. fid_iid).
|
f8382:c0:m0
|
def get_duplicated_markers(self):
|
if self._has_duplicated:<EOL><INDENT>return self._dup_markers<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT>
|
Returns the duplicated markers, if any.
Args:
dict: The map for duplicated marker (might be empty).
|
f8382:c0:m1
|
def get_variant_genotypes(self, variant):
|
if not self.has_index:<EOL><INDENT>raise NotImplementedError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>try:<EOL><INDENT>impute2_chrom = CHROM_STR_TO_INT[variant.chrom.name]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>".format(variant.chrom)<EOL>)<EOL><DEDENT>variant_info = self._impute2_index[<EOL>(self._impute2_index.chrom == impute2_chrom) &<EOL>(self._impute2_index.pos == variant.pos)<EOL>]<EOL>if variant_info.shape[<NUM_LIT:0>] == <NUM_LIT:0>:<EOL><INDENT>logging.variant_not_found(variant)<EOL>return []<EOL><DEDENT>elif variant_info.shape[<NUM_LIT:0>] == <NUM_LIT:1>:<EOL><INDENT>return self._get_biallelic_variant(variant, variant_info)<EOL><DEDENT>else:<EOL><INDENT>return self._get_multialleic_variant(variant, variant_info)<EOL><DEDENT>
|
Get the genotypes from a well formed variant instance.
Args:
marker (Variant): A Variant instance.
Returns:
A list of Genotypes instance containing a pointer to the variant as
well as a vector of encoded genotypes.
|
f8382:c0:m3
|
def _get_biallelic_variant(self, variant, info, _check_alleles=True):
|
info = info.iloc[<NUM_LIT:0>, :]<EOL>assert not info.multiallelic<EOL>self._impute2_file.seek(info.seek)<EOL>genotypes = self._parse_impute2_line(self._impute2_file.readline())<EOL>variant_alleles = variant._encode_alleles([<EOL>genotypes.reference, genotypes.coded,<EOL>])<EOL>if (_check_alleles and variant_alleles != variant.alleles):<EOL><INDENT>logging.variant_not_found(variant)<EOL>return []<EOL><DEDENT>return [genotypes]<EOL>
|
Creates a bi-allelic variant.
|
f8382:c0:m4
|
def iter_genotypes(self):
|
<EOL>self._impute2_file.seek(<NUM_LIT:0>)<EOL>for i, line in enumerate(self._impute2_file):<EOL><INDENT>genotypes = self._parse_impute2_line(line)<EOL>variant_info = None<EOL>if self.has_index:<EOL><INDENT>variant_info = self._impute2_index.iloc[i, :]<EOL><DEDENT>self._fix_genotypes_object(genotypes, variant_info)<EOL>yield genotypes<EOL><DEDENT>
|
Iterates on available markers.
Returns:
Genotypes instances.
|
f8382:c0:m6
|
def iter_variants(self):
|
if not self.has_index:<EOL><INDENT>raise NotImplementedError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>for name, row in self._impute2_index.iterrows():<EOL><INDENT>f = self._impute2_file<EOL>f.seek(int(row.seek))<EOL>chrom, name, pos, a1, a2 = f.read(<NUM_LIT>).split("<STR_LIT:U+0020>")[:<NUM_LIT:5>]<EOL>pos = int(pos)<EOL>yield Variant(name, CHROM_STR_ENCODE.get(chrom, chrom), pos,<EOL>[a1, a2])<EOL><DEDENT>
|
Iterate over marker information.
|
f8382:c0:m7
|
def get_variants_in_region(self, chrom, start, end):
|
if not self.has_index:<EOL><INDENT>raise NotImplementedError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>if not self._index_has_location:<EOL><INDENT>raise NotImplementedError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>required = self._impute2_index.loc[<EOL>(self._impute2_index.chrom == CHROM_STR_TO_INT[chrom]) &<EOL>(start <= self._impute2_index.pos) &<EOL>(self._impute2_index.pos <= end)<EOL>]<EOL>for name, variant_info in required.iterrows():<EOL><INDENT>for genotypes in self.get_variant_by_name(name, variant_info):<EOL><INDENT>self._fix_genotypes_object(genotypes, variant_info)<EOL>yield genotypes<EOL><DEDENT><DEDENT>
|
Iterate over variants in a region.
|
f8382:c0:m8
|
def get_variant_by_name(self, name, variant_info=None):
|
<EOL>if not self.has_index:<EOL><INDENT>raise NotImplementedError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>if variant_info is None:<EOL><INDENT>try:<EOL><INDENT>variant_info = self._impute2_index.loc[name, :]<EOL><DEDENT>except KeyError:<EOL><INDENT>if name in self.get_duplicated_markers():<EOL><INDENT>return [<EOL>self.get_variant_by_name(dup_name).pop()<EOL>for dup_name in self.get_duplicated_markers()[name]<EOL>]<EOL><DEDENT>else:<EOL><INDENT>logging.variant_name_not_found(name)<EOL>return []<EOL><DEDENT><DEDENT><DEDENT>self._impute2_file.seek(variant_info.seek)<EOL>genotypes = self._parse_impute2_line(self._impute2_file.readline())<EOL>self._fix_genotypes_object(genotypes, variant_info)<EOL>return [genotypes]<EOL>
|
Get the genotype of a marker using it's name.
Args:
name (str): The name of the marker.
variant_info (pandas.Series): The marker information (e.g. seek).
Returns:
list: A list of Genotypes (only one for PyPlink, see note below).
Note
====
From PyPlink version 1.3.2 and onwards, each name is unique in the
dataset. Hence, we can use the 'get_geno_marker' function and be
sure only one variant is returned.
|
f8382:c0:m9
|
def _fix_genotypes_object(self, genotypes, variant_info):
|
<EOL>if self.has_index and variant_info.name != genotypes.variant.name:<EOL><INDENT>if not variant_info.name.startswith(genotypes.variant.name):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>genotypes.variant.name = variant_info.name<EOL><DEDENT>if self.has_index and self._index_has_location:<EOL><INDENT>genotypes.multiallelic = variant_info.multiallelic<EOL><DEDENT>else:<EOL><INDENT>logging.warning("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>
|
Fixes a genotypes object (variant name, multi-allelic value.
|
f8382:c0:m10
|
def get_number_samples(self):
|
return self.samples.shape[<NUM_LIT:0>]<EOL>
|
Returns the number of samples.
Returns:
int: The number of samples.
|
f8382:c0:m11
|
def get_number_variants(self):
|
if self.has_index:<EOL><INDENT>return self._impute2_index.shape[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>
|
Returns the number of markers.
Returns:
int: The number of markers.
|
f8382:c0:m12
|
def _parse_impute2_line(self, line):
|
<EOL>row = line.rstrip("<STR_LIT:\r\n>").split("<STR_LIT:U+0020>")<EOL>prob = np.array(row[<NUM_LIT:5>:], dtype=float)<EOL>prob.shape = (prob.shape[<NUM_LIT:0>] // <NUM_LIT:3>, <NUM_LIT:3>)<EOL>dosage = <NUM_LIT:2> * prob[:, <NUM_LIT:2>] + prob[:, <NUM_LIT:1>]<EOL>if self.prob_t > <NUM_LIT:0>:<EOL><INDENT>dosage[~np.any(prob >= self.prob_t, axis=<NUM_LIT:1>)] = np.nan<EOL><DEDENT>return Genotypes(<EOL>Variant(row[<NUM_LIT:1>], CHROM_STR_ENCODE.get(row[<NUM_LIT:0>], row[<NUM_LIT:0>]), int(row[<NUM_LIT:2>]),<EOL>[row[<NUM_LIT:3>], row[<NUM_LIT:4>]]),<EOL>dosage,<EOL>reference=row[<NUM_LIT:3>],<EOL>coded=row[<NUM_LIT:4>],<EOL>multiallelic=False,<EOL>)<EOL>
|
Parses the current IMPUTE2 line (a single variant).
Args:
line (str): An IMPUTE2 line.
Returns:
Genotypes: The genotype in dosage format.
Warning
=======
By default, the genotypes object has multiallelic set to False.
|
f8382:c0:m14
|
def __init__(self, filename, sample_filename=None, chromosome=None,<EOL>probability_threshold=<NUM_LIT>, cpus=<NUM_LIT:1>):
|
<EOL>if cpus == <NUM_LIT:1>:<EOL><INDENT>self.is_parallel = False<EOL>self._bgen = PyBGEN(filename, prob_t=probability_threshold)<EOL><DEDENT>else:<EOL><INDENT>self.is_parallel = True<EOL>self._bgen = ParallelPyBGEN(filename, prob_t=probability_threshold,<EOL>cpus=cpus)<EOL><DEDENT>self.samples = self._bgen.samples<EOL>if self.samples is None:<EOL><INDENT>if sample_filename is None:<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>self._parse_sample_file(sample_filename)<EOL><DEDENT>self.chrom = chromosome<EOL>
|
BGEN file reader.
Args:
filename (str): The name of the BGEN file.
sample_filename (str): The name of the sample file (optional).
probability_threshold (float): The probability threshold.
|
f8384:c0:m0
|
def get_variant_genotypes(self, variant):
|
<EOL>chrom = variant.chrom.name<EOL>if self.chrom is not None and chrom == self.chrom:<EOL><INDENT>chrom = "<STR_LIT>"<EOL><DEDENT>results = []<EOL>iterator = self._bgen.iter_variants_in_region(<EOL>CHROM_STR_DECODE.get(chrom, chrom), variant.pos, variant.pos,<EOL>)<EOL>for info, dosage in iterator:<EOL><INDENT>if (variant.alleles is None or<EOL>variant.iterable_alleles_eq([info.a1, info.a2])):<EOL><INDENT>results.append(Genotypes(<EOL>Variant(<EOL>info.name,<EOL>CHROM_STR_ENCODE.get(info.chrom, info.chrom),<EOL>info.pos, [info.a1, info.a2],<EOL>),<EOL>dosage,<EOL>reference=info.a1,<EOL>coded=info.a2,<EOL>multiallelic=True,<EOL>))<EOL><DEDENT><DEDENT>if not results:<EOL><INDENT>logging.variant_name_not_found(variant)<EOL><DEDENT>return results<EOL>
|
Get the genotypes from a well formed variant instance.
Args:
marker (Variant): A Variant instance.
Returns:
A list of Genotypes instance containing a pointer to the variant as
well as a vector of encoded genotypes.
|
f8384:c0:m2
|
def iter_genotypes(self):
|
for info, dosage in self._bgen.iter_variants():<EOL><INDENT>yield Genotypes(<EOL>Variant(<EOL>info.name, CHROM_STR_ENCODE.get(info.chrom, info.chrom),<EOL>info.pos, [info.a1, info.a2],<EOL>),<EOL>dosage,<EOL>reference=info.a1,<EOL>coded=info.a2,<EOL>multiallelic=True,<EOL>)<EOL><DEDENT>
|
Iterates on available markers.
Returns:
Genotypes instances.
|
f8384:c0:m3
|
def iter_variants(self):
|
for variant in self._bgen.iter_variant_info():<EOL><INDENT>yield Variant(<EOL>variant.name,<EOL>CHROM_STR_ENCODE.get(variant.chrom, variant.chrom),<EOL>variant.pos, [variant.a1, variant.a2],<EOL>)<EOL><DEDENT>
|
Iterate over marker information.
|
f8384:c0:m4
|
def get_variants_in_region(self, chrom, start, end):
|
if self.chrom is not None and chrom == self.chrom:<EOL><INDENT>chrom = "<STR_LIT>"<EOL><DEDENT>iterator = self._bgen.iter_variants_in_region(<EOL>CHROM_STR_DECODE.get(chrom, chrom), start, end,<EOL>)<EOL>for info, dosage in iterator:<EOL><INDENT>yield Genotypes(<EOL>Variant(<EOL>info.name, CHROM_STR_ENCODE.get(info.chrom, info.chrom),<EOL>info.pos, [info.a1, info.a2],<EOL>),<EOL>dosage,<EOL>reference=info.a1,<EOL>coded=info.a2,<EOL>multiallelic=True,<EOL>)<EOL><DEDENT>
|
Iterate over variants in a region.
|
f8384:c0:m5
|
def iter_variants_by_names(self, names):
|
if not self.is_parallel:<EOL><INDENT>yield from super().iter_variants_by_names(names)<EOL><DEDENT>else:<EOL><INDENT>for info, dosage in self._bgen.iter_variants_by_names(names):<EOL><INDENT>yield Genotypes(<EOL>Variant(info.name,<EOL>CHROM_STR_ENCODE.get(info.chrom, info.chrom),<EOL>info.pos, [info.a1, info.a2]),<EOL>dosage,<EOL>reference=info.a1,<EOL>coded=info.a2,<EOL>multiallelic=True,<EOL>)<EOL><DEDENT><DEDENT>
|
Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction.
|
f8384:c0:m6
|
def get_variant_by_name(self, name):
|
results = []<EOL>try:<EOL><INDENT>for info, dosage in self._bgen.get_variant(name):<EOL><INDENT>results.append(Genotypes(<EOL>Variant(<EOL>info.name,<EOL>CHROM_STR_ENCODE.get(info.chrom, info.chrom),<EOL>info.pos,<EOL>[info.a1, info.a2],<EOL>),<EOL>dosage,<EOL>reference=info.a1,<EOL>coded=info.a2,<EOL>multiallelic=False,<EOL>))<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>logging.variant_name_not_found(name)<EOL><DEDENT>return results<EOL>
|
Get the genotype of a marker using it's name.
Args:
name (str): The name of the marker.
Returns:
list: A list of Genotypes.
|
f8384:c0:m7
|
def get_number_samples(self):
|
return self._bgen.nb_samples<EOL>
|
Returns the number of samples.
Returns:
int: The number of samples.
|
f8384:c0:m8
|
def get_number_variants(self):
|
return self._bgen.nb_variants<EOL>
|
Returns the number of markers.
Returns:
int: The number of markers.
|
f8384:c0:m9
|
def get_samples(self):
|
return list(self.samples)<EOL>
|
Returns the list of samples.
|
f8384:c0:m10
|
def __init__(self, prefix):
|
self.bed = PyPlink(prefix)<EOL>self.bim = self.bed.get_bim()<EOL>self.fam = self.bed.get_fam()<EOL>self.bim["<STR_LIT>"] = False<EOL>self.bim.loc[<EOL>self.bim.duplicated(["<STR_LIT>", "<STR_LIT>"], keep=False),<EOL>"<STR_LIT>"<EOL>] = True<EOL>try:<EOL><INDENT>self.fam = self.fam.set_index("<STR_LIT>", verify_integrity=True)<EOL><DEDENT>except ValueError:<EOL><INDENT>logging.info(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>)<EOL>self.fam["<STR_LIT>"] = [<EOL>"<STR_LIT>".format(fid=fid, iid=iid)<EOL>for fid, iid in zip(self.fam.fid, self.fam.iid)<EOL>]<EOL>self.fam = self.fam.set_index("<STR_LIT>", verify_integrity=True)<EOL><DEDENT>
|
Binary plink file reader.
Args:
prefix (str): the prefix of the Plink binary files.
|
f8385:c0:m0
|
def get_variant_genotypes(self, variant):
|
<EOL>try:<EOL><INDENT>plink_chrom = CHROM_STR_TO_INT[variant.chrom.name]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>".format(variant.chrom)<EOL>)<EOL><DEDENT>info = self.bim.loc[<EOL>(self.bim.chrom == plink_chrom) &<EOL>(self.bim.pos == variant.pos), :<EOL>]<EOL>if info.shape[<NUM_LIT:0>] == <NUM_LIT:0>:<EOL><INDENT>logging.variant_not_found(variant)<EOL>return []<EOL><DEDENT>elif info.shape[<NUM_LIT:0>] == <NUM_LIT:1>:<EOL><INDENT>return self._get_biallelic_variant(variant, info)<EOL><DEDENT>else:<EOL><INDENT>return self._get_multialleic_variant(variant, info)<EOL><DEDENT>
|
Get the genotypes from a well formed variant instance.
Args:
marker (Variant): A Variant instance.
Returns:
A list of Genotypes instance containing a pointer to the variant as
well as a vector of encoded genotypes.
Note
====
If the sample IDs are not unique, the index is changed to be the
sample family ID and individual ID (i.e. fid_iid).
|
f8385:c0:m2
|
def iter_genotypes(self):
|
<EOL>for i, (_, genotypes) in enumerate(self.bed.iter_geno()):<EOL><INDENT>info = self.bim.iloc[i, :]<EOL>yield Genotypes(<EOL>Variant(info.name, CHROM_INT_TO_STR[info.chrom],<EOL>info.pos, [info.a1, info.a2]),<EOL>self._normalize_missing(genotypes),<EOL>reference=info.a2,<EOL>coded=info.a1,<EOL>multiallelic=info.multiallelic<EOL>)<EOL><DEDENT>
|
Iterates on available markers.
Returns:
Genotypes instances.
Note
====
If the sample IDs are not unique, the index is changed to be the
sample family ID and individual ID (i.e. fid_iid).
|
f8385:c0:m5
|
def iter_variants(self):
|
for idx, row in self.bim.iterrows():<EOL><INDENT>yield Variant(<EOL>row.name, CHROM_INT_TO_STR[row.chrom], row.pos,<EOL>[row.a1, row.a2]<EOL>)<EOL><DEDENT>
|
Iterate over marker information.
|
f8385:c0:m6
|
def get_variants_in_region(self, chrom, start, end):
|
bim = self.bim.loc[<EOL>(self.bim["<STR_LIT>"] == CHROM_STR_TO_INT[chrom]) &<EOL>(start <= self.bim["<STR_LIT>"]) &<EOL>(self.bim["<STR_LIT>"] <= end)<EOL>]<EOL>for i, g in enumerate(self.bed.iter_geno_marker(bim.index)):<EOL><INDENT>info = bim.iloc[i, :]<EOL>name, geno = g<EOL>yield Genotypes(<EOL>Variant(info.name, CHROM_INT_TO_STR[info.chrom],<EOL>info.pos, [info.a1, info.a2]),<EOL>self._normalize_missing(geno),<EOL>reference=info.a2,<EOL>coded=info.a1,<EOL>multiallelic=info.multiallelic<EOL>)<EOL><DEDENT>
|
Iterate over variants in a region.
|
f8385:c0:m7
|
def get_variant_by_name(self, name):
|
<EOL>try:<EOL><INDENT>geno, i = self.bed.get_geno_marker(name, return_index=True)<EOL><DEDENT>except ValueError:<EOL><INDENT>if name in self.bed.get_duplicated_markers():<EOL><INDENT>return [<EOL>self.get_variant_by_name(dup_name).pop()<EOL>for dup_name in self.bed.get_duplicated_markers()[name]<EOL>]<EOL><DEDENT>else:<EOL><INDENT>logging.variant_name_not_found(name)<EOL>return []<EOL><DEDENT><DEDENT>else:<EOL><INDENT>info = self.bim.iloc[i, :]<EOL>return [Genotypes(<EOL>Variant(info.name, CHROM_INT_TO_STR[info.chrom], info.pos,<EOL>[info.a1, info.a2]),<EOL>self._normalize_missing(geno),<EOL>reference=info.a2,<EOL>coded=info.a1,<EOL>multiallelic=info.multiallelic,<EOL>)]<EOL><DEDENT>
|
Get the genotype of a marker using it's name.
Args:
name (str): The name of the marker.
Returns:
list: A list of Genotypes (only one for PyPlink, see note below).
Note
====
From PyPlink version 1.3.2 and onwards, each name is unique in the
dataset. Hence, we can use the 'get_geno_marker' function and be
sure only one variant is returned.
|
f8385:c0:m8
|
def get_number_samples(self):
|
return self.bed.get_nb_samples()<EOL>
|
Returns the number of samples.
Returns:
int: The number of samples.
|
f8385:c0:m9
|
def get_number_variants(self):
|
return self.bed.get_nb_markers()<EOL>
|
Returns the number of markers.
Returns:
int: The number of markers.
|
f8385:c0:m10
|
@staticmethod<EOL><INDENT>def _normalize_missing(g):<DEDENT>
|
g = g.astype(float)<EOL>g[g == -<NUM_LIT:1.0>] = np.nan<EOL>return g<EOL>
|
Normalize a plink genotype vector.
|
f8385:c0:m12
|
def _seek_generator(f):
|
yield <NUM_LIT:0><EOL>for line in f:<EOL><INDENT>yield f.tell()<EOL><DEDENT>
|
Yields seek position for each line.
Args:
f (file): the file object.
|
f8387:m0
|
def generate_index(fn, cols=None, names=None, sep="<STR_LIT:U+0020>"):
|
<EOL>assert cols is not None, "<STR_LIT>"<EOL>assert names is not None, "<STR_LIT>"<EOL>assert len(cols) == len(names)<EOL>bgzip, open_func = get_open_func(fn, return_fmt=True)<EOL>data = pd.read_csv(fn, sep=sep, engine="<STR_LIT:c>", usecols=cols, names=names,<EOL>compression="<STR_LIT>" if bgzip else None)<EOL>f = open_func(fn, "<STR_LIT:rb>")<EOL>data["<STR_LIT>"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-<NUM_LIT:1>]<EOL>f.close()<EOL>write_index(get_index_fn(fn), data)<EOL>return data<EOL>
|
Build a index for the given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
|
f8387:m1
|
def get_open_func(fn, return_fmt=False):
|
<EOL>bgzip = None<EOL>with open(fn, "<STR_LIT:rb>") as i_file:<EOL><INDENT>bgzip = i_file.read(<NUM_LIT:3>) == b"<STR_LIT>"<EOL><DEDENT>if bgzip and not HAS_BIOPYTHON:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>open_func = open<EOL>if bgzip:<EOL><INDENT>open_func = BgzfReader<EOL><DEDENT>try:<EOL><INDENT>with open_func(fn, "<STR_LIT:r>") as i_file:<EOL><INDENT>if bgzip:<EOL><INDENT>if not i_file.seekable():<EOL><INDENT>raise ValueError<EOL><DEDENT><DEDENT>pass<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>raise ValueError("<STR_LIT>".format(fn))<EOL><DEDENT>if return_fmt:<EOL><INDENT>return bgzip, open_func<EOL><DEDENT>return open_func<EOL>
|
Get the opening function.
Args:
fn (str): the name of the file.
return_fmt (bool): if the file format needs to be returned.
Returns:
tuple: either a tuple containing two elements: a boolean telling if the
format is bgzip, and the opening function.
|
f8387:m2
|
def get_index(fn, cols, names, sep):
|
if not has_index(fn):<EOL><INDENT>return generate_index(fn, cols, names, sep)<EOL><DEDENT>file_index = read_index(get_index_fn(fn))<EOL>if len(set(names) - (set(file_index.columns) - {'<STR_LIT>'})) != <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>".format(fn))<EOL><DEDENT>if "<STR_LIT>" not in file_index.columns:<EOL><INDENT>raise ValueError("<STR_LIT>".format(fn))<EOL><DEDENT>return file_index<EOL>
|
Restores the index for a given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
If the index doesn't exist for the file, it is first created.
|
f8387:m3
|
def write_index(fn, index):
|
with open(fn, "<STR_LIT:wb>") as o_file:<EOL><INDENT>o_file.write(_CHECK_STRING)<EOL>o_file.write(zlib.compress(bytes(<EOL>index.to_csv(None, index=False, encoding="<STR_LIT:utf-8>"),<EOL>encoding="<STR_LIT:utf-8>",<EOL>)))<EOL><DEDENT>
|
Writes the index to file.
Args:
fn (str): the name of the file that will contain the index.
index (pandas.DataFrame): the index.
|
f8387:m4
|
def read_index(fn):
|
index = None<EOL>with open(fn, "<STR_LIT:rb>") as i_file:<EOL><INDENT>if i_file.read(len(_CHECK_STRING)) != _CHECK_STRING:<EOL><INDENT>raise ValueError("<STR_LIT>".format(fn))<EOL><DEDENT>index = pd.read_csv(io.StringIO(<EOL>zlib.decompress(i_file.read()).decode(encoding="<STR_LIT:utf-8>"),<EOL>))<EOL><DEDENT>return index<EOL>
|
Reads index from file.
Args:
fn (str): the name of the file containing the index.
Returns:
pandas.DataFrame: the index of the file.
Before reading the index, we check the first couple of bytes to see if it
is a valid index file.
|
f8387:m5
|
def get_index_fn(fn):
|
return path.abspath("<STR_LIT>".format(fn))<EOL>
|
Generates the index filename from the path to the indexed file.
Args:
fn (str): the name of the file for which we want an index.
Returns:
str: the name of the file containing the index.
|
f8387:m6
|
def has_index(fn):
|
return path.isfile(get_index_fn(fn))<EOL>
|
Checks if the index exists.
Args:
fn (str): the name of the file for which we want the index.
Returns:
bool: ``True`` if the file contains an index, ``False`` otherwise.
|
f8387:m7
|
def index_impute2(fn):
|
logger.info("<STR_LIT>".format(fn))<EOL>impute2_index(fn, cols=[<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>], names=["<STR_LIT>", "<STR_LIT:name>", "<STR_LIT>"], sep="<STR_LIT:U+0020>")<EOL>logger.info("<STR_LIT>")<EOL>
|
Indexes an IMPUTE2 file.
Args:
fn (str): The name of the IMPUTE2 file.
|
f8388:m1
|
def index_bgen(fn, legacy=False):
|
logger.info("<STR_LIT>".format(<EOL>fn, "<STR_LIT>" if legacy else "<STR_LIT>",<EOL>))<EOL>command = ["<STR_LIT>", "<STR_LIT>", fn, "<STR_LIT>"]<EOL>if legacy:<EOL><INDENT>command.append("<STR_LIT>")<EOL><DEDENT>try:<EOL><INDENT>logger.info("<STR_LIT>".format("<STR_LIT:U+0020>".join(command)))<EOL>subprocess.Popen(command).communicate()<EOL><DEDENT>except FileNotFoundError:<EOL><INDENT>logger.error("<STR_LIT>".format(fn))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>logger.info("<STR_LIT>")<EOL>
|
Indexes a BGEN file.
Args:
fn (str): The name of the BGEN file.
|
f8388:m2
|
def parse_args():
|
parser = argparse.ArgumentParser(<EOL>prog="<STR_LIT>",<EOL>description="<STR_LIT>"<EOL>)<EOL>group = parser.add_argument_group("<STR_LIT>")<EOL>group.add_argument(<EOL>"<STR_LIT>", metavar="<STR_LIT>", type=str, nargs="<STR_LIT:+>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>",<EOL>)<EOL>group = parser.add_argument_group("<STR_LIT>")<EOL>group.add_argument(<EOL>"<STR_LIT>", metavar="<STR_LIT>", type=str, nargs="<STR_LIT:+>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>",<EOL>)<EOL>group.add_argument(<EOL>"<STR_LIT>", action="<STR_LIT:store_true>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>",<EOL>)<EOL>return parser.parse_args()<EOL>
|
Parses the arguments and options.
|
f8388:m3
|
def vcf_writer(parser, keep, extract, args):
|
<EOL>output = sys.stdout if args.output == "<STR_LIT:->" else open(args.output, "<STR_LIT:w>")<EOL>try:<EOL><INDENT>samples = np.array(parser.get_samples(), dtype=str)<EOL>k = _get_sample_select(samples=samples, keep=keep)<EOL>output.write(_VCF_HEADER.format(<EOL>date=datetime.today().strftime("<STR_LIT>"),<EOL>version=__version__,<EOL>samples="<STR_LIT:\t>".join(samples[k]),<EOL>))<EOL>generator = _get_generator(parser=parser, extract=extract, keep=k,<EOL>check_maf=args.maf)<EOL>nb_extracted = <NUM_LIT:0><EOL>for data in generator:<EOL><INDENT>genotypes = data.genotypes<EOL>af = np.nanmean(genotypes) / <NUM_LIT:2><EOL>print(data.variant.chrom, data.variant.pos, data.variant.name,<EOL>data.reference, data.coded, "<STR_LIT:.>", "<STR_LIT>", "<STR_LIT>".format(af),<EOL>"<STR_LIT>", sep="<STR_LIT:\t>", end="<STR_LIT>", file=output)<EOL>for geno in genotypes:<EOL><INDENT>if np.isnan(geno):<EOL><INDENT>output.write("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>rounded_geno = int(round(geno, <NUM_LIT:0>))<EOL>output.write("<STR_LIT>".format(<EOL>_VCF_GT_MAP[rounded_geno], geno,<EOL>))<EOL><DEDENT><DEDENT>output.write("<STR_LIT:\n>")<EOL>nb_extracted += <NUM_LIT:1><EOL><DEDENT>if nb_extracted == <NUM_LIT:0>:<EOL><INDENT>logger.warning("<STR_LIT>")<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>output.close()<EOL><DEDENT>
|
Writes the data in VCF format.
|
f8393:m1
|
def csv_writer(parser, keep, extract, args):
|
<EOL>output = sys.stdout if args.output == "<STR_LIT:->" else open(args.output, "<STR_LIT:w>")<EOL>try:<EOL><INDENT>samples = np.array(parser.get_samples(), dtype=str)<EOL>k = _get_sample_select(samples=samples, keep=keep)<EOL>print("<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>", "<STR_LIT>", "<STR_LIT>", sep="<STR_LIT:U+002C>", file=output)<EOL>generator = _get_generator(parser=parser, extract=extract, keep=k,<EOL>check_maf=args.maf)<EOL>nb_extracted = <NUM_LIT:0><EOL>for data in generator:<EOL><INDENT>genotypes = data.genotypes<EOL>hard_call_mapping = {<EOL><NUM_LIT:0>: "<STR_LIT>".format(ref=data.reference),<EOL><NUM_LIT:1>: "<STR_LIT>".format(ref=data.reference, alt=data.coded),<EOL><NUM_LIT:2>: "<STR_LIT>".format(alt=data.coded),<EOL>}<EOL>for sample, geno in zip(samples[k], genotypes):<EOL><INDENT>is_missing = np.isnan(geno)<EOL>hard_coded = None<EOL>if is_missing:<EOL><INDENT>geno = "<STR_LIT>"<EOL>hard_coded = "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>hard_coded = hard_call_mapping[int(round(geno, <NUM_LIT:0>))]<EOL><DEDENT>print(sample, data.variant.name, data.variant.chrom,<EOL>data.variant.pos, data.reference, data.coded,<EOL>geno, hard_coded, sep="<STR_LIT:U+002C>", file=output)<EOL><DEDENT>nb_extracted += <NUM_LIT:1><EOL><DEDENT>if nb_extracted == <NUM_LIT:0>:<EOL><INDENT>logger.warning("<STR_LIT>")<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>output.close()<EOL><DEDENT>
|
Writes the data in CSV format.
|
f8393:m2
|
def bed_writer(parser, keep, extract, args):
|
<EOL>bim_fn = args.output + "<STR_LIT>"<EOL>with open(bim_fn, "<STR_LIT:w>") as bim, PyPlink(args.output, "<STR_LIT:w>") as bed:<EOL><INDENT>samples = np.array(parser.get_samples(), dtype=str)<EOL>k = _get_sample_select(samples=samples, keep=keep)<EOL>with open(args.output + "<STR_LIT>", "<STR_LIT:w>") as fam:<EOL><INDENT>for sample in samples[k]:<EOL><INDENT>print(sample, sample, "<STR_LIT:0>", "<STR_LIT:0>", "<STR_LIT:0>", "<STR_LIT>", sep="<STR_LIT:U+0020>", file=fam)<EOL><DEDENT><DEDENT>generator = _get_generator(parser=parser, extract=extract, keep=k,<EOL>check_maf=args.maf)<EOL>nb_extracted = <NUM_LIT:0><EOL>for data in generator:<EOL><INDENT>genotypes = data.genotypes<EOL>genotypes[np.isnan(genotypes)] = -<NUM_LIT:1><EOL>genotypes = np.round(genotypes, <NUM_LIT:0>)<EOL>bed.write_genotypes(genotypes)<EOL>print(<EOL>_PLINK_CHROM_ENCODE.get(str(data.variant.chrom),<EOL>data.variant.chrom),<EOL>data.variant.name, "<STR_LIT:0>", data.variant.pos, data.coded,<EOL>data.reference, sep="<STR_LIT:\t>", file=bim,<EOL>)<EOL>nb_extracted += <NUM_LIT:1><EOL><DEDENT>if nb_extracted == <NUM_LIT:0>:<EOL><INDENT>logger.warning("<STR_LIT>")<EOL><DEDENT><DEDENT>
|
Writes BED/BIM/FAM files.
|
f8393:m3
|
def _get_sample_select(samples, keep):
|
k = np.ones_like(samples, dtype=bool)<EOL>if keep is not None:<EOL><INDENT>k = np.array([s in keep for s in samples], dtype=bool)<EOL>if np.sum(k) == <NUM_LIT:0>:<EOL><INDENT>logger.warning("<STR_LIT>")<EOL><DEDENT><DEDENT>return k<EOL>
|
Returns a vector of True/False to keep samples.
|
f8393:m4
|
def _get_generator(parser, extract, keep, check_maf):
|
if extract is not None:<EOL><INDENT>parser = Extractor(parser, names=extract)<EOL><DEDENT>for data in parser.iter_genotypes():<EOL><INDENT>data.genotypes = data.genotypes[keep]<EOL>if check_maf:<EOL><INDENT>data.code_minor()<EOL><DEDENT>yield data<EOL><DEDENT>
|
Generates the data (with extracted markers and kept samples, if required).
|
f8393:m5
|
def check_args(args):
|
<EOL>if args.output_format not in _streamable_format and args.output == "<STR_LIT:->":<EOL><INDENT>logger.error("<STR_LIT>"<EOL>"<STR_LIT>".format(args.output_format))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if args.output_format == "<STR_LIT>" and args.output != "<STR_LIT:->":<EOL><INDENT>if not args.output.endswith("<STR_LIT>"):<EOL><INDENT>args.output += "<STR_LIT>"<EOL><DEDENT><DEDENT>elif args.output_format == "<STR_LIT>":<EOL><INDENT>if args.output.endswith("<STR_LIT>"):<EOL><INDENT>args.output = args.output[:-<NUM_LIT:4>]<EOL><DEDENT><DEDENT>
|
Checks the arguments and options.
|
f8393:m6
|
def parse_args():
|
parser = argparse.ArgumentParser(<EOL>prog="<STR_LIT>",<EOL>description="<STR_LIT>"<EOL>"<STR_LIT>",<EOL>epilog="<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>",<EOL>)<EOL>group = parser.add_argument_group("<STR_LIT>")<EOL>group.add_argument(<EOL>"<STR_LIT>", "<STR_LIT>", metavar="<STR_LIT>", required=True, type=str,<EOL>dest="<STR_LIT>", choices=set(parsers.keys()),<EOL>help="<STR_LIT>",<EOL>)<EOL>group.add_argument(<EOL>nargs="<STR_LIT:+>", dest="<STR_LIT>", type=str, metavar="<STR_LIT>",<EOL>help="<STR_LIT>",<EOL>)<EOL>group = parser.add_argument_group("<STR_LIT>")<EOL>group.add_argument(<EOL>"<STR_LIT>", "<STR_LIT>", metavar="<STR_LIT>", type=argparse.FileType("<STR_LIT:r>"),<EOL>help="<STR_LIT>",<EOL>)<EOL>group.add_argument(<EOL>"<STR_LIT>", "<STR_LIT>", metavar="<STR_LIT>", type=argparse.FileType("<STR_LIT:r>"),<EOL>help="<STR_LIT>",<EOL>)<EOL>group.add_argument(<EOL>"<STR_LIT>", action="<STR_LIT:store_true>",<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>",<EOL>)<EOL>group = parser.add_argument_group("<STR_LIT>")<EOL>group.add_argument(<EOL>"<STR_LIT>", "<STR_LIT>", metavar="<STR_LIT>", type=str, required=True,<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>",<EOL>)<EOL>group.add_argument(<EOL>"<STR_LIT>", metavar="<STR_LIT>", default="<STR_LIT>", type=str,<EOL>choices={"<STR_LIT>", "<STR_LIT>", "<STR_LIT>"},<EOL>help="<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>",<EOL>)<EOL>return parser.parse_args()<EOL>
|
Parses the arguments and options.
|
f8393:m7
|
def _get_variant_silent(parser, variant):
|
prev_log = config.LOG_NOT_FOUND<EOL>config.LOG_NOT_FOUND = False<EOL>results = parser.get_variant_genotypes(variant)<EOL>config.LOG_NOT_FOUND = prev_log<EOL>return results<EOL>
|
Gets a variant from the parser while disabling logging.
|
f8394:m0
|
def complement_alleles(s):
|
trans = str.maketrans("<STR_LIT>", "<STR_LIT>")<EOL>return s.translate(trans)[::-<NUM_LIT:1>]<EOL>
|
Complement an allele string.
This will apply the following translation table to the alleles:
A -> T
G -> C
and vice versa.
Other characters will be left as-is.
|
f8396:m0
|
def complement_alleles(self):
|
self.alleles = self._encode_alleles(<EOL>[complement_alleles(i) for i in self.alleles]<EOL>)<EOL>
|
Complement the alleles of this variant.
This will call this module's `complement_alleles` function.
Note that this will not create a new object, but modify the state of
the current instance.
|
f8396:c1:m12
|
def __eq__(self, other):
|
locus_match = self.locus_eq(other)<EOL>if self.alleles is None or other.alleles is None:<EOL><INDENT>return locus_match<EOL><DEDENT>overlap = len(self.alleles_set & other.alleles_set) >= <NUM_LIT:2><EOL>return locus_match and overlap<EOL>
|
Tests for the equality between two variants.
If any variant has undefined alleles, we return the locus equality.
Else, we return True if at least two alleles are the same in both
variants.
|
f8396:c1:m13
|
def __init__(self, variant, genotypes, reference, coded, multiallelic):
|
self.variant = variant<EOL>self.genotypes = genotypes<EOL>self.reference = str(reference).upper()<EOL>self.multiallelic = multiallelic<EOL>if variant.alleles and (self.reference not in variant.alleles):<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(self.reference, variant.alleles)<EOL>)<EOL><DEDENT>self.coded = str(coded).upper()<EOL>if variant.alleles and (self.coded not in variant.alleles):<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(self.coded, variant.alleles)<EOL>)<EOL><DEDENT>
|
Class holding information on a variant as well as a vector of
genotypes.
The "reference" allele corresponds to 0 and the "coded" allele
corresponds to 1.
|
f8396:c3:m0
|
def flip(self):
|
self.flip_coded()<EOL>
|
Flips the reference and coded alleles of this instance.
|
f8396:c3:m2
|
def flip_coded(self):
|
self.genotypes = <NUM_LIT:2> - self.genotypes<EOL>self.reference, self.coded = self.coded, self.reference<EOL>
|
Flips the coding of the alleles.
|
f8396:c3:m3
|
def flip_strand(self):
|
self.reference = complement_alleles(self.reference)<EOL>self.coded = complement_alleles(self.coded)<EOL>self.variant.complement_alleles()<EOL>
|
Flips the strand of the alleles.
|
f8396:c3:m4
|
def coded_freq(self):
|
return np.nanmean(self.genotypes) / <NUM_LIT:2><EOL>
|
Gets the frequency of the coded allele.
|
f8396:c3:m6
|
def code_minor(self):
|
coded_freq = self.coded_freq()<EOL>if coded_freq > <NUM_LIT:0.5>:<EOL><INDENT>self.flip_coded()<EOL><DEDENT>
|
Encode the genotypes with respect to the minor allele.
This confirms that "reference" is the major allele and that "coded" is
the minor allele.
In other words, this function can be used to make sure that the
genotype value is the number of minor alleles for an individual.
|
f8396:c3:m7
|
def __init__(self, chrom_to_reader):
|
self.chrom_to_reader = chrom_to_reader<EOL>samples = None<EOL>self.n_vars = <NUM_LIT:0><EOL>for chrom, reader in self.chrom_to_reader.items():<EOL><INDENT>self.n_vars += reader.get_number_variants()<EOL>cur_samples = reader.get_samples()<EOL>if samples is None:<EOL><INDENT>samples = cur_samples<EOL><DEDENT>else:<EOL><INDENT>if samples != cur_samples:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>samples = cur_samples<EOL><DEDENT><DEDENT>self.samples = samples<EOL>
|
Reader to handle genotype access using files split by chromosome.
A dict mapping chromosomes to instances of GenotypesReader should be
passed.
|
f8396:c4:m0
|
def __init__(self):
|
raise NotImplementedError()<EOL>
|
Abstract class to read genotypes data.
|
f8396:c5:m0
|
def iter_variants(self):
|
raise NotImplementedError()<EOL>
|
Iterate over variants without reading the actual genotypes.
This is a generator of Variant instances. Also note that subclasses
can define their own Variant subclasses to represent additional
fields.
To improve consistency, the ImputedVariant class is provided. It
defines a single additional field ("quality") containing a float
between 0 and 1.
|
f8396:c5:m5
|
def iter_genotypes(self):
|
raise NotImplementedError()<EOL>
|
Iterate over variants and read the genotypes.
This method yields instances of Genotypes.
|
f8396:c5:m6
|
def get_variant_genotypes(self, variant):
|
raise NotImplementedError()<EOL>
|
Get the genotypes for a given variant.
Args:
variant (Variant): A variant for which to retrieve genotypes.
Returns:
list: A list of Genotypes. This is a list because for
multi-allelics the representation needs multiple entries.
|
f8396:c5:m7
|
def get_variant_by_name(self, name):
|
raise NotImplementedError()<EOL>
|
Get the genotypes for a given variant (by name).
Args:
name (str): The name of the variant to retrieve the genotypes.
Returns:
list: A list of Genotypes. This is a list in order to keep the same
behaviour as the other functions.
|
f8396:c5:m8
|
def iter_variants_by_names(self, names):
|
for name in names:<EOL><INDENT>for result in self.get_variant_by_name(name):<EOL><INDENT>yield result<EOL><DEDENT><DEDENT>
|
Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction.
|
f8396:c5:m9
|
def get_variants_in_region(self, chrom, start, end):
|
raise NotImplementedError()<EOL>
|
Get the variants in a region.
Args:
chrom (str): The chromosome (e.g. 'X' or '3').
start (int): The start position for the region.
end (int): The end position for the region.
|
f8396:c5:m10
|
def get_samples(self):
|
raise NotImplementedError()<EOL>
|
Get an ordered collection of the samples in the genotype container.
|
f8396:c5:m11
|
def get_number_samples(self):
|
raise NotImplementedError()<EOL>
|
Return the number of samples.
|
f8396:c5:m12
|
def get_number_variants(self):
|
raise NotImplementedError()<EOL>
|
Return the number of variants in the file.
|
f8396:c5:m13
|
def flip_alleles(genotypes):
|
warnings.warn("<STR_LIT>", DeprecationWarning)<EOL>genotypes.reference, genotypes.coded = (genotypes.coded,<EOL>genotypes.reference)<EOL>genotypes.genotypes = <NUM_LIT:2> - genotypes.genotypes<EOL>return genotypes<EOL>
|
Flip the alleles of a Genotypes instance.
|
f8397:m0
|
def code_minor(genotypes):
|
warnings.warn("<STR_LIT>", DeprecationWarning)<EOL>_, minor_coded = maf(genotypes)<EOL>if not minor_coded:<EOL><INDENT>return flip_alleles(genotypes)<EOL><DEDENT>return genotypes<EOL>
|
Encode the genotypes with respect to the minor allele.
This confirms that "reference" is the major allele and that "coded" is
the minor allele.
In other words, this function can be used to make sure that the genotype
value is the number of minor alleles for an individual.
|
f8397:m1
|
def maf(genotypes):
|
warnings.warn("<STR_LIT>", DeprecationWarning)<EOL>g = genotypes.genotypes<EOL>maf = np.nansum(g) / (<NUM_LIT:2> * np.sum(~np.isnan(g)))<EOL>if maf > <NUM_LIT:0.5>:<EOL><INDENT>maf = <NUM_LIT:1> - maf<EOL>return maf, False<EOL><DEDENT>return maf, True<EOL>
|
Computes the MAF and returns it along with a boolean indicating
whether the minor allele is currently the coded allele.
|
f8397:m2
|
def genotype_to_df(g, samples, as_string=False):
|
name = g.variant.name if g.variant.name else "<STR_LIT>"<EOL>df = pd.DataFrame(g.genotypes, index=samples, columns=[name])<EOL>if as_string:<EOL><INDENT>df["<STR_LIT>"] = None<EOL>hard_calls = df[name].round()<EOL>df.loc[hard_calls == <NUM_LIT:0>, "<STR_LIT>"] = "<STR_LIT>".format(g.reference)<EOL>df.loc[hard_calls == <NUM_LIT:1>, "<STR_LIT>"] = "<STR_LIT>".format(g.reference,<EOL>g.coded)<EOL>df.loc[hard_calls == <NUM_LIT:2>, "<STR_LIT>"] = "<STR_LIT>".format(g.coded)<EOL>df = df[["<STR_LIT>"]]<EOL>df.columns = [name]<EOL><DEDENT>return df<EOL>
|
Convert a genotype object to a pandas dataframe.
By default, the encoded values are stored, but the as_string argument can
be used to represent it as characters (alleles) instead.
|
f8397:m4
|
def compute_ld(cur_geno, other_genotypes, r2=False):
|
<EOL>norm_cur = normalize_genotypes(cur_geno)<EOL>norm_others = np.stack(<EOL>tuple(normalize_genotypes(g) for g in other_genotypes),<EOL>axis=<NUM_LIT:1>,<EOL>)<EOL>assert norm_cur.shape[<NUM_LIT:0>] == norm_others.shape[<NUM_LIT:0>]<EOL>n = (<EOL>~np.isnan(norm_cur.reshape(norm_cur.shape[<NUM_LIT:0>], <NUM_LIT:1>)) *<EOL>~np.isnan(norm_others)<EOL>).sum(axis=<NUM_LIT:0>)<EOL>r = pd.Series(<EOL>np.dot(<EOL>np.nan_to_num(norm_cur), np.nan_to_num(norm_others) / n<EOL>),<EOL>index=[g.variant.name for g in other_genotypes],<EOL>name="<STR_LIT>" if r2 else "<STR_LIT:r>",<EOL>)<EOL>r.loc[r > <NUM_LIT:1>] = <NUM_LIT:1><EOL>r.loc[r < -<NUM_LIT:1>] = -<NUM_LIT:1><EOL>if r2:<EOL><INDENT>return r ** <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>return r<EOL><DEDENT>
|
Compute LD between a marker and a list of markers.
Args:
cur_geno (Genotypes): The genotypes of the marker.
other_genotypes (list): A list of genotypes.
Returns:
numpy.array: An array containing the r or r**2 values between cur_geno
and other_genotypes.
Note:
The genotypes will automatically be normalized using (x - mean) / std.
|
f8397:m5
|
def normalize_genotypes(genotypes):
|
genotypes = genotypes.genotypes<EOL>return (genotypes - np.nanmean(genotypes)) / np.nanstd(genotypes)<EOL>
|
Normalize the genotypes.
Args:
genotypes (Genotypes): The genotypes to normalize.
Returns:
numpy.array: The normalized genotypes.
|
f8397:m6
|
def check_python_version():
|
python_major, python_minor = sys.version_info[:<NUM_LIT:2>]<EOL>if python_major != <NUM_LIT:3> or python_minor < <NUM_LIT:4>:<EOL><INDENT>sys.stderr.write("<STR_LIT>"<EOL>"<STR_LIT>")<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>
|
Checks the python version, exits if < 3.4.
|
f8399:m0
|
def _python_cmd(*args):
|
args = (sys.executable,) + args<EOL>return subprocess.call(args) == <NUM_LIT:0><EOL>
|
Return True if the command succeeded.
|
f8400:m0
|
def get_zip_class():
|
class ContextualZipFile(zipfile.ZipFile):<EOL><INDENT>def __enter__(self):<EOL><INDENT>return self<EOL><DEDENT>def __exit__(self, type, value, traceback):<EOL><INDENT>self.close<EOL><DEDENT><DEDENT>return zipfile.ZipFile if hasattr(zipfile.ZipFile, '<STR_LIT>') elseContextualZipFile<EOL>
|
Supplement ZipFile class to support context manager for Python 2.6
|
f8400:m3
|
def _clean_check(cmd, target):
|
try:<EOL><INDENT>subprocess.check_call(cmd)<EOL><DEDENT>except subprocess.CalledProcessError:<EOL><INDENT>if os.access(target, os.F_OK):<EOL><INDENT>os.unlink(target)<EOL><DEDENT>raise<EOL><DEDENT>
|
Run the command to download target. If the command fails, clean up before
re-raising the error.
|
f8400:m7
|
def download_file_powershell(url, target):
|
target = os.path.abspath(target)<EOL>cmd = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>"<STR_LIT>" % vars(),<EOL>]<EOL>_clean_check(cmd, target)<EOL>
|
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
|
f8400:m8
|
def download_file_insecure(url, target):
|
try:<EOL><INDENT>from urllib.request import urlopen<EOL><DEDENT>except ImportError:<EOL><INDENT>from urllib2 import urlopen<EOL><DEDENT>src = dst = None<EOL>try:<EOL><INDENT>src = urlopen(url)<EOL>data = src.read()<EOL>dst = open(target, "<STR_LIT:wb>")<EOL>dst.write(data)<EOL><DEDENT>finally:<EOL><INDENT>if src:<EOL><INDENT>src.close()<EOL><DEDENT>if dst:<EOL><INDENT>dst.close()<EOL><DEDENT><DEDENT>
|
Use Python to download the file, even though it cannot authenticate the
connection.
|
f8400:m14
|
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,<EOL>to_dir=os.curdir, delay=<NUM_LIT:15>, downloader_factory=get_best_downloader):
|
<EOL>to_dir = os.path.abspath(to_dir)<EOL>zip_name = "<STR_LIT>" % version<EOL>url = download_base + zip_name<EOL>saveto = os.path.join(to_dir, zip_name)<EOL>if not os.path.exists(saveto): <EOL><INDENT>log.warn("<STR_LIT>", url)<EOL>downloader = downloader_factory()<EOL>downloader(url, saveto)<EOL><DEDENT>return os.path.realpath(saveto)<EOL>
|
Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
|
f8400:m16
|
def _build_install_args(options):
|
return ['<STR_LIT>'] if options.user_install else []<EOL>
|
Build the arguments to 'python setup.py install' on the setuptools package
|
f8400:m17
|
def _parse_args():
|
parser = optparse.OptionParser()<EOL>parser.add_option(<EOL>'<STR_LIT>', dest='<STR_LIT>', action='<STR_LIT:store_true>', default=False,<EOL>help='<STR_LIT>')<EOL>parser.add_option(<EOL>'<STR_LIT>', dest='<STR_LIT>', metavar="<STR_LIT>",<EOL>default=DEFAULT_URL,<EOL>help='<STR_LIT>')<EOL>parser.add_option(<EOL>'<STR_LIT>', dest='<STR_LIT>', action='<STR_LIT>',<EOL>const=lambda: download_file_insecure, default=get_best_downloader,<EOL>help='<STR_LIT>'<EOL>)<EOL>parser.add_option(<EOL>'<STR_LIT>', help="<STR_LIT>",<EOL>default=DEFAULT_VERSION,<EOL>)<EOL>options, args = parser.parse_args()<EOL>return options<EOL>
|
Parse the command line for options
|
f8400:m18
|
def main():
|
options = _parse_args()<EOL>archive = download_setuptools(<EOL>version=options.version,<EOL>download_base=options.download_base,<EOL>downloader_factory=options.downloader_factory,<EOL>)<EOL>return _install(archive, _build_install_args(options))<EOL>
|
Install or upgrade setuptools and EasyInstall
|
f8400:m19
|
def hsv_to_rgb(hsv):
|
<EOL>h, s, v = hsv<EOL>if s == <NUM_LIT:0>:<EOL><INDENT>return (v, v, v)<EOL><DEDENT>h /= <NUM_LIT><EOL>i = math.floor(h)<EOL>f = h-i<EOL>p = v*(<NUM_LIT:1.0>-s)<EOL>q = v*(<NUM_LIT:1.0>-s*f)<EOL>t = v*(<NUM_LIT:1.0>-s*(<NUM_LIT:1.0>-f))<EOL>if i == <NUM_LIT:0>:<EOL><INDENT>return (v, t, p)<EOL><DEDENT>elif i == <NUM_LIT:1>:<EOL><INDENT>return (q, v, p)<EOL><DEDENT>elif i == <NUM_LIT:2>:<EOL><INDENT>return (p, v, t)<EOL><DEDENT>elif i == <NUM_LIT:3>:<EOL><INDENT>return (p, q, v)<EOL><DEDENT>elif i == <NUM_LIT:4>:<EOL><INDENT>return (t, p, v)<EOL><DEDENT>else:<EOL><INDENT>return (v, p, q)<EOL><DEDENT>
|
Converts a tuple of hue, saturation, value to a tuple of red, green, blue.
Hue should be an angle from 0.0 to 359.0. Saturation and value should be a
value from 0.0 to 1.0, where saturation controls the intensity of the hue and
value controls the brightness.
|
f8403:m0
|
def __init__(self, rs, en, d4, d5, d6, d7, cols, lines, backlight=None,<EOL>invert_polarity=True,<EOL>enable_pwm=False,<EOL>gpio=GPIO.get_platform_gpio(),<EOL>pwm=PWM.get_platform_pwm(),<EOL>initial_backlight=<NUM_LIT:1.0>):
|
<EOL>self._cols = cols<EOL>self._lines = lines<EOL>self._gpio = gpio<EOL>self._rs = rs<EOL>self._en = en<EOL>self._d4 = d4<EOL>self._d5 = d5<EOL>self._d6 = d6<EOL>self._d7 = d7<EOL>self._backlight = backlight<EOL>self._pwm_enabled = enable_pwm<EOL>self._pwm = pwm<EOL>self._blpol = not invert_polarity<EOL>for pin in (rs, en, d4, d5, d6, d7):<EOL><INDENT>gpio.setup(pin, GPIO.OUT)<EOL><DEDENT>if backlight is not None:<EOL><INDENT>if enable_pwm:<EOL><INDENT>pwm.start(backlight, self._pwm_duty_cycle(initial_backlight))<EOL><DEDENT>else:<EOL><INDENT>gpio.setup(backlight, GPIO.OUT)<EOL>gpio.output(backlight, self._blpol if initial_backlight else not self._blpol)<EOL><DEDENT><DEDENT>self.write8(<NUM_LIT>)<EOL>self.write8(<NUM_LIT>)<EOL>self.displaycontrol = LCD_DISPLAYON | LCD_CURSOROFF | LCD_BLINKOFF<EOL>self.displayfunction = LCD_4BITMODE | LCD_1LINE | LCD_2LINE | LCD_5x8DOTS<EOL>self.displaymode = LCD_ENTRYLEFT | LCD_ENTRYSHIFTDECREMENT<EOL>self.write8(LCD_DISPLAYCONTROL | self.displaycontrol)<EOL>self.write8(LCD_FUNCTIONSET | self.displayfunction)<EOL>self.write8(LCD_ENTRYMODESET | self.displaymode) <EOL>self.clear()<EOL>
|
Initialize the LCD. RS, EN, and D4...D7 parameters should be the pins
connected to the LCD RS, clock enable, and data line 4 through 7 connections.
The LCD will be used in its 4-bit mode so these 6 lines are the only ones
required to use the LCD. You must also pass in the number of columns and
lines on the LCD.
If you would like to control the backlight, pass in the pin connected to
the backlight with the backlight parameter. The invert_polarity boolean
controls if the backlight is one with a LOW signal or HIGH signal. The
default invert_polarity value is True, i.e. the backlight is on with a
LOW signal.
You can enable PWM of the backlight pin to have finer control on the
brightness. To enable PWM make sure your hardware supports PWM on the
provided backlight pin and set enable_pwm to True (the default is False).
The appropriate PWM library will be used depending on the platform, but
you can provide an explicit one with the pwm parameter.
The initial state of the backlight is ON, but you can set it to an
explicit initial state with the initial_backlight parameter (0 is off,
1 is on/full bright).
You can optionally pass in an explicit GPIO class,
for example if you want to use an MCP230xx GPIO extender. If you don't
pass in an GPIO instance, the default GPIO for the running platform will
be used.
|
f8408:c0:m0
|
def home(self):
|
self.write8(LCD_RETURNHOME) <EOL>self._delay_microseconds(<NUM_LIT>)<EOL>
|
Move the cursor back to its home (first line and first column).
|
f8408:c0:m1
|
def clear(self):
|
self.write8(LCD_CLEARDISPLAY) <EOL>self._delay_microseconds(<NUM_LIT>)<EOL>
|
Clear the LCD.
|
f8408:c0:m2
|
def set_cursor(self, col, row):
|
<EOL>if row > self._lines:<EOL><INDENT>row = self._lines - <NUM_LIT:1><EOL><DEDENT>self.write8(LCD_SETDDRAMADDR | (col + LCD_ROW_OFFSETS[row]))<EOL>
|
Move the cursor to an explicit column and row position.
|
f8408:c0:m3
|
def enable_display(self, enable):
|
if enable:<EOL><INDENT>self.displaycontrol |= LCD_DISPLAYON<EOL><DEDENT>else:<EOL><INDENT>self.displaycontrol &= ~LCD_DISPLAYON<EOL><DEDENT>self.write8(LCD_DISPLAYCONTROL | self.displaycontrol)<EOL>
|
Enable or disable the display. Set enable to True to enable.
|
f8408:c0:m4
|
def show_cursor(self, show):
|
if show:<EOL><INDENT>self.displaycontrol |= LCD_CURSORON<EOL><DEDENT>else:<EOL><INDENT>self.displaycontrol &= ~LCD_CURSORON<EOL><DEDENT>self.write8(LCD_DISPLAYCONTROL | self.displaycontrol)<EOL>
|
Show or hide the cursor. Cursor is shown if show is True.
|
f8408:c0:m5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.