Schema (one record per function):
    code                     string
    signature                string
    docstring                string
    loss_without_docstring   float64
    loss_with_docstring      float64
    factor                   float64
pmf = Pmf(dict(t), name)
pmf.Normalize()
return pmf
def MakePmfFromItems(t, name='')
Makes a PMF from a sequence of value-probability pairs.
Args:
    t: sequence of value-probability pairs
    name: string name for this PMF
Returns: Pmf object
4.605548
9.027739
0.510155
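A minimal usage sketch, assuming the functions in this section are importable from a module (the name thinkbayes is an assumption):

    # Hypothetical usage; the module name is an assumption.
    from thinkbayes import MakePmfFromItems

    pmf = MakePmfFromItems([(1, 0.2), (2, 0.3), (3, 0.5)], name='example')
    print(pmf.Prob(2))  # 0.3; the input already sums to 1, so Normalize changes nothing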
if name is None:
    name = hist.name

# make a copy of the dictionary
d = dict(hist.GetDict())
pmf = Pmf(d, name)
pmf.Normalize()
return pmf
def MakePmfFromHist(hist, name=None)
Makes a normalized PMF from a Hist object.
Args:
    hist: Hist object
    name: string name
Returns: Pmf object
4.292873
4.570424
0.939272
if name is None:
    name = cdf.name

pmf = Pmf(name=name)
prev = 0.0
for val, prob in cdf.Items():
    pmf.Incr(val, prob - prev)
    prev = prob
return pmf
def MakePmfFromCdf(cdf, name=None)
Makes a normalized Pmf from a Cdf object.
Args:
    cdf: Cdf object
    name: string name for the new Pmf
Returns: Pmf object
2.911285
3.459261
0.841592
mix = Pmf(name=name)
for pmf, p1 in metapmf.Items():
    for x, p2 in pmf.Items():
        mix.Incr(x, p1 * p2)
return mix
def MakeMixture(metapmf, name='mix')
Make a mixture distribution.
Args:
    metapmf: Pmf that maps from Pmfs to probs.
    name: string name for the new Pmf.
Returns: Pmf object.
3.817086
4.676869
0.816163
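A sketch of MakeMixture with a 50/50 meta-distribution over a 4-sided and a 6-sided die (this assumes Pmf objects are hashable, as dict keys require, and the hypothetical thinkbayes module name):

    from thinkbayes import Pmf, MakePmfFromItems, MakeMixture

    d4 = MakePmfFromItems([(i, 1.0 / 4) for i in range(1, 5)], name='d4')
    d6 = MakePmfFromItems([(i, 1.0 / 6) for i in range(1, 7)], name='d6')

    metapmf = Pmf()      # maps each Pmf to its mixture weight
    metapmf.Set(d4, 0.5)
    metapmf.Set(d6, 0.5)

    mix = MakeMixture(metapmf)
    print(mix.Prob(1))   # 0.5 * 1/4 + 0.5 * 1/6 ~= 0.2083
    print(mix.Prob(6))   # 0.5 * 0   + 0.5 * 1/6 ~= 0.0833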
pmf = Pmf()
for x in numpy.linspace(low, high, n):
    pmf.Set(x, 1)
pmf.Normalize()
return pmf
def MakeUniformPmf(low, high, n)
Make a uniform Pmf.
low: lowest value (inclusive)
high: highest value (inclusive)
n: number of values
2.404151
3.158115
0.761261
runsum = 0
xs = []
cs = []
for value, count in sorted(items):
    runsum += count
    xs.append(value)
    cs.append(runsum)

total = float(runsum)
ps = [c / total for c in cs]
cdf = Cdf(xs, ps, name)
return cdf
def MakeCdfFromItems(items, name='')
Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
    items: unsorted sequence of (value, frequency) pairs
    name: string name for this CDF
Returns: Cdf object
3.3045
3.374561
0.979238
if name is None:
    name = pmf.name
return MakeCdfFromItems(pmf.Items(), name)
def MakeCdfFromPmf(pmf, name=None)
Makes a CDF from a Pmf object.
Args:
    pmf: Pmf.Pmf object
    name: string name for the data.
Returns: Cdf object
4.121324
6.364226
0.647577
hist = MakeHistFromList(t)
d = hist.GetDict()
return MakeSuiteFromDict(d, name)
def MakeSuiteFromList(t, name='')
Makes a suite from an unsorted sequence of values.
Args:
    t: sequence of numbers
    name: string name for this suite
Returns: Suite object
8.299647
9.549314
0.869135
if name is None:
    name = hist.name

# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, name)
def MakeSuiteFromHist(hist, name=None)
Makes a normalized suite from a Hist object.
Args:
    hist: Hist object
    name: string name
Returns: Suite object
4.796177
6.161937
0.778355
suite = Suite(name=name)
suite.SetDict(d)
suite.Normalize()
return suite
def MakeSuiteFromDict(d, name='')
Makes a suite from a map from values to probabilities.
Args:
    d: dictionary that maps values to probabilities
    name: string name for this suite
Returns: Suite object
4.797902
10.009813
0.47932
if name is None:
    name = cdf.name

suite = Suite(name=name)
prev = 0.0
for val, prob in cdf.Items():
    suite.Incr(val, prob - prev)
    prev = prob
return suite
def MakeSuiteFromCdf(cdf, name=None)
Makes a normalized Suite from a Cdf object.
Args:
    cdf: Cdf object
    name: string name for the new Suite
Returns: Suite object
4.121087
4.53552
0.908625
p = percentage / 100.0
total = 0
for val, prob in pmf.Items():
    total += prob
    if total >= p:
        return val
def Percentile(pmf, percentage)
Computes a percentile of a given Pmf.
percentage: float 0-100
3.195103
3.716125
0.859794
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def CredibleInterval(pmf, percentage=90)
Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
    pmf: Pmf object representing a posterior distribution
    percentage: float between 0 and 100
Returns: sequence of two floats, low and high
4.021679
5.049444
0.79646
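CredibleInterval converts the Pmf to a Cdf and cuts off equal tails, so for a Gaussian posterior a 90% interval should span roughly mu +/- 1.64 sigma. A hedged sketch (module name assumed):

    from thinkbayes import MakeGaussianPmf, CredibleInterval

    posterior = MakeGaussianPmf(mu=5.0, sigma=1.0, num_sigmas=4)
    low, high = CredibleInterval(posterior, 90)
    print(low, high)  # approximately 3.36 and 6.64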
total = 0.0
for v1, p1 in pmf1.Items():
    for v2, p2 in pmf2.Items():
        if v1 < v2:
            total += p1 * p2
return total
def PmfProbLess(pmf1, pmf2)
Probability that a value from pmf1 is less than a value from pmf2.
Args:
    pmf1: Pmf object
    pmf2: Pmf object
Returns: float probability
2.186618
2.593497
0.843116
pmf = MakePmfFromList(RandomSum(dists) for i in xrange(n))
return pmf
def SampleSum(dists, n)
Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
10.112516
10.03589
1.007635
return scipy.stats.norm.pdf(x, mu, sigma)
def EvalGaussianPdf(x, mu, sigma)
Computes the PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
2.828136
5.856881
0.482874
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma

for x in numpy.linspace(low, high, n):
    p = EvalGaussianPdf(x, mu, sigma)
    pmf.Set(x, p)
pmf.Normalize()
return pmf
def MakeGaussianPmf(mu, sigma, num_sigmas, n=201)
Makes a discrete approximation (Pmf) of a Gaussian distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
2.47197
2.986527
0.827707
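A quick sanity check: the discrete approximation should recover the input moments (Mean and Var appear later in this section; module name assumed):

    from math import sqrt
    from thinkbayes import MakeGaussianPmf

    pmf = MakeGaussianPmf(mu=0.0, sigma=2.0, num_sigmas=5)
    print(pmf.Mean())       # close to 0.0
    print(sqrt(pmf.Var()))  # close to 2.0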
return scipy.stats.binom.pmf(k, n, p)
def EvalBinomialPmf(k, n, p)
Evaluates the binomial pmf.
Returns the probability of k successes in n trials with probability p.
2.616238
4.40922
0.593356
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return scipy.stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / math.factorial(k)
def EvalPoissonPmf(k, lam)
Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
6.716494
9.422911
0.712783
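The closed form lam**k * exp(-lam) / k! also behaves sensibly at lam=0, which is the case the comment above works around (module name assumed):

    from thinkbayes import EvalPoissonPmf

    print(EvalPoissonPmf(3, 2.0))  # 2**3 * exp(-2) / 3! ~= 0.1804
    print(EvalPoissonPmf(2, 0.0))  # 0.0: no events are possible at rate 0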
pmf = Pmf()
for k in xrange(0, high + 1, step):
    p = EvalPoissonPmf(k, lam)
    pmf.Set(k, p)
pmf.Normalize()
return pmf
def MakePoissonPmf(lam, high, step=1)
Makes a discrete approximation (Pmf) of a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
2.855529
3.393513
0.841467
pmf = Pmf()
for x in numpy.linspace(0, high, n):
    p = EvalExponentialPdf(x, lam)
    pmf.Set(x, p)
pmf.Normalize()
return pmf
def MakeExponentialPmf(lam, high, n=200)
Makes a discrete approximation (Pmf) of an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
2.874615
3.701621
0.776583
x = ROOT2 * erfinv(2 * p - 1)
return mu + x * sigma
def GaussianCdfInverse(p, mu=0, sigma=1)
Evaluates the inverse CDF of the gaussian distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
    p: float
    mu: mean parameter
    sigma: standard deviation parameter
Returns: float
9.205755
16.210819
0.567877
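Since the quantile function is mu + sigma * sqrt(2) * erfinv(2p - 1), the result should agree with scipy's ppf (module name assumed):

    import scipy.stats
    from thinkbayes import GaussianCdfInverse

    print(GaussianCdfInverse(0.975))                # ~1.96
    print(scipy.stats.norm.ppf(0.975))              # same value
    print(GaussianCdfInverse(0.5, mu=10, sigma=2))  # 10.0, the median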
return n * log(n) - k * log(k) - (n - k) * log(n - k)
def LogBinomialCoef(n, k)
Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
2.809964
3.704309
0.758566
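This keeps only the leading Stirling terms, dropping an O(log n) correction, so it slightly overestimates. A cross-check against the exact value via lgamma (module name assumed):

    import math
    from thinkbayes import LogBinomialCoef

    n, k = 1000, 300
    exact = math.lgamma(n + 1) - math.lgamma(k + 1) - math.lgamma(n - k + 1)
    print(LogBinomialCoef(n, k) - exact)  # a few nats, small next to the ~600-nat total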
return self._Bisect(x, self.xs, self.ys)
def Lookup(self, x)
Looks up x and returns the corresponding value of y.
14.210403
13.650028
1.041053
return self._Bisect(y, self.ys, self.xs)
def Reverse(self, y)
Looks up y and returns the corresponding value of x.
16.839823
15.380613
1.094873
if x <= xs[0]:
    return ys[0]
if x >= xs[-1]:
    return ys[-1]

i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
def _Bisect(self, x, xs, ys)
Helper: looks up x in xs and linearly interpolates the corresponding y.
1.695212
1.711598
0.990427
for value, prob in values.iteritems():
    self.Set(value, prob)
def InitMapping(self, values)
Initializes with a map from value to probability.
values: map from value to probability
7.5924
4.711326
1.611521
for value, prob in values.Items():
    self.Set(value, prob)
def InitPmf(self, values)
Initializes with a Pmf.
values: Pmf object
6.195427
6.359097
0.974262
new = copy.copy(self)
new.d = copy.copy(self.d)
new.name = name if name is not None else self.name
return new
def Copy(self, name=None)
Returns a copy.
Makes a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object.
Args:
    name: string name for the new Hist
3.193583
3.744691
0.852829
new = self.Copy()
new.d.clear()

for val, prob in self.Items():
    new.Set(val * factor, prob)
return new
def Scale(self, factor)
Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
9.24218
8.550766
1.08086
if self.log:
    raise ValueError("Pmf/Hist already under a log transform")
self.log = True

if m is None:
    m = self.MaxLike()

# iterate over a snapshot of the items because Remove modifies the dict
for x, p in self.d.items():
    if p:
        self.Set(x, math.log(p / m))
    else:
        self.Remove(x)
def Log(self, m=None)
Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
6.406359
5.434596
1.178811
if not self.log:
    raise ValueError("Pmf/Hist not under a log transform")
self.log = False

if m is None:
    m = self.MaxLike()

for x, p in self.d.iteritems():
    self.Set(x, math.exp(p - m))
def Exp(self, m=None)
Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
8.186009
7.907649
1.035201
for val, prob in sorted(self.d.iteritems()):
    print(val, prob)
def Print(self)
Prints the values and freqs/probs in ascending order.
12.495263
5.482506
2.279115
self.d[x] = self.d.get(x, 0) + term
def Incr(self, x, term=1)
Increments the freq/prob associated with the value x.
Args:
    x: number value
    term: how much to increment by
3.822433
4.437024
0.861486
self.d[x] = self.d.get(x, 0) * factor
def Mult(self, x, factor)
Scales the freq/prob associated with the value x.
Args:
    x: number value
    factor: how much to multiply by
4.443387
5.106601
0.870126
for val, freq in self.Items():
    if freq > other.Freq(val):
        return False
return True
def IsSubset(self, other)
Checks whether the values in this histogram are a subset of the values in the given histogram.
6.413909
5.453227
1.176168
for val, freq in other.Items():
    self.Incr(val, -freq)
def Subtract(self, other)
Subtracts the values in the given histogram from this histogram.
11.842446
6.854342
1.727729
t = [prob for (val, prob) in self.d.iteritems() if val > x]
return sum(t)
def ProbGreater(self, x)
Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
6.569071
7.455197
0.88114
if self.log:
    raise ValueError("Pmf is under a log transform")

total = self.Total()
if total == 0.0:
    raise ValueError('Normalize: total probability is zero.')

factor = float(fraction) / total
for x in self.d:
    self.d[x] *= factor
return total
def Normalize(self, fraction=1.0)
Normalizes this PMF so the sum of all probs is fraction.
Args:
    fraction: what the total should be after normalization
Returns: the total probability before normalizing
6.096394
5.842916
1.043382
if len(self.d) == 0:
    raise ValueError('Pmf contains no values.')

target = random.random()
total = 0.0
for x, p in self.d.iteritems():
    total += p
    if total >= target:
        return x

# we shouldn't get here
assert False
def Random(self)
Chooses a random element from this PMF.
Returns: float value from the Pmf
4.396943
3.765661
1.167642
mu = 0.0
for x, p in self.d.iteritems():
    mu += p * x
return mu
def Mean(self)
Computes the mean of a PMF.
Returns: float mean
5.939332
8.256278
0.719372
if mu is None:
    mu = self.Mean()

var = 0.0
for x, p in self.d.iteritems():
    var += p * (x - mu) ** 2
return var
def Var(self, mu=None)
Computes the variance of a PMF.
Args:
    mu: the point around which the variance is computed;
        if omitted, computes the mean
Returns: float variance
3.374642
3.7384
0.902697
prob, val = max((prob, val) for val, prob in self.Items())
return val
def MaximumLikelihood(self)
Returns the value with the highest probability.
Returns: float value with maximum probability
10.772197
15.495843
0.695167
pmf = Pmf()
for v1, p1 in self.Items():
    for v2, p2 in other.Items():
        pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddPmf(self, other)
Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
2.61091
2.699088
0.967331
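AddPmf enumerates all pairs of values, i.e., a discrete convolution; two fair dice give the familiar triangular distribution (module name assumed):

    from thinkbayes import MakePmfFromItems

    d6 = MakePmfFromItems([(i, 1.0 / 6) for i in range(1, 7)])
    total = d6.AddPmf(d6)
    print(total.Prob(7))  # 6/36 ~= 0.1667
    print(total.Mean())   # 7.0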
pmf = Pmf()
for v1, p1 in self.Items():
    pmf.Set(v1 + other, p1)
return pmf
def AddConstant(self, other)
Computes the Pmf of the sum of a constant and values from self.
other: a number
returns: new Pmf
5.004385
3.749268
1.334763
cdf = self.MakeCdf()
cdf.ps = [p ** k for p in cdf.ps]
return cdf
def Max(self, k)
Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
8.073534
6.257546
1.290208
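Exponentiating the CDF works because k independent draws satisfy P(max <= x) = P(X <= x)**k. For example, the maximum of three rolls of a fair die (module name assumed):

    from thinkbayes import MakePmfFromItems

    d6 = MakePmfFromItems([(i, 1.0 / 6) for i in range(1, 7)])
    cdf_max = d6.Max(3)
    print(cdf_max.Prob(3))  # (3/6)**3 = 0.125
    print(cdf_max.Prob(6))  # 1.0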
pmf = Pmf(name=name)
for vs, prob in self.Items():
    pmf.Incr(vs[i], prob)
return pmf
def Marginal(self, i, name='')
Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
6.209553
7.472731
0.830962
pmf = Pmf(name=name)
for vs, prob in self.Items():
    if vs[j] != val:
        continue
    pmf.Incr(vs[i], prob)

pmf.Normalize()
return pmf
def Conditional(self, i, j, val, name='')
Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
5.318528
5.051944
1.052768
interval = []
total = 0

t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)

for prob, val in t:
    interval.append(val)
    total += prob
    if total >= percentage / 100.0:
        break
return interval
def MaxLikeInterval(self, percentage=90)
Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
3.823292
3.685836
1.037293
if name is None:
    name = self.name
return Cdf(list(self.xs), list(self.ps), name)
def Copy(self, name=None)
Returns a copy of this Cdf.
Args:
    name: string name for the new Cdf
8.0677
6.509786
1.239319
self.xs.append(x)
self.ps.append(p)
def Append(self, x, p)
Add an (x, p) pair to the end of this CDF.
Note: this is normally used to build a CDF from scratch, not to modify existing CDFs.
It is up to the caller to make sure that the result is a legal CDF.
3.680714
4.040795
0.910889
new = self.Copy()
new.xs = [x + term for x in self.xs]
return new
def Shift(self, term)
Adds a term to the xs.
term: how much to add
5.945916
4.410191
1.348222
new = self.Copy()
new.xs = [x * factor for x in self.xs]
return new
def Scale(self, factor)
Multiplies the xs by a factor.
factor: what to multiply by
5.982193
4.282052
1.397039
if x < self.xs[0]:
    return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index - 1]
return p
def Prob(self, x)
Returns CDF(x), the probability that corresponds to value x.
Args:
    x: number
Returns: float probability
3.456695
3.948435
0.875459
if p < 0 or p > 1:
    raise ValueError('Probability p must be in range [0, 1]')

if p == 0:
    return self.xs[0]
if p == 1:
    return self.xs[-1]

index = bisect.bisect(self.ps, p)
if p == self.ps[index - 1]:
    return self.xs[index - 1]
else:
    return self.xs[index]
def Value(self, p)
Returns InverseCDF(p), the value that corresponds to probability p.
Args:
    p: number in the range [0, 1]
Returns: number value
2.282417
2.255221
1.012059
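A sketch of the inverse CDF, e.g. reading off the median (MakeCdfFromItems appears earlier in this section; module name assumed):

    from thinkbayes import MakeCdfFromItems

    cdf = MakeCdfFromItems([(1, 2), (2, 3), (5, 5)])  # (value, frequency) pairs
    print(cdf.Value(0.5))  # 2, since P(X <= 2) = 5/10
    print(cdf.Value(0.9))  # 5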
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
    p = new_p - old_p
    total += p * x
    old_p = new_p
return total
def Mean(self)
Computes the mean of a CDF.
Returns: float mean
3.941192
4.188351
0.940989
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
def CredibleInterval(self, percentage=90)
Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
    percentage: float between 0 and 100
Returns: sequence of two floats, low and high
4.855873
6.5212
0.744629
xs = [self.xs[0]]
ps = [0.0]
for i, p in enumerate(self.ps):
    xs.append(self.xs[i])
    ps.append(p)

    try:
        xs.append(self.xs[i + 1])
        ps.append(p)
    except IndexError:
        pass
return xs, ps
def Render(self)
Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation can be misleading.
Returns: tuple of (xs, ps)
2.901834
2.573385
1.127633
cdf = self.Copy()
cdf.ps = [p ** k for p in cdf.ps]
return cdf
def Max(self, k)
Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
7.406421
5.674675
1.305171
for hypo in self.Values():
    like = self.LogLikelihood(data, hypo)
    self.Incr(hypo, like)
def LogUpdate(self, data)
Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
    data: any representation of the data
10.496151
11.717123
0.895796
for data in dataset:
    for hypo in self.Values():
        like = self.Likelihood(data, hypo)
        self.Mult(hypo, like)
return self.Normalize()
def UpdateSet(self, dataset)
Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make a copy.
dataset: a sequence of data
returns: the normalizing constant
8.202651
7.325121
1.119797
for hypo, prob in sorted(self.Items()):
    print(hypo, prob)
def Print(self)
Prints the hypotheses and their probabilities.
13.944114
5.416775
2.574246
for hypo, prob in self.Items():
    if prob:
        self.Set(hypo, Odds(prob))
    else:
        self.Remove(hypo)
def MakeOdds(self)
Transforms from probabilities to odds.
Values with prob=0 are removed.
5.921713
4.129869
1.433874
for hypo, odds in self.Items():
    self.Set(hypo, Probability(odds))
def MakeProbs(self)
Transforms from odds to probabilities.
13.998786
7.811191
1.792145
pmf = Pmf(name=name)
for x in xs:
    pmf.Set(x, self.Density(x))
pmf.Normalize()
return pmf
def MakePmf(self, xs, name='')
Makes a discrete version of this Pdf, evaluated at xs.
xs: equally-spaced sequence of values
Returns: new Pmf
2.655089
3.885993
0.683246
heads, tails = data
self.alpha += heads
self.beta += tails
def Update(self, data)
Updates a Beta distribution.
data: pair of int (heads, tails)
13.307222
5.065499
2.627031
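This is the standard Beta-binomial conjugate update: observed heads and tails add directly to alpha and beta. A sketch, assuming the class defaults to a uniform Beta(1, 1) prior and exposes a Mean() accessor (both assumptions, as is the module name):

    from thinkbayes import Beta

    prior = Beta()             # assumed Beta(1, 1)
    prior.Update((140, 110))   # observe 140 heads, 110 tails
    print(prior.Mean())        # 141 / 252 ~= 0.5595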
size = n,
return numpy.random.beta(self.alpha, self.beta, size)
def Sample(self, n)
Generates a random sample from this distribution.
n: int sample size
7.524272
9.660597
0.778862
if self.alpha < 1 or self.beta < 1:
    cdf = self.MakeCdf()
    pmf = cdf.MakePmf()
    return pmf

xs = [i / (steps - 1.0) for i in xrange(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = MakePmfFromDict(dict(zip(xs, probs)), name)
return pmf
def MakePmf(self, steps=101, name='')
Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence of points and treat the probability density as a probability mass.
But if alpha or beta is less than one, we have to be more careful because the PDF goes to infinity at x=0 and x=1.
In that case we evaluate the CDF and compute differences.
3.094404
2.662786
1.162093
xs = [i / (steps - 1.0) for i in xrange(steps)]
ps = [scipy.special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
def MakeCdf(self, steps=101)
Returns the CDF of this distribution.
2.875486
2.731214
1.052823
m = len(data)
self.params[:m] += data
def Update(self, data)
Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
21.034061
11.902841
1.767146
p = numpy.random.gamma(self.params)
return p / p.sum()
def Random(self)
Generates a random variate from this distribution.
Returns: normalized vector of fractions
11.590925
10.670274
1.086282
m = len(data)
if self.n < m:
    return 0

x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def Likelihood(self, data)
Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
9.656029
9.977488
0.967782
m = len(data)
if self.n < m:
    return float('-inf')

x = self.Random()
y = numpy.log(x[:m]) * data
return y.sum()
def LogLikelihood(self, data)
Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
7.788824
7.463435
1.043598
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def MarginalBeta(self, i)
Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution#Marginal_distributions
i: int
Returns: Beta object
6.63554
7.185795
0.923425
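By the aggregation property, component i against everything else is Beta(alpha_i, alpha_0 - alpha_i). A sketch, assuming a constructor that takes the number of categories and starts from a uniform prior (an assumption, as is the module name):

    from thinkbayes import Dirichlet

    d = Dirichlet(3)        # assumed to start with params = [1, 1, 1]
    d.Update((10, 5, 1))    # params become [11, 6, 2], so alpha0 = 19
    marginal = d.MarginalBeta(0)
    print(marginal.alpha, marginal.beta)  # 11, 8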
alpha0 = self.params.sum()
ps = self.params / alpha0
return MakePmfFromItems(zip(xs, ps), name=name)
def PredictivePmf(self, xs, name='')
Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
8.869351
11.112674
0.798129
value = "" for db, feature in zip(dbs, features): value += db + ":" + feature return value
def _get_ann(dbs, features)
Gives format to annotation for html table output
5.386144
4.828212
1.115557
safe_dirs(out_dir)
main_table = []
header = ['id', 'ann']
n = len(data[0])

bar = ProgressBar(maxval=n)
bar.start()
bar.update(0)

for itern, c in enumerate(data[0]):
    bar.update(itern)
    logger.debug("creating cluster: {}".format(c))
    safe_dirs(os.path.join(out_dir, c))
    valid, ann, pos_structure = _single_cluster(c, data, os.path.join(out_dir, c, "maps.tsv"), args)
    data[0][c].update({'profile': pos_structure})
    loci = data[0][c]['loci']
    data[0][c]['precursor'] = {"seq": precursor_sequence(loci[0][0:5], args.ref)}
    data[0][c]['precursor']["colors"] = _parse(data[0][c]['profile'], data[0][c]['precursor']["seq"])
    data[0][c]['precursor'].update(run_rnafold(data[0][c]['precursor']['seq']))
return data
def make_profile(data, out_dir, args)
Make the data report for each cluster.
5.279655
5.001736
1.055564
for pos in range(start, end):
    for s in counts:
        dat[s][pos] += counts[s]
return dat
def _expand(dat, counts, start, end)
Expand the same counts over every position from start to end.
4.049632
3.817964
1.060678
dat = defaultdict(Counter)
if isinstance(in_file, (str, unicode)):
    with open(in_file) as in_handle:
        for line in in_handle:
            cols = line.strip().split("\t")
            counts = freq[cols[3]]
            dat = _expand(dat, counts, int(cols[1]), int(cols[2]))
else:
    if raw_file:
        out_handle = open(raw_file, "w")
    for name in in_file:
        counts = freq[name]
        if raw_file:
            print("%s\t%s\t%s\t%s\t%s\t%s" % ("chr", in_file[name][0], in_file[name][1], name, sum(counts.values()), "+"), file=out_handle, end="")
        dat = _expand(dat, counts, in_file[name][0], in_file[name][1])

for s in dat:
    for p in dat[s]:
        dat[s][p] = mlog2(dat[s][p] + 1)
return dat
def _convert_to_df(in_file, freq, raw_file)
Convert alignment positions and counts into a per-sample, per-position table of log2-scaled counts.
2.606626
2.592179
1.005573
ann = defaultdict(list)
for pos in c['ann']:
    for db in pos:
        ann[db] += list(pos[db])
logger.debug(ann)

valid = [l for l in c['valid']]
ann_list = [", ".join(list(set(ann[feature]))) for feature in ann if feature in valid]
return valid, ann_list
def _make(c)
Create HTML from a template, adding figure, annotation and sequence counts.
7.080165
7.133482
0.992526
valid, ann = 0, 0
raw_file = None
freq = defaultdict()
[freq.update({s.keys()[0]: s.values()[0]}) for s in data[0][c]['freq']]
names = [s.keys()[0] for s in data[0][c]['seqs']]
seqs = [s.values()[0] for s in data[0][c]['seqs']]
loci = data[0][c]['loci']

if loci[0][3] - loci[0][2] > 500:
    logger.info("locus bigger than 500 nt, skipping: %s" % loci)
    return valid, ann, {}

if not file_exists(out_file):
    if args.razer:
        logger.debug("map with razer all sequences to all loci %s " % loci)
        map_to_precursors(seqs, names, {loci[0][0]: [loci[0][0:5]]}, out_file, args)
    else:
        logger.debug("map with biopython fn all sequences to all loci %s " % loci)
        if args.debug:
            raw_file = out_file
        out_file = map_to_precursor_biopython(seqs, names, loci[0][0:5], args)

logger.debug("plot sequences on loci")
df = _convert_to_df(out_file, freq, raw_file)
if df:
    valid, ann = _make(data[0][c])
return valid, ann, df
def _single_cluster(c, data, out_file, args)
Map sequences onto precursors and create an expression profile.
4.379087
4.137674
1.058345
cl = cluster(1)
# seqs = [s.values()[0] for s in data['seqs']]
names = [s.keys()[0] for s in data['seqs']]
cl.add_id_member(names, 1)

freq = defaultdict()
[freq.update({s.keys()[0]: s.values()[0]}) for s in data['freq']]
# return the populated cluster object
return cl
def read_cluster(data, id=1)
Read a JSON cluster and populate a cluster object.
5.434464
5.0732
1.07121
with open(out_file, 'w') as handle_out:
    handle_out.write(json.dumps([data], skipkeys=True, indent=2))
def write_data(data, out_file)
Write a JSON file from a seqcluster cluster.
3.455377
3.362915
1.027495
seqs1 = data[c1]['seqs']
seqs2 = data[c2]['seqs']
seqs = list(set(seqs1 + seqs2))

names = []
for s in seqs:
    if s in seqs1 and s in seqs2:
        names.append("both")
    elif s in seqs1:
        names.append(c1)
    else:
        names.append(c2)
return seqs, names
def get_sequences_from_cluster(c1, c2, data)
Get all sequences from a pair of clusters, labeling each sequence by its origin.
1.906992
1.900238
1.003554
with make_temp_directory() as temp:
    pre_fasta = os.path.join(temp, "pre.fa")
    seqs_fasta = os.path.join(temp, "seqs.fa")
    out_sam = os.path.join(temp, "out.sam")

    pre_fasta = get_loci_fasta(loci, pre_fasta, args.ref)
    out_precursor_file = out_file.replace("tsv", "fa")
    seqs_fasta = get_seqs_fasta(seqs, names, seqs_fasta)
    # print(open(pre_fasta).read().split("\n")[1])

    if find_cmd("razers3"):
        cmd = "razers3 -dr 2 -i 80 -rr 90 -f -o {out_sam} {temp}/pre.fa {seqs_fasta}"
        run(cmd.format(**locals()))
        out_file = read_alignment(out_sam, loci, seqs, out_file)
        shutil.copy(pre_fasta, out_precursor_file)
return out_file
def map_to_precursors(seqs, names, loci, out_file, args)
Map sequences to precursors with razers3.
3.880691
3.577474
1.084757
region = "%s\t%s\t%s\t.\t.\t%s" % (loci[1], loci[2], loci[3], loci[4])
precursor = pybedtools.BedTool(str(region), from_string=True).sequence(fi=reference, s=True)
return open(precursor.seqfn).read().split("\n")[1]
def precursor_sequence(loci, reference)
Get the precursor sequence from the genome.
3.547792
3.369927
1.05278
precursor = precursor_sequence(loci, args.ref).upper()

dat = dict()
for s, n in itertools.izip(seqs, names):
    res = pyMatch.Match(precursor, str(s), 1, 3)
    if res > -1:
        dat[n] = [res, res + len(s)]

logger.debug("mapped in %s: %s out of %s" % (loci, len(dat), len(seqs)))
return dat
def map_to_precursors_on_fly(seqs, names, loci, args)
Map sequences to precursors with the franpr algorithm to avoid writing to disk.
5.2126
5.074579
1.027199
if local:
    aligned_x = pairwise2.align.localxx(x, y)
else:
    aligned_x = pairwise2.align.globalms(x, y, 1, -1, -1, -0.5)

if aligned_x:
    sorted_alignments = sorted(aligned_x, key=operator.itemgetter(2))
    e = enumerate(sorted_alignments[0][0])
    nts = [i for i, c in e if c != "-"]
    return [min(nts), max(nts)]
def _align(x, y, local=False)
Pairwise alignment helper; see https://medium.com/towards-data-science/pairwise-sequence-alignment-using-biopython-d1a9d0ba861f
3.497773
3.118642
1.121569
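A standalone sketch of what _align computes: the span of x inside y, read off the non-gap positions of x in the best global alignment. Biopython is assumed installed (pairwise2 is deprecated in recent releases but still works):

    import operator
    from Bio import pairwise2

    x, y = "ACGTT", "GGACGTTGG"
    alignments = pairwise2.align.globalms(x, y, 1, -1, -1, -0.5)
    best = sorted(alignments, key=operator.itemgetter(2))[0]
    nts = [i for i, c in enumerate(best[0]) if c != "-"]
    print([min(nts), max(nts)])  # [2, 6]: x occupies positions 2-6 of y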
precursor = precursor_sequence(loci, args.ref).upper()

dat = dict()
for s, n in itertools.izip(seqs, names):
    res = _align(str(s), precursor)
    if res:
        dat[n] = res

logger.debug("mapped in %s: %s out of %s" % (loci, len(dat), len(seqs)))
return dat
def map_to_precursor_biopython(seqs, names, loci, args)
Map the sequences to the precursor using the Biopython package.
4.532621
4.447141
1.019221
with open(out_fa, 'w') as fa_handle:
    for s, n in itertools.izip(seqs, names):
        print(">cx{1}-{0}\n{0}".format(s, n), file=fa_handle)
return out_fa
def get_seqs_fasta(seqs, names, out_fa)
Write the sequences to a FASTA file.
3.338426
3.257781
1.024754
if not find_cmd("bedtools"): raise ValueError("Not bedtools installed") with make_temp_directory() as temp: bed_file = os.path.join(temp, "file.bed") for nc, loci in loci.iteritems(): for l in loci: with open(bed_file, 'w') as bed_handle: logger.debug("get_fasta: loci %s" % l) nc, c, s, e, st = l print("{0}\t{1}\t{2}\t{3}\t{3}\t{4}".format(c, s, e, nc, st), file=bed_handle) get_fasta(bed_file, ref, out_fa) return out_fa
def get_loci_fasta(loci, out_fa, ref)
get fasta from precursor
3.63244
3.508903
1.035207
hits = defaultdict(list)
with open(out_file, "w") as out_handle:
    samfile = pysam.Samfile(out_sam, "r")
    for a in samfile.fetch():
        if not a.is_unmapped:
            nm = int([t[1] for t in a.tags if t[0] == "NM"][0])
            a = makeBED(a)
            if not a:
                continue
            ref, locus = get_loci(samfile.getrname(int(a.chr)), loci)
            hits[a.name].append((nm, "%s %s %s %s %s %s" % (a.name, a.name.split("-")[0], locus, ref, a.start, a.end)))

    for hit in hits.values():
        nm = hit[0][0]
        for l in hit:
            if nm == l[0]:
                print(l[1], file=out_handle)
return out_file
def read_alignment(out_sam, loci, seqs, out_file)
Read which sequences map to which loci and write them to a tab-separated file.
3.047121
3.10412
0.981638
if not args.hairpin or not args.mirna:
    logger.info("Working with version %s" % version)
    hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz")
    mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz")

    if not file_exists(hairpin_fn):
        cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$" % (version, hairpin_fn)
        do.run(cmd_h, "download hairpin")
    if not file_exists(mirna_fn):
        cmd_m = "wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f !$" % (version, mirna_fn)
        do.run(cmd_m, "download mirna")
    return hairpin_fn, mirna_fn
else:
    return args.hairpin, args.mirna
def _download_mirbase(args, version="CURRENT")
Download hairpin and miRNA files from miRBase.
2.503457
2.463567
1.016192
# names like "@seq_x123": letters followed by a count suffix
p = re.compile(".[a-zA-Z]+_x[0-9]+")
if p.match(name):
    tags = name[1:].split("_x")
    return ">%s_%s_x%s" % (tags[0], idx, tags[1])
return name.replace("@", ">")
def _make_unique(name, idx)
Make the sequence name unique when it encodes only counts.
5.522937
5.551846
0.994793
out_file = op.splitext(fn)[0] + "_unique.fa"
idx = 0
if not file_exists(out_file):
    with open(out_file, 'w') as out_handle:
        with open(fn) as in_handle:
            for line in in_handle:
                if line.startswith("@") or line.startswith(">"):
                    fixed_name = _make_unique(line.strip(), idx)
                    seq = in_handle.next().strip()
                    counts = _get_freq(fixed_name)
                    if len(seq) < 26 and (counts > 1 or counts == 0):
                        idx += 1
                        print(fixed_name, file=out_handle, end="\n")
                        print(seq, file=out_handle, end="\n")
                    if line.startswith("@"):
                        in_handle.next()
                        in_handle.next()
return out_file
def _filter_seqs(fn)
Convert names of sequences to unique ids.
2.966031
2.877013
1.030941
hairpin = defaultdict(str)
name = None
with open(precursor) as in_handle:
    for line in in_handle:
        if line.startswith(">"):
            if hairpin[name]:
                hairpin[name] = hairpin[name] + "NNNNNNNNNNNN"
            name = line.strip().replace(">", " ").split()[0]
        else:
            hairpin[name] += line.strip()
    hairpin[name] = hairpin[name] + "NNNNNNNNNNNN"
return hairpin
def _read_precursor(precursor, sps)
Load precursor file for that species.
2.714922
2.549797
1.06476
if not gtf:
    return gtf

db = defaultdict(list)
with open(gtf) as in_handle:
    for line in in_handle:
        if line.startswith("#"):
            continue
        cols = line.strip().split("\t")
        name = [n.split("=")[1] for n in cols[-1].split(";") if n.startswith("Name")]
        chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6]
        if cols[2] == "miRNA_primary_transcript":
            db[name[0]].append([chrom, int(start), int(end), strand])
return db
def _read_gtf(gtf)
Load GTF file with precursor positions on the genome.
2.278072
2.203465
1.033859
dif = abs(mirna[0] - start)
if start < mirna[0]:
    iso.t5 = sequence[:dif].upper()
elif start > mirna[0]:
    iso.t5 = precursor[mirna[0] - 1:mirna[0] - 1 + dif].lower()
elif start == mirna[0]:
    iso.t5 = "NA"
if dif > 4:
    logger.debug("start > 3 %s %s %s %s %s" % (start, len(sequence), dif, mirna, iso.format()))
    return None

end = start + (len(sequence) - len(iso.add)) - 1
dif = abs(mirna[1] - end)
if iso.add:
    sequence = sequence[:-len(iso.add)]
# if dif > 3:
#     return None
if end > mirna[1]:
    iso.t3 = sequence[-dif:].upper()
elif end < mirna[1]:
    iso.t3 = precursor[mirna[1] - dif:mirna[1]].lower()
elif end == mirna[1]:
    iso.t3 = "NA"
if dif > 4:
    logger.debug("end > 3 %s %s %s %s %s" % (len(sequence), end, dif, mirna, iso.format()))
    return None

logger.debug("%s %s %s %s %s %s" % (start, len(sequence), end, dif, mirna, iso.format()))
return True
def _coord(sequence, start, mirna, precursor, iso)
Define t5 and t3 isomirs.
2.352959
2.302657
1.021845
for r in reads:
    for p in reads[r].precursors:
        start = reads[r].precursors[p].start + 1  # convert to 1-based
        end = start + len(reads[r].sequence)
        for mature in mirbase_ref[p]:
            mi = mirbase_ref[p][mature]
            is_iso = _coord(reads[r].sequence, start, mi, precursors[p], reads[r].precursors[p])
            logger.debug(("{r} {p} {start} {is_iso} {mature} {mi} {mature_s}").format(s=reads[r].sequence, mature_s=precursors[p][mi[0] - 1:mi[1]], **locals()))
            if is_iso:
                reads[r].precursors[p].mirna = mature
                break
return reads
def _annotate(reads, mirbase_ref, precursors)
Using SAM/BAM coordinates, mismatches and realignment to annotate isomiRs.
3.574391
3.410362
1.048097
error = set()
pattern_addition = [[1, 1, 0], [1, 0, 1], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 1, 1]]

# positions where the read disagrees with the precursor
for pos in range(0, len(seq)):
    if seq[pos] != precursor[(start + pos)]:
        error.add(pos)

# mismatches away from the 3' end count as substitutions
subs, add = [], []
for e in error:
    if e < len(seq) - 3:
        subs.append([e, seq[e], precursor[start + e]])

# mismatches within the last three bases may be non-templated additions
pattern, error_add = [], []
for e in range(len(seq) - 3, len(seq)):
    if e in error:
        pattern.append(1)
        error_add.append(e)
    else:
        pattern.append(0)

for p in pattern_addition:
    if pattern == p:
        add = seq[error_add[0]:]
        break

if not add and error_add:
    for e in error_add:
        subs.append([e, seq[e], precursor[start + e]])
return subs, add
def _realign(seq, precursor, start)
Realign the sequence to the precursor, returning substitutions and 3' additions.
2.628346
2.613379
1.005727
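A toy walk-through: mismatches confined to the last three bases that fit one of the recognized patterns are reported as a 3' addition rather than as substitutions (this assumes _realign is importable as defined above):

    # the read matches precursor[2:6] ("AAAA") and carries a non-templated "TT"
    seq = "AAAATT"
    precursor = "GGAAAAGG"
    subs, add = _realign(seq, precursor, 2)
    print(subs, add)  # [] TT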