Dataset columns (from the viewer summary): _id (string, 2-7 chars), title (string, 1-88 chars), partition (string, 3 classes), text (string, 75-19.8k chars), language (string, 1 class: python), meta_information (dict).
q13700
default_links_factory_with_additional
train
def default_links_factory_with_additional(additional_links): """Generate a links generation factory with the specified additional links. :param additional_links: A dict of link names to links to be added to the returned object. :returns: A link generation factory. """ def factory(pid, **kwargs): links = default_links_factory(pid) for link in additional_links: links[link] = additional_links[link].format(pid=pid, scheme=request.scheme, host=request.host) return links return factory
python
{ "resource": "" }
q13701
geolocation_sort
train
def geolocation_sort(field_name, argument, unit, mode=None, distance_type=None): """Sort field factory for geo-location based sorting. :param argument: Name of URL query string field to parse pin location from. Multiple locations can be provided. Each location can be either a string "latitude,longitude" or a geohash. :param unit: Distance unit (e.g. km). :param mode: Sort mode (avg, min, max). :param distance_type: Distance calculation mode. :returns: Function that returns geolocation sort field. """ def inner(asc): locations = request.values.getlist(argument, type=str) field = { '_geo_distance': { field_name: locations, 'order': 'asc' if asc else 'desc', 'unit': unit, } } if mode: field['_geo_distance']['mode'] = mode if distance_type: field['_geo_distance']['distance_type'] = distance_type return field return inner
python
{ "resource": "" }
q13702
eval_field
train
def eval_field(field, asc): """Evaluate a field for sorting purpose. :param field: Field definition (string, dict or callable). :param asc: ``True`` if order is ascending, ``False`` if descending. :returns: Dictionary with the sort field query. """ if isinstance(field, dict): if asc: return field else: # Field should only have one key and must have an order subkey. field = copy.deepcopy(field) key = list(field.keys())[0] field[key]['order'] = reverse_order(field[key]['order']) return field elif callable(field): return field(asc) else: key, key_asc = parse_sort_field(field) if not asc: key_asc = not key_asc return {key: {'order': 'asc' if key_asc else 'desc'}}
python
{ "resource": "" }
q13703
default_sorter_factory
train
def default_sorter_factory(search, index): """Default sort query factory. :param search: Search object. :param index: Index to search in. :returns: Tuple of (search, URL arguments). """ sort_arg_name = 'sort' urlfield = request.values.get(sort_arg_name, '', type=str) # Get default sorting if sort is not specified. if not urlfield: # cast to six.text_type to handle unicodes in Python 2 has_query = request.values.get('q', type=six.text_type) urlfield = current_app.config['RECORDS_REST_DEFAULT_SORT'].get( index, {}).get('query' if has_query else 'noquery', '') # Parse sort argument key, asc = parse_sort_field(urlfield) # Get sort options sort_options = current_app.config['RECORDS_REST_SORT_OPTIONS'].get( index, {}).get(key) if sort_options is None: return (search, {}) # Get fields to sort query by search = search.sort( *[eval_field(f, asc) for f in sort_options['fields']] ) return (search, {sort_arg_name: urlfield})
python
{ "resource": "" }
q13704
RecordMetadataSchemaJSONV1.inject_pid
train
def inject_pid(self, data): """Inject context PID in the RECID field.""" # Remove already deserialized "pid" field pid_value = data.pop('pid', None) if pid_value: pid_field = current_app.config['PIDSTORE_RECID_FIELD'] data.setdefault(pid_field, pid_value) return data
python
{ "resource": "" }
q13705
BasicNode.get_render
train
def get_render(self, context): """ Returns a `Context` object with all the necessary stuff for rendering the form :param context: `django.template.Context` variable holding the context for the node `self.form` and `self.helper` are resolved into real Python objects resolving them from the `context`. The `actual_form` can be a form or a formset. If it's a formset `is_formset` is set to True. If the helper has a layout we use it, for rendering the form or the formset's forms. """ actual_form = self.form.resolve(context) attrs = {} if self.helper is not None: helper = self.helper.resolve(context) if not isinstance(helper, FormHelper): raise TypeError('helper object provided to uni_form tag must be a uni_form.helpers.FormHelper object.') attrs = helper.get_attributes() else: helper = None # We get the response dictionary is_formset = isinstance(actual_form, BaseFormSet) response_dict = self.get_response_dict(attrs, context, is_formset) # If we have a helper's layout we use it, for the form or the formset's forms if helper and helper.layout: if not is_formset: actual_form.form_html = helper.render_layout(actual_form, context) else: forloop = ForLoopSimulator(actual_form) for form in actual_form.forms: context.update({'forloop': forloop}) form.form_html = helper.render_layout(form, context) forloop.iterate() if is_formset: response_dict.update({'formset': actual_form}) else: response_dict.update({'form': actual_form}) return Context(response_dict)
python
{ "resource": "" }
q13706
FormHelper.get_attributes
train
def get_attributes(self): """ Used by the uni_form_tags to get helper attributes """ items = {} items['form_method'] = self.form_method.strip() items['form_tag'] = self.form_tag items['form_style'] = self.form_style.strip() if self.form_action: items['form_action'] = self.form_action.strip() if self.form_id: items['id'] = self.form_id.strip() if self.form_class: items['class'] = self.form_class.strip() if self.inputs: items['inputs'] = self.inputs if self.form_error_title: items['form_error_title'] = self.form_error_title.strip() if self.formset_error_title: items['formset_error_title'] = self.formset_error_title.strip() return items
python
{ "resource": "" }
q13707
sequence_unique.add_exp
train
def add_exp(self, gr, exp): """Add the counts for one sample. :param gr: name of the sample :param exp: counts of sample **gr** Updates ``self.group`` (name -> counts) and recomputes ``self.total``. """ self.group[gr] = exp self.total = sum(self.group.values())
python
{ "resource": "" }
q13708
MakeJoint
train
def MakeJoint(pmf1, pmf2): """Joint distribution of values from pmf1 and pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: Joint pmf of value pairs """ joint = Joint() for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): joint.Set((v1, v2), p1 * p2) return joint
python
{ "resource": "" }
q13709
MakeHistFromList
train
def MakeHistFromList(t, name=''): """Makes a histogram from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this histogram Returns: Hist object """ hist = Hist(name=name) [hist.Incr(x) for x in t] return hist
python
{ "resource": "" }
q13710
MakePmfFromList
train
def MakePmfFromList(t, name=''): """Makes a PMF from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this PMF Returns: Pmf object """ hist = MakeHistFromList(t) d = hist.GetDict() pmf = Pmf(d, name) pmf.Normalize() return pmf
python
{ "resource": "" }
q13711
MakePmfFromDict
train
def MakePmfFromDict(d, name=''): """Makes a PMF from a map from values to probabilities. Args: d: dictionary that maps values to probabilities name: string name for this PMF Returns: Pmf object """ pmf = Pmf(d, name) pmf.Normalize() return pmf
python
{ "resource": "" }
q13712
MakePmfFromItems
train
def MakePmfFromItems(t, name=''): """Makes a PMF from a sequence of value-probability pairs Args: t: sequence of value-probability pairs name: string name for this PMF Returns: Pmf object """ pmf = Pmf(dict(t), name) pmf.Normalize() return pmf
python
{ "resource": "" }
q13713
MakePmfFromHist
train
def MakePmfFromHist(hist, name=None): """Makes a normalized PMF from a Hist object. Args: hist: Hist object name: string name Returns: Pmf object """ if name is None: name = hist.name # make a copy of the dictionary d = dict(hist.GetDict()) pmf = Pmf(d, name) pmf.Normalize() return pmf
python
{ "resource": "" }
q13714
MakePmfFromCdf
train
def MakePmfFromCdf(cdf, name=None): """Makes a normalized Pmf from a Cdf object. Args: cdf: Cdf object name: string name for the new Pmf Returns: Pmf object """ if name is None: name = cdf.name pmf = Pmf(name=name) prev = 0.0 for val, prob in cdf.Items(): pmf.Incr(val, prob - prev) prev = prob return pmf
python
{ "resource": "" }
q13715
MakeMixture
train
def MakeMixture(metapmf, name='mix'): """Make a mixture distribution. Args: metapmf: Pmf that maps from Pmfs to probs. name: string name for the new Pmf. Returns: Pmf object. """ mix = Pmf(name=name) for pmf, p1 in metapmf.Items(): for x, p2 in pmf.Items(): mix.Incr(x, p1 * p2) return mix
python
{ "resource": "" }
q13716
MakeUniformPmf
train
def MakeUniformPmf(low, high, n): """Make a uniform Pmf. low: lowest value (inclusive) high: highest value (inclusive) n: number of values """ pmf = Pmf() for x in numpy.linspace(low, high, n): pmf.Set(x, 1) pmf.Normalize() return pmf
python
{ "resource": "" }
q13717
MakeCdfFromPmf
train
def MakeCdfFromPmf(pmf, name=None): """Makes a CDF from a Pmf object. Args: pmf: Pmf.Pmf object name: string name for the data. Returns: Cdf object """ if name is None: name = pmf.name return MakeCdfFromItems(pmf.Items(), name)
python
{ "resource": "" }
q13718
MakeSuiteFromList
train
def MakeSuiteFromList(t, name=''): """Makes a suite from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this suite Returns: Suite object """ hist = MakeHistFromList(t) d = hist.GetDict() return MakeSuiteFromDict(d, name)
python
{ "resource": "" }
q13719
MakeSuiteFromHist
train
def MakeSuiteFromHist(hist, name=None): """Makes a normalized suite from a Hist object. Args: hist: Hist object name: string name Returns: Suite object """ if name is None: name = hist.name # make a copy of the dictionary d = dict(hist.GetDict()) return MakeSuiteFromDict(d, name)
python
{ "resource": "" }
q13720
MakeSuiteFromDict
train
def MakeSuiteFromDict(d, name=''): """Makes a suite from a map from values to probabilities. Args: d: dictionary that maps values to probabilities name: string name for this suite Returns: Suite object """ suite = Suite(name=name) suite.SetDict(d) suite.Normalize() return suite
python
{ "resource": "" }
q13721
MakeSuiteFromCdf
train
def MakeSuiteFromCdf(cdf, name=None): """Makes a normalized Suite from a Cdf object. Args: cdf: Cdf object name: string name for the new Suite Returns: Suite object """ if name is None: name = cdf.name suite = Suite(name=name) prev = 0.0 for val, prob in cdf.Items(): suite.Incr(val, prob - prev) prev = prob return suite
python
{ "resource": "" }
q13722
Percentile
train
def Percentile(pmf, percentage): """Computes a percentile of a given Pmf. percentage: float 0-100 """ p = percentage / 100.0 total = 0 for val, prob in pmf.Items(): total += prob if total >= p: return val
python
{ "resource": "" }
q13723
CredibleInterval
train
def CredibleInterval(pmf, percentage=90): """Computes a credible interval for a given distribution. If percentage=90, computes the 90% CI. Args: pmf: Pmf object representing a posterior distribution percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ cdf = pmf.MakeCdf() prob = (1 - percentage / 100.0) / 2 interval = cdf.Value(prob), cdf.Value(1 - prob) return interval
python
{ "resource": "" }
q13724
PmfProbLess
train
def PmfProbLess(pmf1, pmf2): """Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 < v2: total += p1 * p2 return total
python
{ "resource": "" }
q13725
SampleSum
train
def SampleSum(dists, n): """Draws a sample of sums from a list of distributions. dists: sequence of Pmf or Cdf objects n: sample size returns: new Pmf of sums """ pmf = MakePmfFromList(RandomSum(dists) for i in xrange(n)) return pmf
python
{ "resource": "" }
q13726
EvalGaussianPdf
train
def EvalGaussianPdf(x, mu, sigma): """Computes the PDF of the normal distribution. x: value mu: mean sigma: standard deviation returns: float probability density """ return scipy.stats.norm.pdf(x, mu, sigma)
python
{ "resource": "" }
q13727
EvalBinomialPmf
train
def EvalBinomialPmf(k, n, p): """Evaluates the binomial PMF. Returns the probability of k successes in n trials with probability p. """ return scipy.stats.binom.pmf(k, n, p)
python
{ "resource": "" }
q13728
EvalPoissonPmf
train
def EvalPoissonPmf(k, lam): """Computes the Poisson PMF. k: number of events lam: parameter lambda in events per unit time returns: float probability """ # don't use the scipy function (yet). for lam=0 it returns NaN; # should be 0.0 # return scipy.stats.poisson.pmf(k, lam) return lam ** k * math.exp(-lam) / math.factorial(k)
python
{ "resource": "" }
q13729
MakePoissonPmf
train
def MakePoissonPmf(lam, high, step=1): """Makes a discrete approximation to a Poisson distribution. lam: parameter lambda in events per unit time high: upper bound of the Pmf step: spacing between values returns: normalized Pmf """ pmf = Pmf() for k in xrange(0, high + 1, step): p = EvalPoissonPmf(k, lam) pmf.Set(k, p) pmf.Normalize() return pmf
python
{ "resource": "" }
q13730
MakeExponentialPmf
train
def MakeExponentialPmf(lam, high, n=200): """Makes a discrete approximation to an exponential distribution. lam: parameter lambda in events per unit time high: upper bound n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() for x in numpy.linspace(0, high, n): p = EvalExponentialPdf(x, lam) pmf.Set(x, p) pmf.Normalize() return pmf
python
{ "resource": "" }
q13731
GaussianCdfInverse
train
def GaussianCdfInverse(p, mu=0, sigma=1): """Evaluates the inverse CDF of the gaussian distribution. See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function Args: p: float mu: mean parameter sigma: standard deviation parameter Returns: float """ x = ROOT2 * erfinv(2 * p - 1) return mu + x * sigma
python
{ "resource": "" }
q13732
LogBinomialCoef
train
def LogBinomialCoef(n, k): """Computes the log of the binomial coefficient. http://math.stackexchange.com/questions/64716/ approximating-the-logarithm-of-the-binomial-coefficient n: number of trials k: number of successes Returns: float """ return n * log(n) - k * log(k) - (n - k) * log(n - k)
python
{ "resource": "" }
q13733
Interpolator.Lookup
train
def Lookup(self, x): """Looks up x and returns the corresponding value of y.""" return self._Bisect(x, self.xs, self.ys)
python
{ "resource": "" }
q13734
Interpolator.Reverse
train
def Reverse(self, y): """Looks up y and returns the corresponding value of x.""" return self._Bisect(y, self.ys, self.xs)
python
{ "resource": "" }
q13735
_DictWrapper.InitMapping
train
def InitMapping(self, values): """Initializes with a map from value to probability. values: map from value to probability """ for value, prob in values.iteritems(): self.Set(value, prob)
python
{ "resource": "" }
q13736
_DictWrapper.InitPmf
train
def InitPmf(self, values): """Initializes with a Pmf. values: Pmf object """ for value, prob in values.Items(): self.Set(value, prob)
python
{ "resource": "" }
q13737
_DictWrapper.Copy
train
def Copy(self, name=None): """Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist """ new = copy.copy(self) new.d = copy.copy(self.d) new.name = name if name is not None else self.name return new
python
{ "resource": "" }
q13738
_DictWrapper.Scale
train
def Scale(self, factor): """Multiplies the values by a factor. factor: what to multiply by Returns: new object """ new = self.Copy() new.d.clear() for val, prob in self.Items(): new.Set(val * factor, prob) return new
python
{ "resource": "" }
q13739
_DictWrapper.Log
train
def Log(self, m=None): """Log transforms the probabilities. Removes values with probability 0. Normalizes so that the largest logprob is 0. """ if self.log: raise ValueError("Pmf/Hist already under a log transform") self.log = True if m is None: m = self.MaxLike() for x, p in self.d.iteritems(): if p: self.Set(x, math.log(p / m)) else: self.Remove(x)
python
{ "resource": "" }
q13740
_DictWrapper.Exp
train
def Exp(self, m=None): """Exponentiates the probabilities. m: how much to shift the ps before exponentiating If m is None, normalizes so that the largest prob is 1. """ if not self.log: raise ValueError("Pmf/Hist not under a log transform") self.log = False if m is None: m = self.MaxLike() for x, p in self.d.iteritems(): self.Set(x, math.exp(p - m))
python
{ "resource": "" }
q13741
Hist.IsSubset
train
def IsSubset(self, other): """Checks whether the values in this histogram are a subset of the values in the given histogram.""" for val, freq in self.Items(): if freq > other.Freq(val): return False return True
python
{ "resource": "" }
q13742
Hist.Subtract
train
def Subtract(self, other): """Subtracts the values in the given histogram from this histogram.""" for val, freq in other.Items(): self.Incr(val, -freq)
python
{ "resource": "" }
q13743
Pmf.ProbGreater
train
def ProbGreater(self, x): """Probability that a sample from this Pmf exceeds x. x: number returns: float probability """ t = [prob for (val, prob) in self.d.iteritems() if val > x] return sum(t)
python
{ "resource": "" }
q13744
Pmf.Normalize
train
def Normalize(self, fraction=1.0): """Normalizes this PMF so the sum of all probs is fraction. Args: fraction: what the total should be after normalization Returns: the total probability before normalizing """ if self.log: raise ValueError("Pmf is under a log transform") total = self.Total() if total == 0.0: raise ValueError('total probability is zero.') factor = float(fraction) / total for x in self.d: self.d[x] *= factor return total
python
{ "resource": "" }
q13745
Pmf.Random
train
def Random(self): """Chooses a random element from this PMF. Returns: float value from the Pmf """ if len(self.d) == 0: raise ValueError('Pmf contains no values.') target = random.random() total = 0.0 for x, p in self.d.iteritems(): total += p if total >= target: return x # we shouldn't get here assert False
python
{ "resource": "" }
q13746
Pmf.Mean
train
def Mean(self): """Computes the mean of a PMF. Returns: float mean """ mu = 0.0 for x, p in self.d.iteritems(): mu += p * x return mu
python
{ "resource": "" }
q13747
Pmf.Var
train
def Var(self, mu=None): """Computes the variance of a PMF. Args: mu: the point around which the variance is computed; if omitted, computes the mean Returns: float variance """ if mu is None: mu = self.Mean() var = 0.0 for x, p in self.d.iteritems(): var += p * (x - mu) ** 2 return var
python
{ "resource": "" }
q13748
Pmf.MaximumLikelihood
train
def MaximumLikelihood(self): """Returns the value with the highest probability. Returns: the value with the highest probability """ prob, val = max((prob, val) for val, prob in self.Items()) return val
python
{ "resource": "" }
q13749
Pmf.AddPmf
train
def AddPmf(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 + v2, p1 * p2) return pmf
python
{ "resource": "" }
q13750
Pmf.AddConstant
train
def AddConstant(self, other): """Computes the Pmf of the sum of a constant and values from self. other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 + other, p1) return pmf
python
{ "resource": "" }
q13751
Joint.Marginal
train
def Marginal(self, i, name=''): """Gets the marginal distribution of the indicated variable. i: index of the variable we want Returns: Pmf """ pmf = Pmf(name=name) for vs, prob in self.Items(): pmf.Incr(vs[i], prob) return pmf
python
{ "resource": "" }
q13752
Joint.Conditional
train
def Conditional(self, i, j, val, name=''): """Gets the conditional distribution of the indicated variable. Distribution of vs[i], conditioned on vs[j] = val. i: index of the variable we want j: which variable is conditioned on val: the value the jth variable has to have Returns: Pmf """ pmf = Pmf(name=name) for vs, prob in self.Items(): if vs[j] != val: continue pmf.Incr(vs[i], prob) pmf.Normalize() return pmf
python
{ "resource": "" }
q13753
Joint.MaxLikeInterval
train
def MaxLikeInterval(self, percentage=90): """Returns the maximum-likelihood credible interval. If percentage=90, computes a 90% CI containing the values with the highest likelihoods. percentage: float between 0 and 100 Returns: list of values from the suite """ interval = [] total = 0 t = [(prob, val) for val, prob in self.Items()] t.sort(reverse=True) for prob, val in t: interval.append(val) total += prob if total >= percentage / 100.0: break return interval
python
{ "resource": "" }
q13754
Cdf.Copy
train
def Copy(self, name=None): """Returns a copy of this Cdf. Args: name: string name for the new Cdf """ if name is None: name = self.name return Cdf(list(self.xs), list(self.ps), name)
python
{ "resource": "" }
q13755
Cdf.Shift
train
def Shift(self, term): """Adds a term to the xs. term: how much to add """ new = self.Copy() new.xs = [x + term for x in self.xs] return new
python
{ "resource": "" }
q13756
Cdf.Scale
train
def Scale(self, factor): """Multiplies the xs by a factor. factor: what to multiply by """ new = self.Copy() new.xs = [x * factor for x in self.xs] return new
python
{ "resource": "" }
q13757
Cdf.Mean
train
def Mean(self): """Computes the mean of a CDF. Returns: float mean """ old_p = 0 total = 0.0 for x, new_p in zip(self.xs, self.ps): p = new_p - old_p total += p * x old_p = new_p return total
python
{ "resource": "" }
q13758
Cdf.CredibleInterval
train
def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ prob = (1 - percentage / 100.0) / 2 interval = self.Value(prob), self.Value(1 - prob) return interval
python
{ "resource": "" }
q13759
Cdf.Render
train
def Render(self): """Generates a sequence of points suitable for plotting. An empirical CDF is a step function; linear interpolation can be misleading. Returns: tuple of (xs, ps) """ xs = [self.xs[0]] ps = [0.0] for i, p in enumerate(self.ps): xs.append(self.xs[i]) ps.append(p) try: xs.append(self.xs[i + 1]) ps.append(p) except IndexError: pass return xs, ps
python
{ "resource": "" }
q13760
Suite.LogUpdate
train
def LogUpdate(self, data): """Updates a suite of hypotheses based on new data. Modifies the suite directly; if you want to keep the original, make a copy. Note: unlike Update, LogUpdate does not normalize. Args: data: any representation of the data """ for hypo in self.Values(): like = self.LogLikelihood(data, hypo) self.Incr(hypo, like)
python
{ "resource": "" }
q13761
Suite.UpdateSet
train
def UpdateSet(self, dataset): """Updates each hypothesis based on the dataset. This is more efficient than calling Update repeatedly because it waits until the end to Normalize. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: the normalizing constant """ for data in dataset: for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize()
python
{ "resource": "" }
q13762
Suite.Print
train
def Print(self): """Prints the hypotheses and their probabilities.""" for hypo, prob in sorted(self.Items()): print(hypo, prob)
python
{ "resource": "" }
q13763
Suite.MakeOdds
train
def MakeOdds(self): """Transforms from probabilities to odds. Values with prob=0 are removed. """ for hypo, prob in self.Items(): if prob: self.Set(hypo, Odds(prob)) else: self.Remove(hypo)
python
{ "resource": "" }
q13764
Suite.MakeProbs
train
def MakeProbs(self): """Transforms from odds to probabilities.""" for hypo, odds in self.Items(): self.Set(hypo, Probability(odds))
python
{ "resource": "" }
q13765
Pdf.MakePmf
train
def MakePmf(self, xs, name=''): """Makes a discrete version of this Pdf, evaluated at xs. xs: equally-spaced sequence of values Returns: new Pmf """ pmf = Pmf(name=name) for x in xs: pmf.Set(x, self.Density(x)) pmf.Normalize() return pmf
python
{ "resource": "" }
q13766
Beta.Update
train
def Update(self, data): """Updates a Beta distribution. data: pair of int (heads, tails) """ heads, tails = data self.alpha += heads self.beta += tails
python
{ "resource": "" }
q13767
Beta.Sample
train
def Sample(self, n): """Generates a random sample from this distribution. n: int sample size """ size = n, return numpy.random.beta(self.alpha, self.beta, size)
python
{ "resource": "" }
q13768
Beta.MakePmf
train
def MakePmf(self, steps=101, name=''): """Returns a Pmf of this distribution. Note: Normally, we just evaluate the PDF at a sequence of points and treat the probability density as a probability mass. But if alpha or beta is less than one, we have to be more careful because the PDF goes to infinity at x=0 and x=1. In that case we evaluate the CDF and compute differences. """ if self.alpha < 1 or self.beta < 1: cdf = self.MakeCdf() pmf = cdf.MakePmf() return pmf xs = [i / (steps - 1.0) for i in xrange(steps)] probs = [self.EvalPdf(x) for x in xs] pmf = MakePmfFromDict(dict(zip(xs, probs)), name) return pmf
python
{ "resource": "" }
q13769
Beta.MakeCdf
train
def MakeCdf(self, steps=101): """Returns the CDF of this distribution.""" xs = [i / (steps - 1.0) for i in xrange(steps)] ps = [scipy.special.betainc(self.alpha, self.beta, x) for x in xs] cdf = Cdf(xs, ps) return cdf
python
{ "resource": "" }
q13770
Dirichlet.Update
train
def Update(self, data): """Updates a Dirichlet distribution. data: sequence of observations, in order corresponding to params """ m = len(data) self.params[:m] += data
python
{ "resource": "" }
q13771
Dirichlet.Random
train
def Random(self): """Generates a random variate from this distribution. Returns: normalized vector of fractions """ p = numpy.random.gamma(self.params) return p / p.sum()
python
{ "resource": "" }
q13772
Dirichlet.Likelihood
train
def Likelihood(self, data): """Computes the likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float probability """ m = len(data) if self.n < m: return 0 x = data p = self.Random() q = p[:m] ** x return q.prod()
python
{ "resource": "" }
q13773
Dirichlet.LogLikelihood
train
def LogLikelihood(self, data): """Computes the log likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float log probability """ m = len(data) if self.n < m: return float('-inf') x = self.Random() y = numpy.log(x[:m]) * data return y.sum()
python
{ "resource": "" }
q13774
Dirichlet.MarginalBeta
train
def MarginalBeta(self, i): """Computes the marginal distribution of the ith element. See http://en.wikipedia.org/wiki/Dirichlet_distribution #Marginal_distributions i: int Returns: Beta object """ alpha0 = self.params.sum() alpha = self.params[i] return Beta(alpha, alpha0 - alpha)
python
{ "resource": "" }
q13775
Dirichlet.PredictivePmf
train
def PredictivePmf(self, xs, name=''): """Makes a predictive distribution. xs: values to go into the Pmf Returns: Pmf that maps from x to the mean prevalence of x """ alpha0 = self.params.sum() ps = self.params / alpha0 return MakePmfFromItems(zip(xs, ps), name=name)
python
{ "resource": "" }
q13776
_get_ann
train
def _get_ann(dbs, features): """ Format annotations for HTML table output """ value = "" for db, feature in zip(dbs, features): value += db + ":" + feature return value
python
{ "resource": "" }
q13777
make_profile
train
def make_profile(data, out_dir, args): """ Make data report for each cluster """ safe_dirs(out_dir) main_table = [] header = ['id', 'ann'] n = len(data[0]) bar = ProgressBar(maxval=n) bar.start() bar.update(0) for itern, c in enumerate(data[0]): bar.update(itern) logger.debug("creating cluster: {}".format(c)) safe_dirs(os.path.join(out_dir, c)) valid, ann, pos_structure = _single_cluster(c, data, os.path.join(out_dir, c, "maps.tsv"), args) data[0][c].update({'profile': pos_structure}) loci = data[0][c]['loci'] data[0][c]['precursor'] = {"seq": precursor_sequence(loci[0][0:5], args.ref)} data[0][c]['precursor']["colors"] = _parse(data[0][c]['profile'], data[0][c]['precursor']["seq"]) data[0][c]['precursor'].update(run_rnafold(data[0][c]['precursor']['seq'])) return data
python
{ "resource": "" }
q13778
_expand
train
def _expand(dat, counts, start, end): """ expand the same counts from start to end """ for pos in range(start, end): for s in counts: dat[s][pos] += counts[s] return dat
python
{ "resource": "" }
q13779
_convert_to_df
train
def _convert_to_df(in_file, freq, raw_file): """ Convert mapped reads into per-sample, per-position count tables """ dat = defaultdict(Counter) if isinstance(in_file, (str, unicode)): with open(in_file) as in_handle: for line in in_handle: cols = line.strip().split("\t") counts = freq[cols[3]] dat = _expand(dat, counts, int(cols[1]), int(cols[2])) else: if raw_file: out_handle = open(raw_file, "w") for name in in_file: counts = freq[name] if raw_file: print("%s\t%s\t%s\t%s\t%s\t%s" % ("chr", in_file[name][0], in_file[name][1], name, sum(counts.values()), "+"), file=out_handle, end="") dat = _expand(dat, counts, in_file[name][0], in_file[name][1]) for s in dat: for p in dat[s]: dat[s][p] = mlog2(dat[s][p] + 1) return dat
python
{ "resource": "" }
q13780
_make
train
def _make(c): """ Collect valid samples and annotation lists for the html template """ ann = defaultdict(list) for pos in c['ann']: for db in pos: ann[db] += list(pos[db]) logger.debug(ann) valid = [l for l in c['valid']] ann_list = [", ".join(list(set(ann[feature]))) for feature in ann if feature in valid] return valid, ann_list
python
{ "resource": "" }
q13781
_single_cluster
train
def _single_cluster(c, data, out_file, args): """ Map sequences on precursors and create expression profile """ valid, ann = 0, 0 raw_file = None freq = defaultdict() [freq.update({s.keys()[0]: s.values()[0]}) for s in data[0][c]['freq']] names = [s.keys()[0] for s in data[0][c]['seqs']] seqs = [s.values()[0] for s in data[0][c]['seqs']] loci = data[0][c]['loci'] if loci[0][3] - loci[0][2] > 500: logger.info("locus bigger than 500 nt, skipping: %s" % loci) return valid, ann, {} if not file_exists(out_file): if args.razer: logger.debug("map with razer all sequences to all loci %s " % loci) map_to_precursors(seqs, names, {loci[0][0]: [loci[0][0:5]]}, out_file, args) else: logger.debug("map with biopython fn all sequences to all loci %s " % loci) if args.debug: raw_file = out_file out_file = map_to_precursor_biopython(seqs, names, loci[0][0:5], args) logger.debug("plot sequences on loci") df = _convert_to_df(out_file, freq, raw_file) if df: valid, ann = _make(data[0][c]) return valid, ann, df
python
{ "resource": "" }
q13782
read_cluster
train
def read_cluster(data, id=1): """Read json cluster and populate as cluster class""" cl = cluster(1) # seqs = [s.values()[0] for s in data['seqs']] names = [s.keys()[0] for s in data['seqs']] cl.add_id_member(names, 1) freq = defaultdict() [freq.update({s.keys()[0]: s.values()[0]}) for s in data['freq']] return cl
python
{ "resource": "" }
q13783
write_data
train
def write_data(data, out_file): """write json file from seqcluster cluster""" with open(out_file, 'w') as handle_out: handle_out.write(json.dumps([data], skipkeys=True, indent=2))
python
{ "resource": "" }
q13784
get_sequences_from_cluster
train
def get_sequences_from_cluster(c1, c2, data): """Get all sequences from two clusters and label each one's origin""" seqs1 = data[c1]['seqs'] seqs2 = data[c2]['seqs'] seqs = list(set(seqs1 + seqs2)) names = [] for s in seqs: if s in seqs1 and s in seqs2: names.append("both") elif s in seqs1: names.append(c1) else: names.append(c2) return seqs, names
python
{ "resource": "" }
q13785
map_to_precursors
train
def map_to_precursors(seqs, names, loci, out_file, args): """map sequences to precursors with razers3""" with make_temp_directory() as temp: pre_fasta = os.path.join(temp, "pre.fa") seqs_fasta = os.path.join(temp, "seqs.fa") out_sam = os.path.join(temp, "out.sam") pre_fasta = get_loci_fasta(loci, pre_fasta, args.ref) out_precursor_file = out_file.replace("tsv", "fa") seqs_fasta = get_seqs_fasta(seqs, names, seqs_fasta) # print(open(pre_fasta).read().split("\n")[1]) if find_cmd("razers3"): cmd = "razers3 -dr 2 -i 80 -rr 90 -f -o {out_sam} {temp}/pre.fa {seqs_fasta}" run(cmd.format(**locals())) out_file = read_alignment(out_sam, loci, seqs, out_file) shutil.copy(pre_fasta, out_precursor_file) return out_file
python
{ "resource": "" }
q13786
precursor_sequence
train
def precursor_sequence(loci, reference): """Get sequence from genome""" region = "%s\t%s\t%s\t.\t.\t%s" % (loci[1], loci[2], loci[3], loci[4]) precursor = pybedtools.BedTool(str(region), from_string=True).sequence(fi=reference, s=True) return open(precursor.seqfn).read().split("\n")[1]
python
{ "resource": "" }
q13787
map_to_precursors_on_fly
train
def map_to_precursors_on_fly(seqs, names, loci, args): """Map sequences to precursors with the franpr algorithm to avoid writing to disk""" precursor = precursor_sequence(loci, args.ref).upper() dat = dict() for s, n in itertools.izip(seqs, names): res = pyMatch.Match(precursor, str(s), 1, 3) if res > -1: dat[n] = [res, res + len(s)] logger.debug("mapped in %s: %s out of %s" % (loci, len(dat), len(seqs))) return dat
python
{ "resource": "" }
q13788
map_to_precursor_biopython
train
def map_to_precursor_biopython(seqs, names, loci, args): """map the sequences using biopython package""" precursor = precursor_sequence(loci, args.ref).upper() dat = dict() for s, n in itertools.izip(seqs, names): res = _align(str(s), precursor) if res: dat[n] = res logger.debug("mapped in %s: %s out of %s" % (loci, len(dat), len(seqs))) return dat
python
{ "resource": "" }
q13789
get_seqs_fasta
train
def get_seqs_fasta(seqs, names, out_fa): """get fasta from sequences""" with open(out_fa, 'w') as fa_handle: for s, n in itertools.izip(seqs, names): print(">cx{1}-{0}\n{0}".format(s, n), file=fa_handle) return out_fa
python
{ "resource": "" }
q13790
get_loci_fasta
train
def get_loci_fasta(loci, out_fa, ref): """get fasta from precursor""" if not find_cmd("bedtools"): raise ValueError("bedtools not installed") with make_temp_directory() as temp: bed_file = os.path.join(temp, "file.bed") with open(bed_file, 'w') as bed_handle: for nc, loci_list in loci.iteritems(): for l in loci_list: logger.debug("get_fasta: loci %s" % l) nc, c, s, e, st = l print("{0}\t{1}\t{2}\t{3}\t{3}\t{4}".format(c, s, e, nc, st), file=bed_handle) get_fasta(bed_file, ref, out_fa) return out_fa
python
{ "resource": "" }
q13791
read_alignment
train
def read_alignment(out_sam, loci, seqs, out_file): """read which seqs map to which loci and return a tab separated file""" hits = defaultdict(list) with open(out_file, "w") as out_handle: samfile = pysam.Samfile(out_sam, "r") for a in samfile.fetch(): if not a.is_unmapped: nm = int([t[1] for t in a.tags if t[0] == "NM"][0]) a = makeBED(a) if not a: continue ref, locus = get_loci(samfile.getrname(int(a.chr)), loci) hits[a.name].append((nm, "%s %s %s %s %s %s" % (a.name, a.name.split("-")[0], locus, ref, a.start, a.end))) for hit in hits.values(): nm = hit[0][0] for l in hit: if nm == l[0]: print(l[1], file=out_handle) return out_file
python
{ "resource": "" }
q13792
_download_mirbase
train
def _download_mirbase(args, version="CURRENT"): """ Download files from mirbase """ if not args.hairpin or not args.mirna: logger.info("Working with version %s" % version) hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz") mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz") if not file_exists(hairpin_fn): cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f %s" % (version, hairpin_fn, hairpin_fn) do.run(cmd_h, "download hairpin") if not file_exists(mirna_fn): cmd_m = "wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f %s" % (version, mirna_fn, mirna_fn) do.run(cmd_m, "download mirna") else: return args.hairpin, args.mirna
python
{ "resource": "" }
q13793
_make_unique
train
def _make_unique(name, idx): """Make a sequence name unique when it encodes only counts""" p = re.compile(".[aA-zZ]+_x[0-9]+") if p.match(name): tags = name[1:].split("_x") return ">%s_%s_x%s" % (tags[0], idx, tags[1]) return name.replace("@", ">")
python
{ "resource": "" }
q13794
_filter_seqs
train
def _filter_seqs(fn): """Convert names of sequences to unique ids""" out_file = op.splitext(fn)[0] + "_unique.fa" idx = 0 if not file_exists(out_file): with open(out_file, 'w') as out_handle: with open(fn) as in_handle: for line in in_handle: if line.startswith("@") or line.startswith(">"): fixed_name = _make_unique(line.strip(), idx) seq = in_handle.next().strip() counts = _get_freq(fixed_name) if len(seq) < 26 and (counts > 1 or counts == 0): idx += 1 print(fixed_name, file=out_handle, end="\n") print(seq, file=out_handle, end="\n") if line.startswith("@"): in_handle.next() in_handle.next() return out_file
python
{ "resource": "" }
q13795
_read_precursor
train
def _read_precursor(precursor, sps): """ Load precursor file for that species """ hairpin = defaultdict(str) name = None with open(precursor) as in_handle: for line in in_handle: if line.startswith(">"): if name and hairpin[name]: hairpin[name] = hairpin[name] + "NNNNNNNNNNNN" name = line.strip().replace(">", " ").split()[0] else: hairpin[name] += line.strip() hairpin[name] = hairpin[name] + "NNNNNNNNNNNN" return hairpin
python
{ "resource": "" }
q13796
_read_gtf
train
def _read_gtf(gtf): """ Load GTF file with precursor positions on genome """ if not gtf: return gtf db = defaultdict(list) with open(gtf) as in_handle: for line in in_handle: if line.startswith("#"): continue cols = line.strip().split("\t") name = [n.split("=")[1] for n in cols[-1].split(";") if n.startswith("Name")] chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6] if cols[2] == "miRNA_primary_transcript": db[name[0]].append([chrom, int(start), int(end), strand]) return db
python
{ "resource": "" }
q13797
_coord
train
def _coord(sequence, start, mirna, precursor, iso): """ Define t5 and t3 isomirs """ dif = abs(mirna[0] - start) if start < mirna[0]: iso.t5 = sequence[:dif].upper() elif start > mirna[0]: iso.t5 = precursor[mirna[0] - 1:mirna[0] - 1 + dif].lower() elif start == mirna[0]: iso.t5 = "NA" if dif > 4: logger.debug("start > 3 %s %s %s %s %s" % (start, len(sequence), dif, mirna, iso.format())) return None end = start + (len(sequence) - len(iso.add)) - 1 dif = abs(mirna[1] - end) if iso.add: sequence = sequence[:-len(iso.add)] # if dif > 3: # return None if end > mirna[1]: iso.t3 = sequence[-dif:].upper() elif end < mirna[1]: iso.t3 = precursor[mirna[1] - dif:mirna[1]].lower() elif end == mirna[1]: iso.t3 = "NA" if dif > 4: logger.debug("end > 3 %s %s %s %s %s" % (len(sequence), end, dif, mirna, iso.format())) return None logger.debug("%s %s %s %s %s %s" % (start, len(sequence), end, dif, mirna, iso.format())) return True
python
{ "resource": "" }
q13798
_realign
train
def _realign(seq, precursor, start): """ The actual fn that will realign the sequence """ error = set() pattern_addition = [[1, 1, 0], [1, 0, 1], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 1, 1]] for pos in range(0, len(seq)): if seq[pos] != precursor[(start + pos)]: error.add(pos) subs, add = [], [] for e in error: if e < len(seq) - 3: subs.append([e, seq[e], precursor[start + e]]) pattern, error_add = [], [] for e in range(len(seq) - 3, len(seq)): if e in error: pattern.append(1) error_add.append(e) else: pattern.append(0) for p in pattern_addition: if pattern == p: add = seq[error_add[0]:] break if not add and error_add: for e in error_add: subs.append([e, seq[e], precursor[start + e]]) return subs, add
python
{ "resource": "" }
q13799
_clean_hits
train
def _clean_hits(reads): """ Select only best matches """ new_reads = defaultdict(realign) for r in reads: world = {} sc = 0 for p in reads[r].precursors: world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence)) if sc < world[p]: sc = world[p] new_reads[r] = reads[r] for p in world: logger.debug("score %s %s %s" % (r, p, world[p])) if sc != world[p]: logger.debug("remove %s %s %s" % (r, p, world[p])) new_reads[r].remove_precursor(p) return new_reads
python
{ "resource": "" }