Dataset columns:

- repository_name: string, 7 to 55 characters
- func_path_in_repository: string, 4 to 223 characters
- func_name: string, 1 to 134 characters
- whole_func_string: string, 75 to 104k characters
- language: string, 1 distinct value
- func_code_string: string, 75 to 104k characters
- func_code_tokens: list, 19 to 28.4k items
- func_documentation_string: string, 1 to 46.9k characters
- func_documentation_tokens: list, 1 to 1.97k items
- split_name: string, 1 distinct value
- func_code_url: string, 87 to 315 characters
rodluger/everest
everest/transit.py
Get_rhos
def Get_rhos(dur, **kwargs):
    '''
    Returns the value of the stellar density for a given transit duration
    :py:obj:`dur`, given the :py:class:`everest.pysyzygy` transit
    :py:obj:`kwargs`.

    '''
    if ps is None:
        raise Exception("Unable to import `pysyzygy`.")
    assert dur >= 0.01 and dur <= 0.5, "Invalid value for the duration."

    def Dur(rhos, **kwargs):
        t0 = kwargs.get('t0', 0.)
        time = np.linspace(t0 - 0.5, t0 + 0.5, 1000)
        try:
            t = time[np.where(ps.Transit(rhos=rhos, **kwargs)(time) < 1)]
        except Exception:
            # No valid transit model for this density: treat as zero duration
            return 0.
        return t[-1] - t[0]

    def DiffSq(rhos):
        return (dur - Dur(rhos, **kwargs)) ** 2

    return fmin(DiffSq, [0.2], disp=False)
python
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/transit.py#L137-L161
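Get_rhos inverts a forward model numerically: it wraps the duration computation in a squared-difference objective and minimises it with SciPy's fmin. Below is a minimal, self-contained sketch of the same pattern, with a hypothetical closed-form duration model standing in for the pysyzygy transit so it runs without the everest dependencies.

import numpy as np
from scipy.optimize import fmin

def model_duration(rhos):
    # Hypothetical stand-in for Dur(): duration shrinks as density grows
    return 0.2 / np.sqrt(rhos)

def diff_sq(x):
    rhos = float(x[0])
    target = 0.1  # desired transit duration in days
    return (target - model_duration(rhos)) ** 2

print(fmin(diff_sq, [0.2], disp=False))  # converges to rhos = 4.0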
rodluger/everest
everest/transit.py
Transit
def Transit(time, t0=0., dur=0.1, per=3.56789, depth=0.001, **kwargs):
    '''
    A `Mandel-Agol <http://adsabs.harvard.edu/abs/2002ApJ...580L.171M>`_
    transit model, but with the depth and the duration as primary
    input variables.

    :param numpy.ndarray time: The time array
    :param float t0: The time of first transit in units of \
           :py:obj:`BJD` - 2454833.
    :param float dur: The transit duration in days. Don't go too crazy on \
           this one -- very small or very large values will break the \
           inverter. Default 0.1
    :param float per: The orbital period in days. Default 3.56789
    :param float depth: The fractional transit depth. Default 0.001
    :param dict kwargs: Any additional keyword arguments, passed directly \
           to :py:func:`pysyzygy.Transit`
    :returns tmod: The transit model evaluated at the same times as the \
           :py:obj:`time` array

    '''
    if ps is None:
        raise Exception("Unable to import `pysyzygy`.")
    # Note that rhos can affect RpRs, so we should really do this iteratively,
    # but the effect is pretty negligible!
    RpRs = Get_RpRs(depth, t0=t0, per=per, **kwargs)
    rhos = Get_rhos(dur, t0=t0, per=per, **kwargs)
    return ps.Transit(t0=t0, per=per, RpRs=RpRs, rhos=rhos, **kwargs)(time)
python
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/transit.py#L164-L191
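A hedged usage sketch for Transit, assuming everest (and its pysyzygy dependency) is installed and that the import path matches the module path in the URL above:

import numpy as np
from everest.transit import Transit  # assumes everest + pysyzygy are available

time = np.arange(0., 10., 0.02)  # ~10 days at roughly 30-minute cadence
tmod = Transit(time, t0=1.0, dur=0.12, per=3.5, depth=0.002)
# tmod is ~1.0 out of transit and dips to ~0.998 in transit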
lsbardel/python-stdnet
examples/tsmodels.py
TimeSeries.intervals
def intervals(self, startdate, enddate, parseinterval=None):
    '''Given ``startdate`` and ``enddate`` dates, evaluate the date
intervals over which data is not available. It returns a list of
two-dimensional tuples containing the start and end date of each
interval. The list can contain 0, 1 or 2 tuples.'''
    return missing_intervals(startdate, enddate, self.data_start,
                             self.data_end, dateconverter=self.todate,
                             parseinterval=parseinterval)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/examples/tsmodels.py#L35-L42
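The 0-, 1- or 2-tuple outcomes are easiest to see with a toy re-implementation of the underlying idea (an illustration only, not the stdnet missing_intervals API):

from datetime import date

def missing(start, end, data_start, data_end):
    """Toy version: the gaps are the parts of [start, end] outside the data."""
    gaps = []
    if start < data_start:
        gaps.append((start, data_start))
    if end > data_end:
        gaps.append((data_end, end))
    return gaps

print(missing(date(2012, 1, 1), date(2012, 3, 1),
              date(2012, 1, 10), date(2012, 2, 10)))
# two gaps: (2012-01-01, 2012-01-10) and (2012-02-10, 2012-03-01)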
lsbardel/python-stdnet
stdnet/apps/columnts/models.py
ColumnTS.front
def front(self, *fields):
    '''Return the front pair of the structure'''
    v, f = tuple(self.irange(0, 0, fields=fields))
    if v:
        return (v[0], dict(((field, f[field][0]) for field in f)))
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/models.py#L52-L56
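A hedged usage sketch (the series and field names are hypothetical); ``front`` returns ``None`` implicitly when the series is empty:

# `ts` is assumed to be a populated ColumnTS with 'open' and 'close' fields
result = ts.front('open', 'close')
if result is not None:
    when, values = result  # e.g. (first timestamp, {'open': ..., 'close': ...})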
lsbardel/python-stdnet
stdnet/apps/columnts/models.py
ColumnTS.istats
def istats(self, start=0, end=-1, fields=None):
    '''Perform a multivariate statistics calculation of this
:class:`ColumnTS` from *start* to *end*.

:param start: Optional index (rank) where to start the analysis.
:param end: Optional index (rank) where to end the analysis.
:param fields: Optional subset of :meth:`fields` to perform analysis on.
    If not provided, all fields are included in the analysis.
'''
    backend = self.read_backend
    return backend.execute(
        backend.structure(self).istats(start, end, fields),
        self._stats)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/models.py#L102-L113
lsbardel/python-stdnet
stdnet/apps/columnts/models.py
ColumnTS.stats
def stats(self, start, end, fields=None):
    '''Perform a multivariate statistics calculation of this
:class:`ColumnTS` from a *start* date/datetime to an *end* date/datetime.

:param start: Start date for analysis.
:param end: End date for analysis.
:param fields: Optional subset of :meth:`fields` to perform analysis on.
    If not provided, all fields are included in the analysis.
'''
    start = self.pickler.dumps(start)
    end = self.pickler.dumps(end)
    backend = self.read_backend
    return backend.execute(
        backend.structure(self).stats(start, end, fields),
        self._stats)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/models.py#L115-L129
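A usage sketch covering both entry points; the series and field names are hypothetical, and depending on backend configuration the result may be returned directly or via an asynchronous handle:

from datetime import date

res_rank = ts.istats(start=0, end=-1)  # rank-based: the whole series
res_date = ts.stats(date(2013, 1, 1), date(2013, 6, 30),
                    fields=('open', 'close'))  # date-based window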
lsbardel/python-stdnet
stdnet/apps/columnts/models.py
ColumnTS.imulti_stats
def imulti_stats(self, start=0, end=-1, series=None, fields=None,
                 stats=None):
    '''Perform cross multivariate statistics calculation of this
:class:`ColumnTS` and other optional *series* from *start* to *end*.

:parameter start: the start rank.
:parameter end: the end rank.
:parameter fields: names of fields to perform multivariate statistics on.
:parameter series: a list of two-element tuples, each containing the id
    of a :class:`ColumnTS` and a field name.
:parameter stats: list of statistics to evaluate.
    Default: ['covariance']
'''
    stats = stats or self.default_multi_stats
    backend = self.read_backend
    return backend.execute(
        backend.structure(self).imulti_stats(start, end, fields, series,
                                             stats),
        self._stats)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/models.py#L131-L149
lsbardel/python-stdnet
stdnet/apps/columnts/models.py
ColumnTS.merge
def merge(self, *series, **kwargs):
    '''Merge this :class:`ColumnTS` with several other *series*.

:parameter series: a list of tuples where the nth element is a tuple
    of the form::

        (weight_n, ts_n1, ts_n2, ..., ts_nMn)

The result will be calculated using the formula::

    ts = weight_1*ts_11*ts_12*...*ts_1M1 +
         weight_2*ts_21*ts_22*...*ts_2M2 +
         ...
'''
    session = self.session
    if not session:
        raise SessionNotAvailable('No session available')
    self.check_router(session.router, *series)
    return self._merge(*series, **kwargs)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/models.py#L171-L188
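The merge formula is a weighted sum of element-wise products of series. A self-contained numpy illustration of the arithmetic only (not the stdnet API):

import numpy as np

ts_11 = np.array([1., 2., 3.])
ts_12 = np.array([2., 2., 2.])
ts_21 = np.array([10., 10., 10.])
weight_1, weight_2 = 0.5, 0.1

# ts = weight_1*ts_11*ts_12 + weight_2*ts_21
ts = weight_1 * ts_11 * ts_12 + weight_2 * ts_21
print(ts)  # [2. 3. 4.]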
lsbardel/python-stdnet
stdnet/apps/columnts/models.py
ColumnTS.merged_series
def merged_series(cls, *series, **kwargs):
    '''Merge ``series`` and return the results without storing data
in the backend server.'''
    router, backend = cls.check_router(None, *series)
    if backend:
        target = router.register(cls(), backend)
        router.session().add(target)
        target._merge(*series, **kwargs)
        backend = target.backend
        return backend.execute(
            backend.structure(target).irange_and_delete(),
            target.load_data)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/models.py#L191-L202
lsbardel/python-stdnet
stdnet/utils/skiplist.py
skiplist.rank
def rank(self, score):
    '''Return the 0-based index (rank) of ``score``. If the score is
not present, return a negative integer ``-1 - r``, where ``r`` is the
rank at which ``score`` would be inserted (the number of elements with
a score less than ``score``).'''
    node = self.__head
    rank = 0
    for i in range(self.__level - 1, -1, -1):
        while node.next[i] and node.next[i].score <= score:
            rank += node.width[i]
            node = node.next[i]
    if node.score == score:
        return rank - 1
    else:
        return -1 - rank
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/skiplist.py#L74-L87
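The return convention mirrors a negated bisect insertion point. A self-contained equivalent on a plain sorted list:

import bisect

def list_rank(scores, score):
    """Same return convention as skiplist.rank, on a sorted list."""
    i = bisect.bisect_right(scores, score)
    if i and scores[i - 1] == score:
        return i - 1   # 0-based rank of the score
    return -1 - i      # score absent: encodes the insertion point

scores = [1, 3, 5, 9]
print(list_rank(scores, 5))  # 2
print(list_rank(scores, 4))  # -3, i.e. would be inserted at index 2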
lsbardel/python-stdnet
stdnet/odm/base.py
ModelMeta.make_object
def make_object(self, state=None, backend=None):
    '''Create a new instance of :attr:`model` from a *state* tuple.'''
    model = self.model
    obj = model.__new__(model)
    self.load_state(obj, state, backend)
    return obj
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/base.py#L201-L206
lsbardel/python-stdnet
stdnet/odm/base.py
ModelMeta.is_valid
def is_valid(self, instance):
    '''Perform validation for *instance* and store serialised data,
indexes and errors in the local cache. Return ``True`` if the instance
is ready to be saved to the database.'''
    dbdata = instance.dbdata
    data = dbdata['cleaned_data'] = {}
    errors = dbdata['errors'] = {}
    # Loop over scalar fields first
    for field, value in instance.fieldvalue_pairs():
        name = field.attname
        try:
            svalue = field.set_get_value(instance, value)
        except Exception as e:
            errors[name] = str(e)
        else:
            if (svalue is None or svalue == '') and field.required:
                errors[name] = ("Field '{0}' is required for '{1}'."
                                .format(name, self))
            else:
                if isinstance(svalue, dict):
                    data.update(svalue)
                elif svalue is not None:
                    data[name] = svalue
    return len(errors) == 0
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/base.py#L239-L262
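A hedged sketch of the validate-then-save flow this enables (the instance and session are hypothetical):

# is_valid() fills instance.dbdata['cleaned_data'] and ['errors']
if instance._meta.is_valid(instance):
    session.add(instance)
else:
    for name, message in instance.dbdata['errors'].items():
        print(name, '->', message)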
lsbardel/python-stdnet
stdnet/odm/base.py
ModelMeta.backend_fields
def backend_fields(self, fields):
    '''Return a two-element tuple containing a list of field names and
a list of field attribute names.'''
    dfields = self.dfields
    processed = set()
    names = []
    atts = []
    pkname = self.pkname()
    for name in fields:
        if name == pkname or name in processed:
            continue
        elif name in dfields:
            processed.add(name)
            field = dfields[name]
            names.append(field.name)
            atts.append(field.attname)
        else:
            bname = name.split(JSPLITTER)[0]
            if bname in dfields:
                field = dfields[bname]
                if field.type in ('json object', 'related object'):
                    processed.add(name)
                    names.append(name)
                    atts.append(name)
    return names, atts
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/base.py#L292-L316
lsbardel/python-stdnet
stdnet/odm/base.py
ModelMeta.as_dict
def as_dict(self):
    '''Model metadata in a dictionary'''
    pk = self.pk
    id_type = 3
    if pk.type == 'auto':
        id_type = 1
    return {'id_name': pk.name,
            'id_type': id_type,
            'sorted': bool(self.ordering),
            'autoincr': self.ordering and self.ordering.auto,
            'multi_fields': [field.name for field in self.multifields],
            'indices': dict(((idx.attname, idx.unique)
                             for idx in self.indices))}
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/base.py#L318-L330
lsbardel/python-stdnet
stdnet/odm/base.py
Model.get_state
def get_state(self, **kwargs):
    '''Return the current :class:`ModelState` for this :class:`Model`.
If ``kwargs`` parameters are passed, a new :class:`ModelState` is
created, otherwise it returns the cached value.'''
    dbdata = self.dbdata
    if 'state' not in dbdata or kwargs:
        dbdata['state'] = ModelState(self, **kwargs)
    return dbdata['state']
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/base.py#L475-L482
lsbardel/python-stdnet
stdnet/odm/base.py
Model.uuid
def uuid(self):
    '''Universally unique identifier for an instance of a :class:`Model`.
    '''
    pk = self.pkvalue()
    if not pk:
        raise self.DoesNotExist(
            'Object not saved. Cannot obtain universally unique id')
    return self.get_uuid(pk)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/base.py#L493-L500
lsbardel/python-stdnet
stdnet/odm/base.py
Model.backend
def backend(self, client=None):
    '''The :class:`stdnet.BackendDataServer` for this instance.
    It can be ``None``.
    '''
    session = self.session
    if session:
        return session.model(self).backend
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/base.py#L517-L524
lsbardel/python-stdnet
stdnet/odm/base.py
Model.read_backend
def read_backend(self, client=None):
    '''The read :class:`stdnet.BackendDataServer` for this instance.
    It can be ``None``.
    '''
    session = self.session
    if session:
        return session.model(self).read_backend
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/base.py#L527-L534
lsbardel/python-stdnet
stdnet/odm/models.py
create_model
def create_model(name, *attributes, **params):
    '''Create a :class:`Model` class for objects requiring an interface
similar to :class:`StdModel`. We refer to such models as
:ref:`local models <local-models>` since instances of these models are not
persistent on a :class:`stdnet.BackendDataServer`.

:param name: Name of the model class.
:param attributes: positional attribute names. These are the only
    attributes available to the model during the default constructor.
:param params: key-valued parameters to pass to the :class:`ModelMeta`
    constructor.
:return: a local :class:`Model` class.
'''
    params['register'] = False
    params['attributes'] = attributes
    kwargs = {'manager_class': params.pop('manager_class', Manager),
              'Meta': params}
    return ModelType(name, (StdModel,), kwargs)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L290-L307
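A hedged usage sketch, assuming create_model is importable from stdnet.odm as the module path suggests:

from stdnet import odm  # assumption: create_model is exported here

# A hypothetical local (non-persistent) model with two positional attributes
Task = odm.create_model('Task', 'subject', 'done')
task = Task(subject='write docs', done=False)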
lsbardel/python-stdnet
stdnet/odm/models.py
StdModel.loadedfields
def loadedfields(self):
    '''Generator of fields loaded from the database'''
    if self._loadedfields is None:
        for field in self._meta.scalarfields:
            yield field
    else:
        fields = self._meta.dfields
        processed = set()
        for name in self._loadedfields:
            if name in processed:
                continue
            if name in fields:
                processed.add(name)
                yield fields[name]
            else:
                name = name.split(JSPLITTER)[0]
                if name in fields and name not in processed:
                    field = fields[name]
                    if field.type == 'json object':
                        processed.add(name)
                        yield field
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L57-L77
lsbardel/python-stdnet
stdnet/odm/models.py
StdModel.fieldvalue_pairs
def fieldvalue_pairs(self, exclude_cache=False):
    '''Generator of field, value pairs. Fields correspond to the ones
which have been loaded (usually all of them) or not loaded but modified.
Check the :ref:`load_only <performance-loadonly>` query function for more
details.

If *exclude_cache* evaluates to ``True``, fields with the
:attr:`Field.as_cache` attribute set to ``True`` won't be included.

:rtype: a generator of two-element tuples'''
    for field in self._meta.scalarfields:
        if exclude_cache and field.as_cache:
            continue
        name = field.attname
        if hasattr(self, name):
            yield field, getattr(self, name)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L79-L95
lsbardel/python-stdnet
stdnet/odm/models.py
StdModel.clear_cache_fields
def clear_cache_fields(self):
    '''Set cache fields to ``None``. Check :attr:`Field.as_cache`
for information regarding fields which are considered cache.'''
    for field in self._meta.scalarfields:
        if field.as_cache:
            setattr(self, field.name, None)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L97-L102
lsbardel/python-stdnet
stdnet/odm/models.py
StdModel.get_attr_value
def get_attr_value(self, name):
    '''Retrieve the ``value`` for the attribute ``name``. The ``name``
can be nested following the :ref:`double underscore <tutorial-underscore>`
notation, for example ``group__name``. If the attribute is not available
it raises :class:`AttributeError`.'''
    if name in self._meta.dfields:
        return self._meta.dfields[name].get_value(self)
    elif not name.startswith('__') and JSPLITTER in name:
        bits = name.split(JSPLITTER)
        fname = bits[0]
        if fname in self._meta.dfields:
            return self._meta.dfields[fname].get_value(self, *bits[1:])
        else:
            return getattr(self, name)
    else:
        return getattr(self, name)
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L104-L119
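A usage sketch of the double-underscore notation (the model instance and its 'group' foreign key are hypothetical):

instance.get_attr_value('group')        # the related object itself
instance.get_attr_value('group__name')  # nested lookup via double underscore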
lsbardel/python-stdnet
stdnet/odm/models.py
StdModel.clone
def clone(self, **data):
    '''Utility method for cloning the instance as a new object.

:parameter data: additional fields which override field data.
:rtype: a new instance of this class.
'''
    meta = self._meta
    session = self.session
    pkname = meta.pkname()
    pkvalue = data.pop(pkname, None)
    fields = self.todict(exclude_cache=True)
    fields.update(data)
    fields.pop('__dbdata__', None)
    obj = self._meta.make_object((pkvalue, None, fields))
    obj.session = session
    return obj
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L121-L136
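A hedged usage sketch: the primary key is dropped unless explicitly passed, so adding the clone to a session stores it as a new record (field names hypothetical):

copy = obj.clone(subject='copy of %s' % obj.subject)
session.add(copy)  # persisted with a fresh primary key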
lsbardel/python-stdnet
stdnet/odm/models.py
StdModel.todict
def todict(self, exclude_cache=False):
    '''Return a dictionary of serialised scalar fields for pickling.
If the *exclude_cache* flag is ``True``, fields with the
:attr:`Field.as_cache` attribute set to ``True`` will be excluded.'''
    odict = {}
    for field, value in self.fieldvalue_pairs(exclude_cache=exclude_cache):
        value = field.serialise(value)
        if value:
            odict[field.name] = value
    if self._dbdata and 'id' in self._dbdata:
        odict['__dbdata__'] = {'id': self._dbdata['id']}
    return odict
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L145-L156
lsbardel/python-stdnet
stdnet/odm/models.py
StdModel.load_fields
def load_fields(self, *fields):
    '''Load extra fields to this :class:`StdModel`.'''
    if self._loadedfields is not None:
        session = self.session
        if session is None:
            raise SessionNotAvailable('No session available')
        meta = self._meta
        kwargs = {meta.pkname(): self.pkvalue()}
        obj = session.query(self).load_only(fields).get(**kwargs)
        for name in fields:
            field = meta.dfields.get(name)
            if field is not None:
                setattr(self, field.attname,
                        getattr(obj, field.attname, None))
python
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L172-L184
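A hedged sketch of the intended call pattern (`Profile`, its fields, and `session` are illustrative names, not from the record): an instance fetched with only some fields can pull the missing ones in place.

# fetch with a restricted field set, then load 'bio' lazily
profile = session.query(Profile).load_only('name').get(id=1)
profile.load_fields('bio')   # one extra query against the same primary key
print(profile.bio)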
lsbardel/python-stdnet
stdnet/odm/models.py
StdModel.load_related_model
def load_related_model(self, name, load_only=None, dont_load=None):
    '''Load the :class:`ForeignKey` field ``name`` if this is part of the
fields of this model and if the related object is not already loaded. It
is used by the lazy loading mechanism of
:ref:`one-to-many <one-to-many>` relationships.

:parameter name: the :attr:`Field.name` of the :class:`ForeignKey` to load.
:parameter load_only: Optional parameters which specify the fields to load.
:parameter dont_load: Optional parameters which specify the fields not to load.
:return: the related :class:`StdModel` instance.
'''
    field = self._meta.dfields.get(name)
    if not field:
        raise ValueError('Field "%s" not available' % name)
    elif not field.type == 'related object':
        raise ValueError('Field "%s" not a foreign key' % name)
    return self._load_related_model(field, load_only, dont_load)
python
def load_related_model(self, name, load_only=None, dont_load=None):
    '''Load the :class:`ForeignKey` field ``name`` if this is part of the
fields of this model and if the related object is not already loaded. It
is used by the lazy loading mechanism of
:ref:`one-to-many <one-to-many>` relationships.

:parameter name: the :attr:`Field.name` of the :class:`ForeignKey` to load.
:parameter load_only: Optional parameters which specify the fields to load.
:parameter dont_load: Optional parameters which specify the fields not to load.
:return: the related :class:`StdModel` instance.
'''
    field = self._meta.dfields.get(name)
    if not field:
        raise ValueError('Field "%s" not available' % name)
    elif not field.type == 'related object':
        raise ValueError('Field "%s" not a foreign key' % name)
    return self._load_related_model(field, load_only, dont_load)
[ "def", "load_related_model", "(", "self", ",", "name", ",", "load_only", "=", "None", ",", "dont_load", "=", "None", ")", ":", "field", "=", "self", ".", "_meta", ".", "dfields", ".", "get", "(", "name", ")", "if", "not", "field", ":", "raise", "ValueError", "(", "'Field \"%s\" not available'", "%", "name", ")", "elif", "not", "field", ".", "type", "==", "'related object'", ":", "raise", "ValueError", "(", "'Field \"%s\" not a foreign key'", "%", "name", ")", "return", "self", ".", "_load_related_model", "(", "field", ",", "load_only", ",", "dont_load", ")" ]
Load the :class:`ForeignKey` field ``name`` if this is part of the fields
of this model and if the related object is not already loaded. It is used
by the lazy loading mechanism of :ref:`one-to-many <one-to-many>`
relationships.

:parameter name: the :attr:`Field.name` of the :class:`ForeignKey` to load.
:parameter load_only: Optional parameters which specify the fields to load.
:parameter dont_load: Optional parameters which specify the fields not to load.
:return: the related :class:`StdModel` instance.
[ "Load", "a", "the", ":", "class", ":", "ForeignKey", "field", "name", "if", "this", "is", "part", "of", "the", "fields", "of", "this", "model", "and", "if", "the", "related", "object", "is", "not", "already", "loaded", ".", "It", "is", "used", "by", "the", "lazy", "loading", "mechanism", "of", ":", "ref", ":", "one", "-", "to", "-", "many", "<one", "-", "to", "-", "many", ">", "relationships", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L189-L205
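A hedged usage sketch (an `Order` model with a ForeignKey field named `customer` is assumed purely for illustration):

order = session.query(Order).get(id=7)
# lazily fetch the related instance, loading only its 'name' field
customer = order.load_related_model('customer', load_only=('name',))
print(customer.name)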
lsbardel/python-stdnet
stdnet/odm/models.py
StdModel.from_base64_data
def from_base64_data(cls, **kwargs):
    '''Load a :class:`StdModel` from possibly base64-encoded data.

This method is used to load models from data obtained from the
:meth:`tojson` method.'''
    o = cls()
    meta = cls._meta
    pkname = meta.pkname()
    for name, value in iteritems(kwargs):
        if name == pkname:
            field = meta.pk
        elif name in meta.dfields:
            field = meta.dfields[name]
        else:
            continue
        value = field.to_python(value)
        setattr(o, field.attname, value)
    return o
python
def from_base64_data(cls, **kwargs):
    '''Load a :class:`StdModel` from possibly base64-encoded data.

This method is used to load models from data obtained from the
:meth:`tojson` method.'''
    o = cls()
    meta = cls._meta
    pkname = meta.pkname()
    for name, value in iteritems(kwargs):
        if name == pkname:
            field = meta.pk
        elif name in meta.dfields:
            field = meta.dfields[name]
        else:
            continue
        value = field.to_python(value)
        setattr(o, field.attname, value)
    return o
[ "def", "from_base64_data", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "o", "=", "cls", "(", ")", "meta", "=", "cls", ".", "_meta", "pkname", "=", "meta", ".", "pkname", "(", ")", "for", "name", ",", "value", "in", "iteritems", "(", "kwargs", ")", ":", "if", "name", "==", "pkname", ":", "field", "=", "meta", ".", "pk", "elif", "name", "in", "meta", ".", "dfields", ":", "field", "=", "meta", ".", "dfields", "[", "name", "]", "else", ":", "continue", "value", "=", "field", ".", "to_python", "(", "value", ")", "setattr", "(", "o", ",", "field", ".", "attname", ",", "value", ")", "return", "o" ]
Load a :class:`StdModel` from possibly base64-encoded data.

This method is used to load models from data obtained from the
:meth:`tojson` method.
[ "Load", "a", ":", "class", ":", "StdModel", "from", "possibly", "base64encoded", "data", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/models.py#L214-L231
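A hedged sketch of the round trip (the `Profile` model and its fields are illustrative): each value is passed through the matching field's to_python() before being set on the new instance.

payload = {'id': '1', 'name': 'alice'}   # e.g. parsed from tojson() output
profile = Profile.from_base64_data(**payload)
assert profile.name == 'alice'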
rodluger/everest
everest/standalone.py
DetrendFITS
def DetrendFITS(fitsfile, raw=False, season=None, clobber=False, **kwargs):
    """
    De-trend a K2 FITS file using :py:class:`everest.detrender.rPLD`.

    :param str fitsfile: The full path to the FITS file
    :param ndarray aperture: A 2D integer array corresponding to the \
           desired photometric aperture (1 = in aperture, 0 = outside \
           aperture). Default is to interactively select an aperture.
    :param kwargs: Any kwargs accepted by :py:class:`everest.detrender.rPLD`.

    :returns: An :py:class:`everest.Everest` instance.

    """
    # Get info
    EPIC = pyfits.getheader(fitsfile, 0)['KEPLERID']
    if season is None:
        season = pyfits.getheader(fitsfile, 0)['CAMPAIGN']
        if season is None or season == "":
            season = 0
    everestfile = os.path.join(
        everest.missions.k2.TargetDirectory(EPIC, season),
        everest.missions.k2.FITSFile(EPIC, season))

    # De-trend?
    if clobber or not os.path.exists(everestfile):

        # Get raw data
        data = GetData(fitsfile, EPIC, season, clobber=clobber, **kwargs)

        # De-trend
        model = everest.rPLD(EPIC, data=data, season=season, debug=True,
                             clobber=clobber, **kwargs)

        # Publish it
        everest.fits.MakeFITS(model)
        shutil.copyfile(os.path.join(model.dir, model.name + '.pdf'),
                        os.path.join(model.dir,
                                     model._mission.DVSFile(model.ID,
                                                            model.season,
                                                            model.cadence)))

    # Return an Everest instance
    return everest.Everest(EPIC, season=season)
python
def DetrendFITS(fitsfile, raw=False, season=None, clobber=False, **kwargs):
    """
    De-trend a K2 FITS file using :py:class:`everest.detrender.rPLD`.

    :param str fitsfile: The full path to the FITS file
    :param ndarray aperture: A 2D integer array corresponding to the \
           desired photometric aperture (1 = in aperture, 0 = outside \
           aperture). Default is to interactively select an aperture.
    :param kwargs: Any kwargs accepted by :py:class:`everest.detrender.rPLD`.

    :returns: An :py:class:`everest.Everest` instance.

    """
    # Get info
    EPIC = pyfits.getheader(fitsfile, 0)['KEPLERID']
    if season is None:
        season = pyfits.getheader(fitsfile, 0)['CAMPAIGN']
        if season is None or season == "":
            season = 0
    everestfile = os.path.join(
        everest.missions.k2.TargetDirectory(EPIC, season),
        everest.missions.k2.FITSFile(EPIC, season))

    # De-trend?
    if clobber or not os.path.exists(everestfile):

        # Get raw data
        data = GetData(fitsfile, EPIC, season, clobber=clobber, **kwargs)

        # De-trend
        model = everest.rPLD(EPIC, data=data, season=season, debug=True,
                             clobber=clobber, **kwargs)

        # Publish it
        everest.fits.MakeFITS(model)
        shutil.copyfile(os.path.join(model.dir, model.name + '.pdf'),
                        os.path.join(model.dir,
                                     model._mission.DVSFile(model.ID,
                                                            model.season,
                                                            model.cadence)))

    # Return an Everest instance
    return everest.Everest(EPIC, season=season)
[ "def", "DetrendFITS", "(", "fitsfile", ",", "raw", "=", "False", ",", "season", "=", "None", ",", "clobber", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Get info", "EPIC", "=", "pyfits", ".", "getheader", "(", "fitsfile", ",", "0", ")", "[", "'KEPLERID'", "]", "if", "season", "is", "None", ":", "season", "=", "pyfits", ".", "getheader", "(", "fitsfile", ",", "0", ")", "[", "'CAMPAIGN'", "]", "if", "season", "is", "None", "or", "season", "==", "\"\"", ":", "season", "=", "0", "everestfile", "=", "os", ".", "path", ".", "join", "(", "everest", ".", "missions", ".", "k2", ".", "TargetDirectory", "(", "EPIC", ",", "season", ")", ",", "everest", ".", "missions", ".", "k2", ".", "FITSFile", "(", "EPIC", ",", "season", ")", ")", "# De-trend?", "if", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "everestfile", ")", ":", "# Get raw data", "data", "=", "GetData", "(", "fitsfile", ",", "EPIC", ",", "season", ",", "clobber", "=", "clobber", ",", "*", "*", "kwargs", ")", "# De-trend", "model", "=", "everest", ".", "rPLD", "(", "EPIC", ",", "data", "=", "data", ",", "season", "=", "season", ",", "debug", "=", "True", ",", "clobber", "=", "clobber", ",", "*", "*", "kwargs", ")", "# Publish it", "everest", ".", "fits", ".", "MakeFITS", "(", "model", ")", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "model", ".", "dir", ",", "model", ".", "name", "+", "'.pdf'", ")", ",", "os", ".", "path", ".", "join", "(", "model", ".", "dir", ",", "model", ".", "_mission", ".", "DVSFile", "(", "model", ".", "ID", ",", "model", ".", "season", ",", "model", ".", "cadence", ")", ")", ")", "# Return an Everest instance", "return", "everest", ".", "Everest", "(", "EPIC", ",", "season", "=", "season", ")" ]
De-trend a K2 FITS file using :py:class:`everest.detrender.rPLD`.

:param str fitsfile: The full path to the FITS file
:param ndarray aperture: A 2D integer array corresponding to the \
       desired photometric aperture (1 = in aperture, 0 = outside \
       aperture). Default is to interactively select an aperture.
:param kwargs: Any kwargs accepted by :py:class:`everest.detrender.rPLD`.

:returns: An :py:class:`everest.Everest` instance.
[ "De", "-", "trend", "a", "K2", "FITS", "file", "using", ":", "py", ":", "class", ":", "everest", ".", "detrender", ".", "rPLD", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/standalone.py#L42-L86
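A minimal usage sketch (the filename is made up; plot() is assumed to come from the returned everest.Everest instance):

# de-trend a locally downloaded K2 target pixel file and inspect the result
star = DetrendFITS('ktwo205071984-c02_lpd-targ.fits', clobber=False)
star.plot()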
rodluger/everest
everest/standalone.py
GetData
def GetData(fitsfile, EPIC, campaign, clobber=False,
            saturation_tolerance=-0.1,
            bad_bits=[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17],
            get_hires=False, get_nearby=False, aperture=None, **kwargs):
    '''
    Returns a :py:obj:`DataContainer` instance with the
    raw data for the target.

    :param str fitsfile: The full raw target pixel file path
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param float saturation_tolerance: Target is considered saturated \
           if flux is within this fraction of the pixel well depth. \
           Default -0.1
    :param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider \
           outliers when computing the model. \
           Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
    :param bool get_hires: Download a high resolution image of the target? \
           Default :py:obj:`False`
    :param bool get_nearby: Retrieve location of nearby sources? \
           Default :py:obj:`False`

    '''
    # Get the npz file name
    filename = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
                            ('%09d' % EPIC)[:4] + '00000',
                            ('%09d' % EPIC)[4:], 'data.npz')

    # Create the dir
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))

    # Check for saved data
    if not os.path.exists(filename) or clobber:

        log.info("Fetching data for target...")

        # Load the tpf
        with pyfits.open(fitsfile) as f:
            qdata = f[1].data

        # Get the header info
        fitsheader = [pyfits.getheader(fitsfile, 0).cards,
                      pyfits.getheader(fitsfile, 1).cards,
                      pyfits.getheader(fitsfile, 2).cards]

        # Get a hi res image of the target
        if get_hires:
            try:
                hires = GetHiResImage(EPIC)
            except ValueError:
                hires = None
        else:
            hires = None

        # Get nearby sources
        if get_nearby:
            try:
                nearby = GetSources(EPIC)
            except ValueError:
                nearby = []
        else:
            nearby = []

        # Get the arrays
        cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
        time = np.array(qdata.field('TIME'), dtype='float64')
        fpix = np.array(qdata.field('FLUX'), dtype='float64')
        fpix_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
        qual = np.array(qdata.field('QUALITY'), dtype=int)

        # Get rid of NaNs in the time array by interpolating
        naninds = np.where(np.isnan(time))
        time = Interpolate(np.arange(0, len(time)), naninds, time)

        # Get the motion vectors (if available!)
        pc1 = np.array(qdata.field('POS_CORR1'), dtype='float64')
        pc2 = np.array(qdata.field('POS_CORR2'), dtype='float64')
        if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)):
            pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1)
            pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2)
        else:
            pc1 = None
            pc2 = None

        # Get the static pixel images for plotting
        pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]]

        # Get the aperture interactively
        if aperture is None:
            aperture = ApertureSelector(time[::10], fpix[::10],
                                        title='EPIC %d' % EPIC).aperture
        if np.sum(aperture) == 0:
            raise ValueError("Empty aperture!")

        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/
        # atomic-writing-to-file-with-python
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        f = NamedTemporaryFile("wb", delete=False)
        np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix,
                            fpix_err=fpix_err, qual=qual, aperture=aperture,
                            pc1=pc1, pc2=pc2, fitsheader=fitsheader,
                            pixel_images=pixel_images, nearby=nearby,
                            hires=hires)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, filename)

    # Load
    data = np.load(filename)
    aperture = data['aperture'][()]
    pixel_images = data['pixel_images']
    nearby = data['nearby'][()]
    hires = data['hires'][()]
    fitsheader = data['fitsheader']
    cadn = data['cadn']
    time = data['time']
    fpix = data['fpix']
    fpix_err = data['fpix_err']
    qual = data['qual']
    pc1 = data['pc1']
    pc2 = data['pc2']

    # Compute the saturation flux and the 97.5th percentile
    # flux in each pixel of the aperture. We're going
    # to compare these to decide if the star is saturated.
    satflx = SaturationFlux(EPIC, campaign=campaign) * \
        (1. + saturation_tolerance)
    f97 = np.zeros((fpix.shape[1], fpix.shape[2]))
    for i in range(fpix.shape[1]):
        for j in range(fpix.shape[2]):
            if aperture[i, j]:
                # Let's remove NaNs...
                tmp = np.delete(fpix[:, i, j], np.where(
                    np.isnan(fpix[:, i, j])))
                # ... and really bad outliers...
                if len(tmp):
                    f = SavGol(tmp)
                    med = np.nanmedian(f)
                    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
                    bad = np.where((f > med + 10. * MAD) |
                                   (f < med - 10. * MAD))[0]
                    tmp = np.delete(tmp, bad)
                    # ... so we can compute the 97.5th percentile flux
                    i97 = int(0.975 * len(tmp))
                    tmp = tmp[np.argsort(tmp)[i97]]
                    f97[i, j] = tmp

    # Check if any of the pixels are actually saturated
    if np.nanmax(f97) <= satflx:
        log.info("No saturated columns detected.")
        saturated = False
        aperture[np.isnan(fpix[0])] = 0
        ap = np.where(aperture & 1)
        fpix2D = np.array([f[ap] for f in fpix], dtype='float64')
        fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64')
    else:
        # We need to collapse the saturated columns
        saturated = True
        ncol = 0
        fpixnew = []
        ferrnew = []
        for j in range(aperture.shape[1]):
            if np.any(f97[:, j] > satflx):
                marked = False
                collapsed = np.zeros(len(fpix[:, 0, 0]))
                collapsed_err2 = np.zeros(len(fpix[:, 0, 0]))
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        if not marked:
                            aperture[i, j] = AP_COLLAPSED_PIXEL
                            marked = True
                        else:
                            aperture[i, j] = AP_SATURATED_PIXEL
                        collapsed += fpix[:, i, j]
                        collapsed_err2 += fpix_err[:, i, j] ** 2
                if np.any(collapsed):
                    fpixnew.append(collapsed)
                    ferrnew.append(np.sqrt(collapsed_err2))
                    ncol += 1
            else:
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        fpixnew.append(fpix[:, i, j])
                        ferrnew.append(fpix_err[:, i, j])
        fpix2D = np.array(fpixnew).T
        fpix_err2D = np.array(ferrnew).T
        log.info("Collapsed %d saturated column(s)." % ncol)

    # Compute the background
    binds = np.where(aperture ^ 1)
    if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0):
        bkg = np.nanmedian(np.array([f[binds] for f in fpix],
                                    dtype='float64'), axis=1)
        # Uncertainty of the median:
        # http://davidmlane.com/hyperstat/A106993.html
        bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err],
                                                dtype='float64'), axis=1) \
            / np.sqrt(len(binds[0]))
        bkg = bkg.reshape(-1, 1)
        bkg_err = bkg_err.reshape(-1, 1)
    else:
        bkg = 0.
        bkg_err = 0.

    # Make everything 2D and remove the background
    fpix = fpix2D - bkg
    fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2)
    flux = np.sum(fpix, axis=1)

    # Get NaN data points
    nanmask = np.where(np.isnan(flux) | (flux == 0))[0]

    # Get flagged data points -- we won't train our model on them
    badmask = []
    for b in bad_bits:
        badmask += list(np.where(qual & 2 ** (b - 1))[0])

    # Flag >10 sigma outliers -- same thing.
    tmpmask = np.array(list(set(np.concatenate([badmask, nanmask]))))
    t = np.delete(time, tmpmask)
    f = np.delete(flux, tmpmask)
    f = SavGol(f)
    med = np.nanmedian(f)
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
    badmask.extend([np.argmax(time == t[i]) for i in bad])

    # Campaign 2 hack: the first day or two are screwed up
    if campaign == 2:
        badmask.extend(np.where(time < 2061.5)[0])

    # Finalize the mask
    badmask = np.array(sorted(list(set(badmask))))

    # Interpolate the nans
    fpix = Interpolate(time, nanmask, fpix)
    fpix_err = Interpolate(time, nanmask, fpix_err)

    # Return
    data = DataContainer()
    data.ID = EPIC
    data.campaign = campaign
    data.cadn = cadn
    data.time = time
    data.fpix = fpix
    data.fpix_err = fpix_err
    data.nanmask = nanmask
    data.badmask = badmask
    data.aperture = aperture
    data.aperture_name = 'custom'
    data.apertures = dict(custom=aperture)
    data.quality = qual
    data.Xpos = pc1
    data.Ypos = pc2
    data.meta = fitsheader
    data.mag = fitsheader[0]['KEPMAG'][1]
    if type(data.mag) is pyfits.card.Undefined:
        data.mag = np.nan
    data.pixel_images = pixel_images
    data.nearby = nearby
    data.hires = hires
    data.saturated = saturated
    data.bkg = bkg

    return data
python
def GetData(fitsfile, EPIC, campaign, clobber=False,
            saturation_tolerance=-0.1,
            bad_bits=[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17],
            get_hires=False, get_nearby=False, aperture=None, **kwargs):
    '''
    Returns a :py:obj:`DataContainer` instance with the
    raw data for the target.

    :param str fitsfile: The full raw target pixel file path
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param float saturation_tolerance: Target is considered saturated \
           if flux is within this fraction of the pixel well depth. \
           Default -0.1
    :param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider \
           outliers when computing the model. \
           Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
    :param bool get_hires: Download a high resolution image of the target? \
           Default :py:obj:`False`
    :param bool get_nearby: Retrieve location of nearby sources? \
           Default :py:obj:`False`

    '''
    # Get the npz file name
    filename = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
                            ('%09d' % EPIC)[:4] + '00000',
                            ('%09d' % EPIC)[4:], 'data.npz')

    # Create the dir
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))

    # Check for saved data
    if not os.path.exists(filename) or clobber:

        log.info("Fetching data for target...")

        # Load the tpf
        with pyfits.open(fitsfile) as f:
            qdata = f[1].data

        # Get the header info
        fitsheader = [pyfits.getheader(fitsfile, 0).cards,
                      pyfits.getheader(fitsfile, 1).cards,
                      pyfits.getheader(fitsfile, 2).cards]

        # Get a hi res image of the target
        if get_hires:
            try:
                hires = GetHiResImage(EPIC)
            except ValueError:
                hires = None
        else:
            hires = None

        # Get nearby sources
        if get_nearby:
            try:
                nearby = GetSources(EPIC)
            except ValueError:
                nearby = []
        else:
            nearby = []

        # Get the arrays
        cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
        time = np.array(qdata.field('TIME'), dtype='float64')
        fpix = np.array(qdata.field('FLUX'), dtype='float64')
        fpix_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
        qual = np.array(qdata.field('QUALITY'), dtype=int)

        # Get rid of NaNs in the time array by interpolating
        naninds = np.where(np.isnan(time))
        time = Interpolate(np.arange(0, len(time)), naninds, time)

        # Get the motion vectors (if available!)
        pc1 = np.array(qdata.field('POS_CORR1'), dtype='float64')
        pc2 = np.array(qdata.field('POS_CORR2'), dtype='float64')
        if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)):
            pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1)
            pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2)
        else:
            pc1 = None
            pc2 = None

        # Get the static pixel images for plotting
        pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]]

        # Get the aperture interactively
        if aperture is None:
            aperture = ApertureSelector(time[::10], fpix[::10],
                                        title='EPIC %d' % EPIC).aperture
        if np.sum(aperture) == 0:
            raise ValueError("Empty aperture!")

        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/
        # atomic-writing-to-file-with-python
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        f = NamedTemporaryFile("wb", delete=False)
        np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix,
                            fpix_err=fpix_err, qual=qual, aperture=aperture,
                            pc1=pc1, pc2=pc2, fitsheader=fitsheader,
                            pixel_images=pixel_images, nearby=nearby,
                            hires=hires)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, filename)

    # Load
    data = np.load(filename)
    aperture = data['aperture'][()]
    pixel_images = data['pixel_images']
    nearby = data['nearby'][()]
    hires = data['hires'][()]
    fitsheader = data['fitsheader']
    cadn = data['cadn']
    time = data['time']
    fpix = data['fpix']
    fpix_err = data['fpix_err']
    qual = data['qual']
    pc1 = data['pc1']
    pc2 = data['pc2']

    # Compute the saturation flux and the 97.5th percentile
    # flux in each pixel of the aperture. We're going
    # to compare these to decide if the star is saturated.
    satflx = SaturationFlux(EPIC, campaign=campaign) * \
        (1. + saturation_tolerance)
    f97 = np.zeros((fpix.shape[1], fpix.shape[2]))
    for i in range(fpix.shape[1]):
        for j in range(fpix.shape[2]):
            if aperture[i, j]:
                # Let's remove NaNs...
                tmp = np.delete(fpix[:, i, j], np.where(
                    np.isnan(fpix[:, i, j])))
                # ... and really bad outliers...
                if len(tmp):
                    f = SavGol(tmp)
                    med = np.nanmedian(f)
                    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
                    bad = np.where((f > med + 10. * MAD) |
                                   (f < med - 10. * MAD))[0]
                    tmp = np.delete(tmp, bad)
                    # ... so we can compute the 97.5th percentile flux
                    i97 = int(0.975 * len(tmp))
                    tmp = tmp[np.argsort(tmp)[i97]]
                    f97[i, j] = tmp

    # Check if any of the pixels are actually saturated
    if np.nanmax(f97) <= satflx:
        log.info("No saturated columns detected.")
        saturated = False
        aperture[np.isnan(fpix[0])] = 0
        ap = np.where(aperture & 1)
        fpix2D = np.array([f[ap] for f in fpix], dtype='float64')
        fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64')
    else:
        # We need to collapse the saturated columns
        saturated = True
        ncol = 0
        fpixnew = []
        ferrnew = []
        for j in range(aperture.shape[1]):
            if np.any(f97[:, j] > satflx):
                marked = False
                collapsed = np.zeros(len(fpix[:, 0, 0]))
                collapsed_err2 = np.zeros(len(fpix[:, 0, 0]))
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        if not marked:
                            aperture[i, j] = AP_COLLAPSED_PIXEL
                            marked = True
                        else:
                            aperture[i, j] = AP_SATURATED_PIXEL
                        collapsed += fpix[:, i, j]
                        collapsed_err2 += fpix_err[:, i, j] ** 2
                if np.any(collapsed):
                    fpixnew.append(collapsed)
                    ferrnew.append(np.sqrt(collapsed_err2))
                    ncol += 1
            else:
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        fpixnew.append(fpix[:, i, j])
                        ferrnew.append(fpix_err[:, i, j])
        fpix2D = np.array(fpixnew).T
        fpix_err2D = np.array(ferrnew).T
        log.info("Collapsed %d saturated column(s)." % ncol)

    # Compute the background
    binds = np.where(aperture ^ 1)
    if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0):
        bkg = np.nanmedian(np.array([f[binds] for f in fpix],
                                    dtype='float64'), axis=1)
        # Uncertainty of the median:
        # http://davidmlane.com/hyperstat/A106993.html
        bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err],
                                                dtype='float64'), axis=1) \
            / np.sqrt(len(binds[0]))
        bkg = bkg.reshape(-1, 1)
        bkg_err = bkg_err.reshape(-1, 1)
    else:
        bkg = 0.
        bkg_err = 0.

    # Make everything 2D and remove the background
    fpix = fpix2D - bkg
    fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2)
    flux = np.sum(fpix, axis=1)

    # Get NaN data points
    nanmask = np.where(np.isnan(flux) | (flux == 0))[0]

    # Get flagged data points -- we won't train our model on them
    badmask = []
    for b in bad_bits:
        badmask += list(np.where(qual & 2 ** (b - 1))[0])

    # Flag >10 sigma outliers -- same thing.
    tmpmask = np.array(list(set(np.concatenate([badmask, nanmask]))))
    t = np.delete(time, tmpmask)
    f = np.delete(flux, tmpmask)
    f = SavGol(f)
    med = np.nanmedian(f)
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
    badmask.extend([np.argmax(time == t[i]) for i in bad])

    # Campaign 2 hack: the first day or two are screwed up
    if campaign == 2:
        badmask.extend(np.where(time < 2061.5)[0])

    # Finalize the mask
    badmask = np.array(sorted(list(set(badmask))))

    # Interpolate the nans
    fpix = Interpolate(time, nanmask, fpix)
    fpix_err = Interpolate(time, nanmask, fpix_err)

    # Return
    data = DataContainer()
    data.ID = EPIC
    data.campaign = campaign
    data.cadn = cadn
    data.time = time
    data.fpix = fpix
    data.fpix_err = fpix_err
    data.nanmask = nanmask
    data.badmask = badmask
    data.aperture = aperture
    data.aperture_name = 'custom'
    data.apertures = dict(custom=aperture)
    data.quality = qual
    data.Xpos = pc1
    data.Ypos = pc2
    data.meta = fitsheader
    data.mag = fitsheader[0]['KEPMAG'][1]
    if type(data.mag) is pyfits.card.Undefined:
        data.mag = np.nan
    data.pixel_images = pixel_images
    data.nearby = nearby
    data.hires = hires
    data.saturated = saturated
    data.bkg = bkg

    return data
[ "def", "GetData", "(", "fitsfile", ",", "EPIC", ",", "campaign", ",", "clobber", "=", "False", ",", "saturation_tolerance", "=", "-", "0.1", ",", "bad_bits", "=", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "8", ",", "9", ",", "11", ",", "12", ",", "13", ",", "14", ",", "16", ",", "17", "]", ",", "get_hires", "=", "False", ",", "get_nearby", "=", "False", ",", "aperture", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Get the npz file name", "filename", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "campaign", ",", "(", "'%09d'", "%", "EPIC", ")", "[", ":", "4", "]", "+", "'00000'", ",", "(", "'%09d'", "%", "EPIC", ")", "[", "4", ":", "]", ",", "'data.npz'", ")", "# Create the dir", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", "# Check for saved data", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", "or", "clobber", ":", "log", ".", "info", "(", "\"Fetching data for target...\"", ")", "# Load the tpf", "with", "pyfits", ".", "open", "(", "fitsfile", ")", "as", "f", ":", "qdata", "=", "f", "[", "1", "]", ".", "data", "# Get the header info", "fitsheader", "=", "[", "pyfits", ".", "getheader", "(", "fitsfile", ",", "0", ")", ".", "cards", ",", "pyfits", ".", "getheader", "(", "fitsfile", ",", "1", ")", ".", "cards", ",", "pyfits", ".", "getheader", "(", "fitsfile", ",", "2", ")", ".", "cards", "]", "# Get a hi res image of the target", "if", "get_hires", ":", "try", ":", "hires", "=", "GetHiResImage", "(", "EPIC", ")", "except", "ValueError", ":", "hires", "=", "None", "else", ":", "hires", "=", "None", "# Get nearby sources", "if", "get_nearby", ":", "try", ":", "nearby", "=", "GetSources", "(", "EPIC", ")", "except", "ValueError", ":", "nearby", "=", "[", "]", "else", ":", "nearby", "=", "[", "]", "# Get the arrays", "cadn", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'CADENCENO'", ")", ",", "dtype", "=", "'int32'", ")", "time", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'TIME'", ")", ",", "dtype", "=", "'float64'", ")", "fpix", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'FLUX'", ")", ",", "dtype", "=", "'float64'", ")", "fpix_err", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'FLUX_ERR'", ")", ",", "dtype", "=", "'float64'", ")", "qual", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'QUALITY'", ")", ",", "dtype", "=", "int", ")", "# Get rid of NaNs in the time array by interpolating", "naninds", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "time", ")", ")", "time", "=", "Interpolate", "(", "np", ".", "arange", "(", "0", ",", "len", "(", "time", ")", ")", ",", "naninds", ",", "time", ")", "# Get the motion vectors (if available!)", "pc1", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'POS_CORR1'", ")", ",", "dtype", "=", "'float64'", ")", "pc2", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'POS_CORR2'", ")", ",", "dtype", "=", "'float64'", ")", "if", "not", "np", ".", "all", "(", "np", ".", "isnan", "(", "pc1", ")", ")", "and", "not", "np", ".", "all", "(", "np", ".", "isnan", "(", "pc2", ")", ")", ":", "pc1", "=", "Interpolate", "(", "time", ",", "np", ".", "where", "(", "np", ".", "isnan", "(", "pc1", ")", ")", ",", "pc1", ")", "pc2", "=", "Interpolate", "(", "time", ",", "np", ".", "where", "(", "np", ".", "isnan", 
"(", "pc2", ")", ")", ",", "pc2", ")", "else", ":", "pc1", "=", "None", "pc2", "=", "None", "# Get the static pixel images for plotting", "pixel_images", "=", "[", "fpix", "[", "0", "]", ",", "fpix", "[", "len", "(", "fpix", ")", "//", "2", "]", ",", "fpix", "[", "len", "(", "fpix", ")", "-", "1", "]", "]", "# Get the aperture interactively", "if", "aperture", "is", "None", ":", "aperture", "=", "ApertureSelector", "(", "time", "[", ":", ":", "10", "]", ",", "fpix", "[", ":", ":", "10", "]", ",", "title", "=", "'EPIC %d'", "%", "EPIC", ")", ".", "aperture", "if", "np", ".", "sum", "(", "aperture", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Empty aperture!\"", ")", "# Atomically write to disk.", "# http://stackoverflow.com/questions/2333872/", "# atomic-writing-to-file-with-python", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", "f", "=", "NamedTemporaryFile", "(", "\"wb\"", ",", "delete", "=", "False", ")", "np", ".", "savez_compressed", "(", "f", ",", "cadn", "=", "cadn", ",", "time", "=", "time", ",", "fpix", "=", "fpix", ",", "fpix_err", "=", "fpix_err", ",", "qual", "=", "qual", ",", "aperture", "=", "aperture", ",", "pc1", "=", "pc1", ",", "pc2", "=", "pc2", ",", "fitsheader", "=", "fitsheader", ",", "pixel_images", "=", "pixel_images", ",", "nearby", "=", "nearby", ",", "hires", "=", "hires", ")", "f", ".", "flush", "(", ")", "os", ".", "fsync", "(", "f", ".", "fileno", "(", ")", ")", "f", ".", "close", "(", ")", "shutil", ".", "move", "(", "f", ".", "name", ",", "filename", ")", "# Load", "data", "=", "np", ".", "load", "(", "filename", ")", "aperture", "=", "data", "[", "'aperture'", "]", "[", "(", ")", "]", "pixel_images", "=", "data", "[", "'pixel_images'", "]", "nearby", "=", "data", "[", "'nearby'", "]", "[", "(", ")", "]", "hires", "=", "data", "[", "'hires'", "]", "[", "(", ")", "]", "fitsheader", "=", "data", "[", "'fitsheader'", "]", "cadn", "=", "data", "[", "'cadn'", "]", "time", "=", "data", "[", "'time'", "]", "fpix", "=", "data", "[", "'fpix'", "]", "fpix_err", "=", "data", "[", "'fpix_err'", "]", "qual", "=", "data", "[", "'qual'", "]", "pc1", "=", "data", "[", "'pc1'", "]", "pc2", "=", "data", "[", "'pc2'", "]", "# Compute the saturation flux and the 97.5th percentile", "# flux in each pixel of the aperture. We're going", "# to compare these to decide if the star is saturated.", "satflx", "=", "SaturationFlux", "(", "EPIC", ",", "campaign", "=", "campaign", ")", "*", "(", "1.", "+", "saturation_tolerance", ")", "f97", "=", "np", ".", "zeros", "(", "(", "fpix", ".", "shape", "[", "1", "]", ",", "fpix", ".", "shape", "[", "2", "]", ")", ")", "for", "i", "in", "range", "(", "fpix", ".", "shape", "[", "1", "]", ")", ":", "for", "j", "in", "range", "(", "fpix", ".", "shape", "[", "2", "]", ")", ":", "if", "aperture", "[", "i", ",", "j", "]", ":", "# Let's remove NaNs...", "tmp", "=", "np", ".", "delete", "(", "fpix", "[", ":", ",", "i", ",", "j", "]", ",", "np", ".", "where", "(", "np", ".", "isnan", "(", "fpix", "[", ":", ",", "i", ",", "j", "]", ")", ")", ")", "# ... 
and really bad outliers...", "if", "len", "(", "tmp", ")", ":", "f", "=", "SavGol", "(", "tmp", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "bad", "=", "np", ".", "where", "(", "(", "f", ">", "med", "+", "10.", "*", "MAD", ")", "|", "(", "f", "<", "med", "-", "10.", "*", "MAD", ")", ")", "[", "0", "]", "np", ".", "delete", "(", "tmp", ",", "bad", ")", "# ... so we can compute the 97.5th percentile flux", "i97", "=", "int", "(", "0.975", "*", "len", "(", "tmp", ")", ")", "tmp", "=", "tmp", "[", "np", ".", "argsort", "(", "tmp", ")", "[", "i97", "]", "]", "f97", "[", "i", ",", "j", "]", "=", "tmp", "# Check if any of the pixels are actually saturated", "if", "np", ".", "nanmax", "(", "f97", ")", "<=", "satflx", ":", "log", ".", "info", "(", "\"No saturated columns detected.\"", ")", "saturated", "=", "False", "aperture", "[", "np", ".", "isnan", "(", "fpix", "[", "0", "]", ")", "]", "=", "0", "ap", "=", "np", ".", "where", "(", "aperture", "&", "1", ")", "fpix2D", "=", "np", ".", "array", "(", "[", "f", "[", "ap", "]", "for", "f", "in", "fpix", "]", ",", "dtype", "=", "'float64'", ")", "fpix_err2D", "=", "np", ".", "array", "(", "[", "p", "[", "ap", "]", "for", "p", "in", "fpix_err", "]", ",", "dtype", "=", "'float64'", ")", "else", ":", "# We need to collapse the saturated columns", "saturated", "=", "True", "ncol", "=", "0", "fpixnew", "=", "[", "]", "ferrnew", "=", "[", "]", "for", "j", "in", "range", "(", "aperture", ".", "shape", "[", "1", "]", ")", ":", "if", "np", ".", "any", "(", "f97", "[", ":", ",", "j", "]", ">", "satflx", ")", ":", "marked", "=", "False", "collapsed", "=", "np", ".", "zeros", "(", "len", "(", "fpix", "[", ":", ",", "0", ",", "0", "]", ")", ")", "collapsed_err2", "=", "np", ".", "zeros", "(", "len", "(", "fpix", "[", ":", ",", "0", ",", "0", "]", ")", ")", "for", "i", "in", "range", "(", "aperture", ".", "shape", "[", "0", "]", ")", ":", "if", "aperture", "[", "i", ",", "j", "]", ":", "if", "not", "marked", ":", "aperture", "[", "i", ",", "j", "]", "=", "AP_COLLAPSED_PIXEL", "marked", "=", "True", "else", ":", "aperture", "[", "i", ",", "j", "]", "=", "AP_SATURATED_PIXEL", "collapsed", "+=", "fpix", "[", ":", ",", "i", ",", "j", "]", "collapsed_err2", "+=", "fpix_err", "[", ":", ",", "i", ",", "j", "]", "**", "2", "if", "np", ".", "any", "(", "collapsed", ")", ":", "fpixnew", ".", "append", "(", "collapsed", ")", "ferrnew", ".", "append", "(", "np", ".", "sqrt", "(", "collapsed_err2", ")", ")", "ncol", "+=", "1", "else", ":", "for", "i", "in", "range", "(", "aperture", ".", "shape", "[", "0", "]", ")", ":", "if", "aperture", "[", "i", ",", "j", "]", ":", "fpixnew", ".", "append", "(", "fpix", "[", ":", ",", "i", ",", "j", "]", ")", "ferrnew", ".", "append", "(", "fpix_err", "[", ":", ",", "i", ",", "j", "]", ")", "fpix2D", "=", "np", ".", "array", "(", "fpixnew", ")", ".", "T", "fpix_err2D", "=", "np", ".", "array", "(", "ferrnew", ")", ".", "T", "log", ".", "info", "(", "\"Collapsed %d saturated column(s).\"", "%", "ncol", ")", "# Compute the background", "binds", "=", "np", ".", "where", "(", "aperture", "^", "1", ")", "if", "RemoveBackground", "(", "EPIC", ",", "campaign", "=", "campaign", ")", "and", "(", "len", "(", "binds", "[", "0", "]", ")", ">", "0", ")", ":", "bkg", "=", "np", ".", "nanmedian", "(", "np", ".", "array", "(", "[", "f", "[", "binds", "]", "for", "f", "in", "fpix", "]", ",", "dtype", "=", "'float64'", ")", ",", "axis", "=", 
"1", ")", "# Uncertainty of the median:", "# http://davidmlane.com/hyperstat/A106993.html", "bkg_err", "=", "1.253", "*", "np", ".", "nanmedian", "(", "np", ".", "array", "(", "[", "e", "[", "binds", "]", "for", "e", "in", "fpix_err", "]", ",", "dtype", "=", "'float64'", ")", ",", "axis", "=", "1", ")", "/", "np", ".", "sqrt", "(", "len", "(", "binds", "[", "0", "]", ")", ")", "bkg", "=", "bkg", ".", "reshape", "(", "-", "1", ",", "1", ")", "bkg_err", "=", "bkg_err", ".", "reshape", "(", "-", "1", ",", "1", ")", "else", ":", "bkg", "=", "0.", "bkg_err", "=", "0.", "# Make everything 2D and remove the background", "fpix", "=", "fpix2D", "-", "bkg", "fpix_err", "=", "np", ".", "sqrt", "(", "fpix_err2D", "**", "2", "+", "bkg_err", "**", "2", ")", "flux", "=", "np", ".", "sum", "(", "fpix", ",", "axis", "=", "1", ")", "# Get NaN data points", "nanmask", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "flux", ")", "|", "(", "flux", "==", "0", ")", ")", "[", "0", "]", "# Get flagged data points -- we won't train our model on them", "badmask", "=", "[", "]", "for", "b", "in", "bad_bits", ":", "badmask", "+=", "list", "(", "np", ".", "where", "(", "qual", "&", "2", "**", "(", "b", "-", "1", ")", ")", "[", "0", "]", ")", "# Flag >10 sigma outliers -- same thing.", "tmpmask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "badmask", ",", "nanmask", "]", ")", ")", ")", ")", "t", "=", "np", ".", "delete", "(", "time", ",", "tmpmask", ")", "f", "=", "np", ".", "delete", "(", "flux", ",", "tmpmask", ")", "f", "=", "SavGol", "(", "f", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "bad", "=", "np", ".", "where", "(", "(", "f", ">", "med", "+", "10.", "*", "MAD", ")", "|", "(", "f", "<", "med", "-", "10.", "*", "MAD", ")", ")", "[", "0", "]", "badmask", ".", "extend", "(", "[", "np", ".", "argmax", "(", "time", "==", "t", "[", "i", "]", ")", "for", "i", "in", "bad", "]", ")", "# Campaign 2 hack: the first day or two are screwed up", "if", "campaign", "==", "2", ":", "badmask", ".", "extend", "(", "np", ".", "where", "(", "time", "<", "2061.5", ")", "[", "0", "]", ")", "# Finalize the mask", "badmask", "=", "np", ".", "array", "(", "sorted", "(", "list", "(", "set", "(", "badmask", ")", ")", ")", ")", "# Interpolate the nans", "fpix", "=", "Interpolate", "(", "time", ",", "nanmask", ",", "fpix", ")", "fpix_err", "=", "Interpolate", "(", "time", ",", "nanmask", ",", "fpix_err", ")", "# Return", "data", "=", "DataContainer", "(", ")", "data", ".", "ID", "=", "EPIC", "data", ".", "campaign", "=", "campaign", "data", ".", "cadn", "=", "cadn", "data", ".", "time", "=", "time", "data", ".", "fpix", "=", "fpix", "data", ".", "fpix_err", "=", "fpix_err", "data", ".", "nanmask", "=", "nanmask", "data", ".", "badmask", "=", "badmask", "data", ".", "aperture", "=", "aperture", "data", ".", "aperture_name", "=", "'custom'", "data", ".", "apertures", "=", "dict", "(", "custom", "=", "aperture", ")", "data", ".", "quality", "=", "qual", "data", ".", "Xpos", "=", "pc1", "data", ".", "Ypos", "=", "pc2", "data", ".", "meta", "=", "fitsheader", "data", ".", "mag", "=", "fitsheader", "[", "0", "]", "[", "'KEPMAG'", "]", "[", "1", "]", "if", "type", "(", "data", ".", "mag", ")", "is", "pyfits", ".", "card", ".", "Undefined", ":", "data", ".", "mag", "=", "np", ".", "nan", "data", ".", "pixel_images", "=", "pixel_images", "data", ".", "nearby", "=", "nearby", 
"data", ".", "hires", "=", "hires", "data", ".", "saturated", "=", "saturated", "data", ".", "bkg", "=", "bkg", "return", "data" ]
Returns a :py:obj:`DataContainer` instance with the
raw data for the target.

:param str fitsfile: The full raw target pixel file path
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param float saturation_tolerance: Target is considered saturated \
       if flux is within this fraction of the pixel well depth. \
       Default -0.1
:param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider \
       outliers when computing the model. \
       Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
:param bool get_hires: Download a high resolution image of the target? \
       Default :py:obj:`False`
:param bool get_nearby: Retrieve location of nearby sources? \
       Default :py:obj:`False`
[ "Returns", "a", ":", "py", ":", "obj", ":", "DataContainer", "instance", "with", "the", "raw", "data", "for", "the", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/standalone.py#L380-L651
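A hedged sketch of a non-interactive call (filename, EPIC number, and aperture shape are made up; passing an explicit aperture skips the ApertureSelector GUI):

import numpy as np

ap = np.ones((10, 12), dtype=int)      # accept every pixel, for brevity
data = GetData('ktwo205071984-c02_lpd-targ.fits', 205071984, 2,
               aperture=ap, get_hires=False, get_nearby=False)
print(data.time.shape, data.fpix.shape)   # (cadences,), (cadences, pixels)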
rodluger/everest
everest/dvs.py
DVS.title
def title(self):
    '''
    Returns the axis instances where the title will be printed

    '''
    return self.title_left(on=False), self.title_center(on=False), \
        self.title_right(on=False)
python
def title(self):
    '''
    Returns the axis instances where the title will be printed

    '''
    return self.title_left(on=False), self.title_center(on=False), \
        self.title_right(on=False)
[ "def", "title", "(", "self", ")", ":", "return", "self", ".", "title_left", "(", "on", "=", "False", ")", ",", "self", ".", "title_center", "(", "on", "=", "False", ")", ",", "self", ".", "title_right", "(", "on", "=", "False", ")" ]
Returns the axis instances where the title will be printed
[ "Returns", "the", "axis", "instance", "where", "the", "title", "will", "be", "printed" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/dvs.py#L132-L139
rodluger/everest
everest/dvs.py
DVS.footer
def footer(self):
    '''
    Returns the axis instances where the footer will be printed

    '''
    return self.footer_left(on=False), self.footer_center(on=False), \
        self.footer_right(on=False)
python
def footer(self):
    '''
    Returns the axis instances where the footer will be printed

    '''
    return self.footer_left(on=False), self.footer_center(on=False), \
        self.footer_right(on=False)
[ "def", "footer", "(", "self", ")", ":", "return", "self", ".", "footer_left", "(", "on", "=", "False", ")", ",", "self", ".", "footer_center", "(", "on", "=", "False", ")", ",", "self", ".", "footer_right", "(", "on", "=", "False", ")" ]
Returns the axis instances where the footer will be printed
[ "Returns", "the", "axis", "instance", "where", "the", "footer", "will", "be", "printed" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/dvs.py#L141-L148
rodluger/everest
everest/dvs.py
DVS.top_right
def top_right(self):
    '''
    Returns the axis instance at the top right of the page, where
    the postage stamp and aperture are displayed

    '''
    res = self.body_top_right[self.tcount]()
    self.tcount += 1
    return res
python
def top_right(self):
    '''
    Returns the axis instance at the top right of the page, where
    the postage stamp and aperture are displayed

    '''
    res = self.body_top_right[self.tcount]()
    self.tcount += 1
    return res
[ "def", "top_right", "(", "self", ")", ":", "res", "=", "self", ".", "body_top_right", "[", "self", ".", "tcount", "]", "(", ")", "self", ".", "tcount", "+=", "1", "return", "res" ]
Returns the axis instance at the top right of the page, where the postage stamp and aperture are displayed
[ "Returns", "the", "axis", "instance", "at", "the", "top", "right", "of", "the", "page", "where", "the", "postage", "stamp", "and", "aperture", "is", "displayed" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/dvs.py#L150-L159
rodluger/everest
everest/dvs.py
DVS.left
def left(self):
    '''
    Returns the current axis instance on the left side of the page
    where each successive light curve is displayed

    '''
    res = self.body_left[self.lcount]()
    self.lcount += 1
    return res
python
def left(self):
    '''
    Returns the current axis instance on the left side of the page
    where each successive light curve is displayed

    '''
    res = self.body_left[self.lcount]()
    self.lcount += 1
    return res
[ "def", "left", "(", "self", ")", ":", "res", "=", "self", ".", "body_left", "[", "self", ".", "lcount", "]", "(", ")", "self", ".", "lcount", "+=", "1", "return", "res" ]
Returns the current axis instance on the left side of the page where each successive light curve is displayed
[ "Returns", "the", "current", "axis", "instance", "on", "the", "left", "side", "of", "the", "page", "where", "each", "successive", "light", "curve", "is", "displayed" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/dvs.py#L170-L179
rodluger/everest
everest/dvs.py
DVS.right
def right(self):
    '''
    Returns the current axis instance on the right side of the page,
    where cross-validation information is displayed

    '''
    res = self.body_right[self.rcount]()
    self.rcount += 1
    return res
python
def right(self):
    '''
    Returns the current axis instance on the right side of the page,
    where cross-validation information is displayed

    '''
    res = self.body_right[self.rcount]()
    self.rcount += 1
    return res
[ "def", "right", "(", "self", ")", ":", "res", "=", "self", ".", "body_right", "[", "self", ".", "rcount", "]", "(", ")", "self", ".", "rcount", "+=", "1", "return", "res" ]
Returns the current axis instance on the right side of the page, where cross-validation information is displayed
[ "Returns", "the", "current", "axis", "instance", "on", "the", "right", "side", "of", "the", "page", "where", "cross", "-", "validation", "information", "is", "displayed" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/dvs.py#L181-L190
rodluger/everest
everest/dvs.py
CBV.body
def body(self):
    '''
    Returns the axis instance where the light curves will be shown

    '''
    res = self._body[self.bcount]()
    self.bcount += 1
    return res
python
def body(self):
    '''
    Returns the axis instance where the light curves will be shown

    '''
    res = self._body[self.bcount]()
    self.bcount += 1
    return res
[ "def", "body", "(", "self", ")", ":", "res", "=", "self", ".", "_body", "[", "self", ".", "bcount", "]", "(", ")", "self", ".", "bcount", "+=", "1", "return", "res" ]
Returns the axis instance where the light curves will be shown
[ "Returns", "the", "axis", "instance", "where", "the", "light", "curves", "will", "be", "shown" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/dvs.py#L230-L238
lsbardel/python-stdnet
stdnet/odm/globals.py
hashmodel
def hashmodel(model, library=None):
    '''Calculate the Hash id of metaclass ``meta``'''
    library = library or 'python-stdnet'
    meta = model._meta
    sha = hashlib.sha1(to_bytes('{0}({1})'.format(library, meta)))
    hash = sha.hexdigest()[:8]
    meta.hash = hash
    if hash in _model_dict:
        raise KeyError('Model "{0}" already in hash table.\
 Rename your model or the module containing the model.'.format(meta))
    _model_dict[hash] = model
python
def hashmodel(model, library=None):
    '''Calculate the Hash id of metaclass ``meta``'''
    library = library or 'python-stdnet'
    meta = model._meta
    sha = hashlib.sha1(to_bytes('{0}({1})'.format(library, meta)))
    hash = sha.hexdigest()[:8]
    meta.hash = hash
    if hash in _model_dict:
        raise KeyError('Model "{0}" already in hash table.\
 Rename your model or the module containing the model.'.format(meta))
    _model_dict[hash] = model
[ "def", "hashmodel", "(", "model", ",", "library", "=", "None", ")", ":", "library", "=", "library", "or", "'python-stdnet'", "meta", "=", "model", ".", "_meta", "sha", "=", "hashlib", ".", "sha1", "(", "to_bytes", "(", "'{0}({1})'", ".", "format", "(", "library", ",", "meta", ")", ")", ")", "hash", "=", "sha", ".", "hexdigest", "(", ")", "[", ":", "8", "]", "meta", ".", "hash", "=", "hash", "if", "hash", "in", "_model_dict", ":", "raise", "KeyError", "(", "'Model \"{0}\" already in hash table.\\\r\n Rename your model or the module containing the model.'", ".", "format", "(", "meta", ")", ")", "_model_dict", "[", "hash", "]", "=", "model" ]
Calculate the Hash id of metaclass ``meta``
[ "Calculate", "the", "Hash", "id", "of", "metaclass", "meta" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/globals.py#L40-L50
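A self-contained sketch of the id scheme only (the strings are made up; the real function hashes '{library}({meta})' built from the model's metaclass):

import hashlib

key = hashlib.sha1(b'python-stdnet(profile)').hexdigest()[:8]
print(key)   # 8 hex digits: a short, stable key for the model registry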
lsbardel/python-stdnet
stdnet/odm/globals.py
Event.bind
def bind(self, callback, sender=None):
    '''Bind a ``callback`` for a given ``sender``.'''
    key = (_make_id(callback), _make_id(sender))
    self.callbacks.append((key, callback))
python
def bind(self, callback, sender=None):
    '''Bind a ``callback`` for a given ``sender``.'''
    key = (_make_id(callback), _make_id(sender))
    self.callbacks.append((key, callback))
[ "def", "bind", "(", "self", ",", "callback", ",", "sender", "=", "None", ")", ":", "key", "=", "(", "_make_id", "(", "callback", ")", ",", "_make_id", "(", "sender", ")", ")", "self", ".", "callbacks", ".", "append", "(", "(", "key", ",", "callback", ")", ")" ]
Bind a ``callback`` for a given ``sender``.
[ "Bind", "a", "callback", "for", "a", "given", "sender", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/globals.py#L64-L67
lsbardel/python-stdnet
stdnet/odm/globals.py
Event.fire
def fire(self, sender=None, **params):
    '''Fire callbacks from a ``sender``.'''
    keys = (_make_id(None), _make_id(sender))
    results = []
    for (_, key), callback in self.callbacks:
        if key in keys:
            results.append(callback(self, sender, **params))
    return results
python
def fire(self, sender=None, **params):
    '''Fire callbacks from a ``sender``.'''
    keys = (_make_id(None), _make_id(sender))
    results = []
    for (_, key), callback in self.callbacks:
        if key in keys:
            results.append(callback(self, sender, **params))
    return results
[ "def", "fire", "(", "self", ",", "sender", "=", "None", ",", "*", "*", "params", ")", ":", "keys", "=", "(", "_make_id", "(", "None", ")", ",", "_make_id", "(", "sender", ")", ")", "results", "=", "[", "]", "for", "(", "_", ",", "key", ")", ",", "callback", "in", "self", ".", "callbacks", ":", "if", "key", "in", "keys", ":", "results", ".", "append", "(", "callback", "(", "self", ",", "sender", ",", "*", "*", "params", ")", ")", "return", "results" ]
Fire callbacks from a ``sender``.
[ "Fire", "callbacks", "from", "a", "sender", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/globals.py#L69-L76
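A hedged sketch pairing bind() and fire() (`event` and `instance` are illustrative objects): a callback bound with sender=None matches every sender, because fire() compares against both _make_id(None) and _make_id(sender).

def on_saved(event, sender, **params):
    print('saved by', sender, params)

event.bind(on_saved)                        # sender=None: match any sender
results = event.fire(sender=instance, changed=True)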
lsbardel/python-stdnet
stdnet/backends/redisb/client/prefixed.py
PrefixedRedisMixin.execute_command
def execute_command(self, cmnd, *args, **options):
    "Execute a command and return a parsed response"
    args, options = self.preprocess_command(cmnd, *args, **options)
    return self.client.execute_command(cmnd, *args, **options)
python
def execute_command(self, cmnd, *args, **options):
    "Execute a command and return a parsed response"
    args, options = self.preprocess_command(cmnd, *args, **options)
    return self.client.execute_command(cmnd, *args, **options)
[ "def", "execute_command", "(", "self", ",", "cmnd", ",", "*", "args", ",", "*", "*", "options", ")", ":", "args", ",", "options", "=", "self", ".", "preprocess_command", "(", "cmnd", ",", "*", "args", ",", "*", "*", "options", ")", "return", "self", ".", "client", ".", "execute_command", "(", "cmnd", ",", "*", "args", ",", "*", "*", "options", ")" ]
Execute a command and return a parsed response
[ "Execute", "a", "command", "and", "return", "a", "parsed", "response" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/prefixed.py#L122-L125
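A standalone sketch of the idea behind preprocess_command (illustrative only; the real method lives on the mixin and knows each Redis command's key positions): key arguments are rewritten with a namespace prefix before the raw client sees them.

def add_prefix(prefix, *args):
    # assume, for illustration, the first positional argument is the key
    if args:
        args = (prefix + str(args[0]),) + args[1:]
    return args

print(add_prefix('myapp:', 'counter', 1))   # ('myapp:counter', 1)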
rodluger/everest
everest/missions/k2/utils.py
_range10_90
def _range10_90(x):
    '''
    Returns the 10th-90th percentile range of array :py:obj:`x`.

    '''
    x = np.delete(x, np.where(np.isnan(x)))
    i = np.argsort(x)
    a = int(0.1 * len(x))
    b = int(0.9 * len(x))
    return x[i][b] - x[i][a]
python
def _range10_90(x):
    '''
    Returns the 10th-90th percentile range of array :py:obj:`x`.

    '''
    x = np.delete(x, np.where(np.isnan(x)))
    i = np.argsort(x)
    a = int(0.1 * len(x))
    b = int(0.9 * len(x))
    return x[i][b] - x[i][a]
[ "def", "_range10_90", "(", "x", ")", ":", "x", "=", "np", ".", "delete", "(", "x", ",", "np", ".", "where", "(", "np", ".", "isnan", "(", "x", ")", ")", ")", "i", "=", "np", ".", "argsort", "(", "x", ")", "a", "=", "int", "(", "0.1", "*", "len", "(", "x", ")", ")", "b", "=", "int", "(", "0.9", "*", "len", "(", "x", ")", ")", "return", "x", "[", "i", "]", "[", "b", "]", "-", "x", "[", "i", "]", "[", "a", "]" ]
Returns the 10th-90th percentile range of array :py:obj:`x`.
[ "Returns", "the", "10th", "-", "90th", "percentile", "range", "of", "array", ":", "py", ":", "obj", ":", "x", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L45-L55
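A quick sanity check (numpy only): on a large standard-normal sample the 10th-90th percentile range should be about 2 x 1.28 ~ 2.56, and the sort-based statistic above should agree with np.percentile to within sampling noise.

import numpy as np

x = np.random.randn(100000)
print(_range10_90(x))
print(np.percentile(x, 90) - np.percentile(x, 10))   # nearly equal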
rodluger/everest
everest/missions/k2/utils.py
Campaign
def Campaign(EPIC, **kwargs):
    '''
    Returns the campaign number(s) for a given EPIC target. If target is
    not found, returns :py:obj:`None`.

    :param int EPIC: The EPIC number of the target.

    '''
    campaigns = []
    for campaign, stars in GetK2Stars().items():
        if EPIC in [s[0] for s in stars]:
            campaigns.append(campaign)
    if len(campaigns) == 0:
        return None
    elif len(campaigns) == 1:
        return campaigns[0]
    else:
        return campaigns
python
def Campaign(EPIC, **kwargs):
    '''
    Returns the campaign number(s) for a given EPIC target. If target is
    not found, returns :py:obj:`None`.

    :param int EPIC: The EPIC number of the target.

    '''
    campaigns = []
    for campaign, stars in GetK2Stars().items():
        if EPIC in [s[0] for s in stars]:
            campaigns.append(campaign)
    if len(campaigns) == 0:
        return None
    elif len(campaigns) == 1:
        return campaigns[0]
    else:
        return campaigns
[ "def", "Campaign", "(", "EPIC", ",", "*", "*", "kwargs", ")", ":", "campaigns", "=", "[", "]", "for", "campaign", ",", "stars", "in", "GetK2Stars", "(", ")", ".", "items", "(", ")", ":", "if", "EPIC", "in", "[", "s", "[", "0", "]", "for", "s", "in", "stars", "]", ":", "campaigns", ".", "append", "(", "campaign", ")", "if", "len", "(", "campaigns", ")", "==", "0", ":", "return", "None", "elif", "len", "(", "campaigns", ")", "==", "1", ":", "return", "campaigns", "[", "0", "]", "else", ":", "return", "campaigns" ]
Returns the campaign number(s) for a given EPIC target. If target is not
found, returns :py:obj:`None`.

:param int EPIC: The EPIC number of the target.
[ "Returns", "the", "campaign", "number", "(", "s", ")", "for", "a", "given", "EPIC", "target", ".", "If", "target", "is", "not", "found", "returns", ":", "py", ":", "obj", ":", "None", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L152-L170
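A hedged usage sketch (the EPIC number is illustrative): the return type depends on how many campaigns matched, so callers should handle all three cases.

c = Campaign(201367065)
if c is None:
    print('target not found')
elif isinstance(c, list):
    print('observed in campaigns', c)
else:
    print('observed in campaign', c)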
rodluger/everest
everest/missions/k2/utils.py
GetK2Stars
def GetK2Stars(clobber=False):
    '''
    Download and return a :py:obj:`dict` of all *K2* stars organized by
    campaign. Saves each campaign to a `.stars` file in the
    `everest/missions/k2/tables` directory.

    :param bool clobber: If :py:obj:`True`, download and overwrite \
           existing files. Default :py:obj:`False`

    .. note:: The keys of the dictionary returned by this function are the \
              (integer) numbers of each campaign. Each item in the \
              :py:obj:`dict` is a list of the targets in the corresponding \
              campaign, and each item in that list is in turn a list of the \
              following: **EPIC number** (:py:class:`int`), \
              **Kp magnitude** (:py:class:`float`), **CCD channel number** \
              (:py:class:`int`), and **short cadence available** \
              (:py:class:`bool`).

    '''
    # Download
    if clobber:
        print("Downloading K2 star list...")
        stars = kplr_client.k2_star_info()
        print("Writing star list to disk...")
        for campaign in stars.keys():
            if not os.path.exists(os.path.join(EVEREST_SRC, 'missions',
                                               'k2', 'tables')):
                os.makedirs(os.path.join(
                    EVEREST_SRC, 'missions', 'k2', 'tables'))
            with open(os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                                   'c%02d.stars' % campaign), 'w') as f:
                for star in stars[campaign]:
                    print(",".join([str(s) for s in star]), file=f)

    # Return
    res = {}
    for campaign in K2_CAMPAIGNS:
        f = os.path.join(EVEREST_SRC, 'missions', 'k2',
                         'tables', 'c%02d.stars' % campaign)
        if os.path.exists(f):
            with open(f, 'r') as file:
                lines = file.readlines()
            if len(lines[0].split(',')) == 4:
                # EPIC number, Kp magnitude, channel number,
                # short cadence available?
                stars = [[int(l.split(',')[0]), _float(l.split(',')[1]),
                          int(l.split(',')[2]), eval(l.split(',')[3])]
                         for l in lines]
            else:
                stars = [[int(l), np.nan, -1, None] for l in lines]
            res.update({campaign: stars})
    return res
python
def GetK2Stars(clobber=False): ''' Download and return a :py:obj:`dict` of all *K2* stars organized by campaign. Saves each campaign to a `.stars` file in the `everest/missions/k2/tables` directory. :param bool clobber: If :py:obj:`True`, download and overwrite \ existing files. Default :py:obj:`False` .. note:: The keys of the dictionary returned by this function are the \ (integer) numbers of each campaign. Each item in the \ :py:obj:`dict` is a list of the targets in the corresponding \ campaign, and each item in that list is in turn a list of the \ following: **EPIC number** (:py:class:`int`), \ **Kp magnitude** (:py:class:`float`), **CCD channel number** \ (:py:class:`int`), and **short cadence available** \ (:py:class:`bool`). ''' # Download if clobber: print("Downloading K2 star list...") stars = kplr_client.k2_star_info() print("Writing star list to disk...") for campaign in stars.keys(): if not os.path.exists(os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables')): os.makedirs(os.path.join( EVEREST_SRC, 'missions', 'k2', 'tables')) with open(os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d.stars' % campaign), 'w') as f: for star in stars[campaign]: print(",".join([str(s) for s in star]), file=f) # Return res = {} for campaign in K2_CAMPAIGNS: f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d.stars' % campaign) if os.path.exists(f): with open(f, 'r') as file: lines = file.readlines() if len(lines[0].split(',')) == 4: # EPIC number, Kp magnitude, channel number, # short cadence available? stars = [[int(l.split(',')[0]), _float(l.split(',')[1]), int(l.split(',')[2]), eval(l.split(',')[3])] for l in lines] else: stars = [[int(l), np.nan, -1, None] for l in lines] res.update({campaign: stars}) return res
[ "def", "GetK2Stars", "(", "clobber", "=", "False", ")", ":", "# Download", "if", "clobber", ":", "print", "(", "\"Downloading K2 star list...\"", ")", "stars", "=", "kplr_client", ".", "k2_star_info", "(", ")", "print", "(", "\"Writing star list to disk...\"", ")", "for", "campaign", "in", "stars", ".", "keys", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d.stars'", "%", "campaign", ")", ",", "'w'", ")", "as", "f", ":", "for", "star", "in", "stars", "[", "campaign", "]", ":", "print", "(", "\",\"", ".", "join", "(", "[", "str", "(", "s", ")", "for", "s", "in", "star", "]", ")", ",", "file", "=", "f", ")", "# Return", "res", "=", "{", "}", "for", "campaign", "in", "K2_CAMPAIGNS", ":", "f", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d.stars'", "%", "campaign", ")", "if", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "with", "open", "(", "f", ",", "'r'", ")", "as", "file", ":", "lines", "=", "file", ".", "readlines", "(", ")", "if", "len", "(", "lines", "[", "0", "]", ".", "split", "(", "','", ")", ")", "==", "4", ":", "# EPIC number, Kp magnitude, channel number,", "# short cadence available?", "stars", "=", "[", "[", "int", "(", "l", ".", "split", "(", "','", ")", "[", "0", "]", ")", ",", "_float", "(", "l", ".", "split", "(", "','", ")", "[", "1", "]", ")", ",", "int", "(", "l", ".", "split", "(", "','", ")", "[", "2", "]", ")", ",", "eval", "(", "l", ".", "split", "(", "','", ")", "[", "3", "]", ")", "]", "for", "l", "in", "lines", "]", "else", ":", "stars", "=", "[", "[", "int", "(", "l", ")", ",", "np", ".", "nan", ",", "-", "1", ",", "None", "]", "for", "l", "in", "lines", "]", "res", ".", "update", "(", "{", "campaign", ":", "stars", "}", ")", "return", "res" ]
Download and return a :py:obj:`dict` of all *K2* stars organized by campaign. Saves each campaign to a `.stars` file in the `everest/missions/k2/tables` directory. :param bool clobber: If :py:obj:`True`, download and overwrite \ existing files. Default :py:obj:`False` .. note:: The keys of the dictionary returned by this function are the \ (integer) numbers of each campaign. Each item in the \ :py:obj:`dict` is a list of the targets in the corresponding \ campaign, and each item in that list is in turn a list of the \ following: **EPIC number** (:py:class:`int`), \ **Kp magnitude** (:py:class:`float`), **CCD channel number** \ (:py:class:`int`), and **short cadence available** \ (:py:class:`bool`).
[ "Download", "and", "return", "a", ":", "py", ":", "obj", ":", "dict", "of", "all", "*", "K2", "*", "stars", "organized", "by", "campaign", ".", "Saves", "each", "campaign", "to", "a", ".", "stars", "file", "in", "the", "everest", "/", "missions", "/", "k2", "/", "tables", "directory", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L173-L227
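A short sketch of how the GetK2Stars record above might be used (assumes the .stars tables are already on disk and that campaign 1 is among them; pass clobber=True to re-download via kplr):

from everest.missions.k2.utils import GetK2Stars

stars = GetK2Stars()
epic, kp, channel, short_cadence = stars[1][0]  # first target of campaign 1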
rodluger/everest
everest/missions/k2/utils.py
GetK2Campaign
def GetK2Campaign(campaign, clobber=False, split=False, epics_only=False, cadence='lc'): ''' Return all stars in a given *K2* campaign. :param campaign: The *K2* campaign number. If this is an :py:class:`int`, \ returns all targets in that campaign. If a :py:class:`float` in \ the form :py:obj:`X.Y`, runs the :py:obj:`Y^th` decile of campaign \ :py:obj:`X`. :param bool clobber: If :py:obj:`True`, download and overwrite existing \ files. Default :py:obj:`False` :param bool split: If :py:obj:`True` and :py:obj:`campaign` is an \ :py:class:`int`, returns each of the subcampaigns as a separate \ list. Default :py:obj:`False` :param bool epics_only: If :py:obj:`True`, returns only the EPIC numbers. \ If :py:obj:`False`, returns metadata associated with each target. \ Default :py:obj:`False` :param str cadence: Long (:py:obj:`lc`) or short (:py:obj:`sc`) cadence? \ Default :py:obj:`lc`. ''' all = GetK2Stars(clobber=clobber) if int(campaign) in all.keys(): all = all[int(campaign)] else: return [] if cadence == 'sc': all = [a for a in all if a[3]] if epics_only: all = [a[0] for a in all] if type(campaign) is int or type(campaign) is np.int64: if not split: return all else: all_split = list(Chunks(all, len(all) // 10)) # HACK: Sometimes we're left with a few targets # dangling at the end. Insert them back evenly # into the first few subcampaigns. if len(all_split) > 10: tmp1 = all_split[:10] tmp2 = all_split[10:] for n in range(len(tmp2)): tmp1[n] = np.append(tmp1[n], tmp2[n]) all_split = tmp1 res = [] for subcampaign in range(10): res.append(all_split[subcampaign]) return res elif type(campaign) is float: x, y = divmod(campaign, 1) campaign = int(x) subcampaign = round(y * 10) return list(Chunks(all, len(all) // 10))[subcampaign] else: raise Exception('Argument `subcampaign` must be an `int` ' + 'or a `float` in the form `X.Y`')
python
def GetK2Campaign(campaign, clobber=False, split=False, epics_only=False, cadence='lc'): ''' Return all stars in a given *K2* campaign. :param campaign: The *K2* campaign number. If this is an :py:class:`int`, \ returns all targets in that campaign. If a :py:class:`float` in \ the form :py:obj:`X.Y`, runs the :py:obj:`Y^th` decile of campaign \ :py:obj:`X`. :param bool clobber: If :py:obj:`True`, download and overwrite existing \ files. Default :py:obj:`False` :param bool split: If :py:obj:`True` and :py:obj:`campaign` is an \ :py:class:`int`, returns each of the subcampaigns as a separate \ list. Default :py:obj:`False` :param bool epics_only: If :py:obj:`True`, returns only the EPIC numbers. \ If :py:obj:`False`, returns metadata associated with each target. \ Default :py:obj:`False` :param str cadence: Long (:py:obj:`lc`) or short (:py:obj:`sc`) cadence? \ Default :py:obj:`lc`. ''' all = GetK2Stars(clobber=clobber) if int(campaign) in all.keys(): all = all[int(campaign)] else: return [] if cadence == 'sc': all = [a for a in all if a[3]] if epics_only: all = [a[0] for a in all] if type(campaign) is int or type(campaign) is np.int64: if not split: return all else: all_split = list(Chunks(all, len(all) // 10)) # HACK: Sometimes we're left with a few targets # dangling at the end. Insert them back evenly # into the first few subcampaigns. if len(all_split) > 10: tmp1 = all_split[:10] tmp2 = all_split[10:] for n in range(len(tmp2)): tmp1[n] = np.append(tmp1[n], tmp2[n]) all_split = tmp1 res = [] for subcampaign in range(10): res.append(all_split[subcampaign]) return res elif type(campaign) is float: x, y = divmod(campaign, 1) campaign = int(x) subcampaign = round(y * 10) return list(Chunks(all, len(all) // 10))[subcampaign] else: raise Exception('Argument `subcampaign` must be an `int` ' + 'or a `float` in the form `X.Y`')
[ "def", "GetK2Campaign", "(", "campaign", ",", "clobber", "=", "False", ",", "split", "=", "False", ",", "epics_only", "=", "False", ",", "cadence", "=", "'lc'", ")", ":", "all", "=", "GetK2Stars", "(", "clobber", "=", "clobber", ")", "if", "int", "(", "campaign", ")", "in", "all", ".", "keys", "(", ")", ":", "all", "=", "all", "[", "int", "(", "campaign", ")", "]", "else", ":", "return", "[", "]", "if", "cadence", "==", "'sc'", ":", "all", "=", "[", "a", "for", "a", "in", "all", "if", "a", "[", "3", "]", "]", "if", "epics_only", ":", "all", "=", "[", "a", "[", "0", "]", "for", "a", "in", "all", "]", "if", "type", "(", "campaign", ")", "is", "int", "or", "type", "(", "campaign", ")", "is", "np", ".", "int64", ":", "if", "not", "split", ":", "return", "all", "else", ":", "all_split", "=", "list", "(", "Chunks", "(", "all", ",", "len", "(", "all", ")", "//", "10", ")", ")", "# HACK: Sometimes we're left with a few targets", "# dangling at the end. Insert them back evenly", "# into the first few subcampaigns.", "if", "len", "(", "all_split", ")", ">", "10", ":", "tmp1", "=", "all_split", "[", ":", "10", "]", "tmp2", "=", "all_split", "[", "10", ":", "]", "for", "n", "in", "range", "(", "len", "(", "tmp2", ")", ")", ":", "tmp1", "[", "n", "]", "=", "np", ".", "append", "(", "tmp1", "[", "n", "]", ",", "tmp2", "[", "n", "]", ")", "all_split", "=", "tmp1", "res", "=", "[", "]", "for", "subcampaign", "in", "range", "(", "10", ")", ":", "res", ".", "append", "(", "all_split", "[", "subcampaign", "]", ")", "return", "res", "elif", "type", "(", "campaign", ")", "is", "float", ":", "x", ",", "y", "=", "divmod", "(", "campaign", ",", "1", ")", "campaign", "=", "int", "(", "x", ")", "subcampaign", "=", "round", "(", "y", "*", "10", ")", "return", "list", "(", "Chunks", "(", "all", ",", "len", "(", "all", ")", "//", "10", ")", ")", "[", "subcampaign", "]", "else", ":", "raise", "Exception", "(", "'Argument `subcampaign` must be an `int` '", "+", "'or a `float` in the form `X.Y`'", ")" ]
Return all stars in a given *K2* campaign. :param campaign: The *K2* campaign number. If this is an :py:class:`int`, \ returns all targets in that campaign. If a :py:class:`float` in \ the form :py:obj:`X.Y`, runs the :py:obj:`Y^th` decile of campaign \ :py:obj:`X`. :param bool clobber: If :py:obj:`True`, download and overwrite existing \ files. Default :py:obj:`False` :param bool split: If :py:obj:`True` and :py:obj:`campaign` is an \ :py:class:`int`, returns each of the subcampaigns as a separate \ list. Default :py:obj:`False` :param bool epics_only: If :py:obj:`True`, returns only the EPIC numbers. \ If :py:obj:`False`, returns metadata associated with each target. \ Default :py:obj:`False` :param str cadence: Long (:py:obj:`lc`) or short (:py:obj:`sc`) cadence? \ Default :py:obj:`lc`.
[ "Return", "all", "stars", "in", "a", "given", "*", "K2", "*", "campaign", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L230-L291
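A usage sketch for GetK2Campaign (arguments are illustrative; a float campaign in the form X.Y selects a single decile):

from everest.missions.k2.utils import GetK2Campaign

epics = GetK2Campaign(6, epics_only=True)     # all EPIC numbers in campaign 6
decile = GetK2Campaign(6.3, epics_only=True)  # subcampaign (decile) 3 of campaign 6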
rodluger/everest
everest/missions/k2/utils.py
Channel
def Channel(EPIC, campaign=None): ''' Returns the channel number for a given EPIC target. ''' if campaign is None: campaign = Campaign(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) try: stars = GetK2Stars()[campaign] except KeyError: # Not sure what else to do here! log.warn("Unknown channel for target. Defaulting to channel 2.") return 2 i = np.argmax([s[0] == EPIC for s in stars]) return stars[i][2]
python
def Channel(EPIC, campaign=None): ''' Returns the channel number for a given EPIC target. ''' if campaign is None: campaign = Campaign(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) try: stars = GetK2Stars()[campaign] except KeyError: # Not sure what else to do here! log.warn("Unknown channel for target. Defaulting to channel 2.") return 2 i = np.argmax([s[0] == EPIC for s in stars]) return stars[i][2]
[ "def", "Channel", "(", "EPIC", ",", "campaign", "=", "None", ")", ":", "if", "campaign", "is", "None", ":", "campaign", "=", "Campaign", "(", "EPIC", ")", "if", "hasattr", "(", "campaign", ",", "'__len__'", ")", ":", "raise", "AttributeError", "(", "\"Please choose a campaign/season for this target: %s.\"", "%", "campaign", ")", "try", ":", "stars", "=", "GetK2Stars", "(", ")", "[", "campaign", "]", "except", "KeyError", ":", "# Not sure what else to do here!", "log", ".", "warn", "(", "\"Unknown channel for target. Defaulting to channel 2.\"", ")", "return", "2", "i", "=", "np", ".", "argmax", "(", "[", "s", "[", "0", "]", "==", "EPIC", "for", "s", "in", "stars", "]", ")", "return", "stars", "[", "i", "]", "[", "2", "]" ]
Returns the channel number for a given EPIC target.
[ "Returns", "the", "channel", "number", "for", "a", "given", "EPIC", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L294-L312
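A minimal sketch for Channel (hypothetical EPIC/campaign pair; note that the function logs a warning and falls back to channel 2 when the campaign tables do not contain the target):

from everest.missions.k2.utils import Channel

ch = Channel(201367065, campaign=1)  # hypothetical EPIC target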
rodluger/everest
everest/missions/k2/utils.py
Module
def Module(EPIC, campaign=None): ''' Returns the module number for a given EPIC target. ''' channel = Channel(EPIC, campaign=campaign) nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81} for c in [channel, channel - 1, channel - 2, channel - 3]: if c in nums.values(): for mod, chan in nums.items(): if chan == c: return mod return None
python
def Module(EPIC, campaign=None): ''' Returns the module number for a given EPIC target. ''' channel = Channel(EPIC, campaign=campaign) nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81} for c in [channel, channel - 1, channel - 2, channel - 3]: if c in nums.values(): for mod, chan in nums.items(): if chan == c: return mod return None
[ "def", "Module", "(", "EPIC", ",", "campaign", "=", "None", ")", ":", "channel", "=", "Channel", "(", "EPIC", ",", "campaign", "=", "campaign", ")", "nums", "=", "{", "2", ":", "1", ",", "3", ":", "5", ",", "4", ":", "9", ",", "6", ":", "13", ",", "7", ":", "17", ",", "8", ":", "21", ",", "9", ":", "25", ",", "10", ":", "29", ",", "11", ":", "33", ",", "12", ":", "37", ",", "13", ":", "41", ",", "14", ":", "45", ",", "15", ":", "49", ",", "16", ":", "53", ",", "17", ":", "57", ",", "18", ":", "61", ",", "19", ":", "65", ",", "20", ":", "69", ",", "22", ":", "73", ",", "23", ":", "77", ",", "24", ":", "81", "}", "for", "c", "in", "[", "channel", ",", "channel", "-", "1", ",", "channel", "-", "2", ",", "channel", "-", "3", "]", ":", "if", "c", "in", "nums", ".", "values", "(", ")", ":", "for", "mod", ",", "chan", "in", "nums", ".", "items", "(", ")", ":", "if", "chan", "==", "c", ":", "return", "mod", "return", "None" ]
Returns the module number for a given EPIC target.
[ "Returns", "the", "module", "number", "for", "a", "given", "EPIC", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L315-L331
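A sketch for Module, which inverts the channel-to-module lookup table shown above (hypothetical EPIC):

from everest.missions.k2.utils import Module

mod = Module(201367065, campaign=1)  # None if the channel maps to no module in the table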
rodluger/everest
everest/missions/k2/utils.py
Channels
def Channels(module): ''' Returns the channels contained in the given K2 module. ''' nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81} if module in nums: return [nums[module], nums[module] + 1, nums[module] + 2, nums[module] + 3] else: return None
python
def Channels(module): ''' Returns the channels contained in the given K2 module. ''' nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81} if module in nums: return [nums[module], nums[module] + 1, nums[module] + 2, nums[module] + 3] else: return None
[ "def", "Channels", "(", "module", ")", ":", "nums", "=", "{", "2", ":", "1", ",", "3", ":", "5", ",", "4", ":", "9", ",", "6", ":", "13", ",", "7", ":", "17", ",", "8", ":", "21", ",", "9", ":", "25", ",", "10", ":", "29", ",", "11", ":", "33", ",", "12", ":", "37", ",", "13", ":", "41", ",", "14", ":", "45", ",", "15", ":", "49", ",", "16", ":", "53", ",", "17", ":", "57", ",", "18", ":", "61", ",", "19", ":", "65", ",", "20", ":", "69", ",", "22", ":", "73", ",", "23", ":", "77", ",", "24", ":", "81", "}", "if", "module", "in", "nums", ":", "return", "[", "nums", "[", "module", "]", ",", "nums", "[", "module", "]", "+", "1", ",", "nums", "[", "module", "]", "+", "2", ",", "nums", "[", "module", "]", "+", "3", "]", "else", ":", "return", "None" ]
Returns the channels contained in the given K2 module.
[ "Returns", "the", "channels", "contained", "in", "the", "given", "K2", "module", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L334-L349
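Channels needs no data files, so its behavior can be verified directly (module 5 is absent from the lookup table, so it returns None):

from everest.missions.k2.utils import Channels

Channels(2)  # -> [1, 2, 3, 4]
Channels(5)  # -> None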
rodluger/everest
everest/missions/k2/utils.py
KepMag
def KepMag(EPIC, campaign=None): ''' Returns the *Kepler* magnitude for a given EPIC target. ''' if campaign is None: campaign = Campaign(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) stars = GetK2Stars()[campaign] i = np.argmax([s[0] == EPIC for s in stars]) return stars[i][1]
python
def KepMag(EPIC, campaign=None): ''' Returns the *Kepler* magnitude for a given EPIC target. ''' if campaign is None: campaign = Campaign(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) stars = GetK2Stars()[campaign] i = np.argmax([s[0] == EPIC for s in stars]) return stars[i][1]
[ "def", "KepMag", "(", "EPIC", ",", "campaign", "=", "None", ")", ":", "if", "campaign", "is", "None", ":", "campaign", "=", "Campaign", "(", "EPIC", ")", "if", "hasattr", "(", "campaign", ",", "'__len__'", ")", ":", "raise", "AttributeError", "(", "\"Please choose a campaign/season for this target: %s.\"", "%", "campaign", ")", "stars", "=", "GetK2Stars", "(", ")", "[", "campaign", "]", "i", "=", "np", ".", "argmax", "(", "[", "s", "[", "0", "]", "==", "EPIC", "for", "s", "in", "stars", "]", ")", "return", "stars", "[", "i", "]", "[", "1", "]" ]
Returns the *Kepler* magnitude for a given EPIC target.
[ "Returns", "the", "*", "Kepler", "*", "magnitude", "for", "a", "given", "EPIC", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L352-L365
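A sketch for KepMag (hypothetical EPIC; like Channel, it looks the target up in the cached campaign tables):

from everest.missions.k2.utils import KepMag

kp = KepMag(201367065, campaign=1)  # Kepler magnitude, or NaN if not in the table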
rodluger/everest
everest/missions/k2/utils.py
RemoveBackground
def RemoveBackground(EPIC, campaign=None): ''' Returns :py:obj:`True` or :py:obj:`False`, indicating whether or not to remove the background flux for the target. If ``campaign < 3``, returns :py:obj:`True`, otherwise returns :py:obj:`False`. ''' if campaign is None: campaign = Campaign(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) if campaign < 3: return True else: return False
python
def RemoveBackground(EPIC, campaign=None): ''' Returns :py:obj:`True` or :py:obj:`False`, indicating whether or not to remove the background flux for the target. If ``campaign < 3``, returns :py:obj:`True`, otherwise returns :py:obj:`False`. ''' if campaign is None: campaign = Campaign(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) if campaign < 3: return True else: return False
[ "def", "RemoveBackground", "(", "EPIC", ",", "campaign", "=", "None", ")", ":", "if", "campaign", "is", "None", ":", "campaign", "=", "Campaign", "(", "EPIC", ")", "if", "hasattr", "(", "campaign", ",", "'__len__'", ")", ":", "raise", "AttributeError", "(", "\"Please choose a campaign/season for this target: %s.\"", "%", "campaign", ")", "if", "campaign", "<", "3", ":", "return", "True", "else", ":", "return", "False" ]
Returns :py:obj:`True` or :py:obj:`False`, indicating whether or not to remove the background flux for the target. If ``campaign < 3``, returns :py:obj:`True`, otherwise returns :py:obj:`False`.
[ "Returns", ":", "py", ":", "obj", ":", "True", "or", ":", "py", ":", "obj", ":", "False", "indicating", "whether", "or", "not", "to", "remove", "the", "background", "flux", "for", "the", "target", ".", "If", "campaign", "<", "3", "returns", ":", "py", ":", "obj", ":", "True", "otherwise", "returns", ":", "py", ":", "obj", ":", "False", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L368-L384
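RemoveBackground reduces to a pure campaign-number test, so a sketch is straightforward (EPIC numbers hypothetical):

from everest.missions.k2.utils import RemoveBackground

RemoveBackground(201367065, campaign=1)  # True: campaign < 3
RemoveBackground(212345678, campaign=6)  # False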
rodluger/everest
everest/missions/k2/utils.py
GetNeighboringChannels
def GetNeighboringChannels(channel): ''' Returns all channels on the same module as :py:obj:`channel`. ''' x = divmod(channel - 1, 4)[1] return channel + np.array(range(-x, -x + 4), dtype=int)
python
def GetNeighboringChannels(channel): ''' Returns all channels on the same module as :py:obj:`channel`. ''' x = divmod(channel - 1, 4)[1] return channel + np.array(range(-x, -x + 4), dtype=int)
[ "def", "GetNeighboringChannels", "(", "channel", ")", ":", "x", "=", "divmod", "(", "channel", "-", "1", ",", "4", ")", "[", "1", "]", "return", "channel", "+", "np", ".", "array", "(", "range", "(", "-", "x", ",", "-", "x", "+", "4", ")", ",", "dtype", "=", "int", ")" ]
Returns all channels on the same module as :py:obj:`channel`.
[ "Returns", "all", "channels", "on", "the", "same", "module", "as", ":", "py", ":", "obj", ":", "channel", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L387-L394
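GetNeighboringChannels is pure arithmetic on the channel index; for example:

from everest.missions.k2.utils import GetNeighboringChannels

GetNeighboringChannels(7)  # -> array([5, 6, 7, 8]), the four channels of one module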
rodluger/everest
everest/missions/k2/utils.py
MASTRADec
def MASTRADec(ra, dec, darcsec, stars_only=False): ''' Detector location retrieval based upon RA and Dec. Adapted from `PyKE <http://keplergo.arc.nasa.gov/PyKE.shtml>`_. ''' # coordinate limits darcsec /= 3600.0 ra1 = ra - darcsec / np.cos(dec * np.pi / 180) ra2 = ra + darcsec / np.cos(dec * np.pi / 180) dec1 = dec - darcsec dec2 = dec + darcsec # build mast query url = 'http://archive.stsci.edu/k2/epic/search.php?' url += 'action=Search' url += '&k2_ra=' + str(ra1) + '..' + str(ra2) url += '&k2_dec=' + str(dec1) + '..' + str(dec2) url += '&max_records=10000' url += '&selectedColumnsCsv=id,k2_ra,k2_dec,kp' url += '&outputformat=CSV' if stars_only: url += '&ktc_target_type=LC' url += '&objtype=star' # retrieve results from MAST try: lines = urllib.request.urlopen(url) except: log.warn('Unable to retrieve source data from MAST.') lines = '' # collate nearby sources epicid = [] kepmag = [] ra = [] dec = [] for line in lines: line = line.strip().decode('ascii') if (len(line) > 0 and 'EPIC' not in line and 'integer' not in line and 'no rows found' not in line): out = line.split(',') r, d = sex2dec(out[1], out[2]) epicid.append(int(out[0])) kepmag.append(float(out[3])) ra.append(r) dec.append(d) epicid = np.array(epicid) kepmag = np.array(kepmag) ra = np.array(ra) dec = np.array(dec) return epicid, ra, dec, kepmag
python
def MASTRADec(ra, dec, darcsec, stars_only=False): ''' Detector location retrieval based upon RA and Dec. Adapted from `PyKE <http://keplergo.arc.nasa.gov/PyKE.shtml>`_. ''' # coordinate limits darcsec /= 3600.0 ra1 = ra - darcsec / np.cos(dec * np.pi / 180) ra2 = ra + darcsec / np.cos(dec * np.pi / 180) dec1 = dec - darcsec dec2 = dec + darcsec # build mast query url = 'http://archive.stsci.edu/k2/epic/search.php?' url += 'action=Search' url += '&k2_ra=' + str(ra1) + '..' + str(ra2) url += '&k2_dec=' + str(dec1) + '..' + str(dec2) url += '&max_records=10000' url += '&selectedColumnsCsv=id,k2_ra,k2_dec,kp' url += '&outputformat=CSV' if stars_only: url += '&ktc_target_type=LC' url += '&objtype=star' # retrieve results from MAST try: lines = urllib.request.urlopen(url) except: log.warn('Unable to retrieve source data from MAST.') lines = '' # collate nearby sources epicid = [] kepmag = [] ra = [] dec = [] for line in lines: line = line.strip().decode('ascii') if (len(line) > 0 and 'EPIC' not in line and 'integer' not in line and 'no rows found' not in line): out = line.split(',') r, d = sex2dec(out[1], out[2]) epicid.append(int(out[0])) kepmag.append(float(out[3])) ra.append(r) dec.append(d) epicid = np.array(epicid) kepmag = np.array(kepmag) ra = np.array(ra) dec = np.array(dec) return epicid, ra, dec, kepmag
[ "def", "MASTRADec", "(", "ra", ",", "dec", ",", "darcsec", ",", "stars_only", "=", "False", ")", ":", "# coordinate limits", "darcsec", "/=", "3600.0", "ra1", "=", "ra", "-", "darcsec", "/", "np", ".", "cos", "(", "dec", "*", "np", ".", "pi", "/", "180", ")", "ra2", "=", "ra", "+", "darcsec", "/", "np", ".", "cos", "(", "dec", "*", "np", ".", "pi", "/", "180", ")", "dec1", "=", "dec", "-", "darcsec", "dec2", "=", "dec", "+", "darcsec", "# build mast query", "url", "=", "'http://archive.stsci.edu/k2/epic/search.php?'", "url", "+=", "'action=Search'", "url", "+=", "'&k2_ra='", "+", "str", "(", "ra1", ")", "+", "'..'", "+", "str", "(", "ra2", ")", "url", "+=", "'&k2_dec='", "+", "str", "(", "dec1", ")", "+", "'..'", "+", "str", "(", "dec2", ")", "url", "+=", "'&max_records=10000'", "url", "+=", "'&selectedColumnsCsv=id,k2_ra,k2_dec,kp'", "url", "+=", "'&outputformat=CSV'", "if", "stars_only", ":", "url", "+=", "'&ktc_target_type=LC'", "url", "+=", "'&objtype=star'", "# retrieve results from MAST", "try", ":", "lines", "=", "urllib", ".", "request", ".", "urlopen", "(", "url", ")", "except", ":", "log", ".", "warn", "(", "'Unable to retrieve source data from MAST.'", ")", "lines", "=", "''", "# collate nearby sources", "epicid", "=", "[", "]", "kepmag", "=", "[", "]", "ra", "=", "[", "]", "dec", "=", "[", "]", "for", "line", "in", "lines", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "decode", "(", "'ascii'", ")", "if", "(", "len", "(", "line", ")", ">", "0", "and", "'EPIC'", "not", "in", "line", "and", "'integer'", "not", "in", "line", "and", "'no rows found'", "not", "in", "line", ")", ":", "out", "=", "line", ".", "split", "(", "','", ")", "r", ",", "d", "=", "sex2dec", "(", "out", "[", "1", "]", ",", "out", "[", "2", "]", ")", "epicid", ".", "append", "(", "int", "(", "out", "[", "0", "]", ")", ")", "kepmag", ".", "append", "(", "float", "(", "out", "[", "3", "]", ")", ")", "ra", ".", "append", "(", "r", ")", "dec", ".", "append", "(", "d", ")", "epicid", "=", "np", ".", "array", "(", "epicid", ")", "kepmag", "=", "np", ".", "array", "(", "kepmag", ")", "ra", "=", "np", ".", "array", "(", "ra", ")", "dec", "=", "np", ".", "array", "(", "dec", ")", "return", "epicid", ",", "ra", ",", "dec", ",", "kepmag" ]
Detector location retrieval based upon RA and Dec. Adapted from `PyKE <http://keplergo.arc.nasa.gov/PyKE.shtml>`_.
[ "Detector", "location", "retrieval", "based", "upon", "RA", "and", "Dec", ".", "Adapted", "from", "PyKE", "<http", ":", "//", "keplergo", ".", "arc", ".", "nasa", ".", "gov", "/", "PyKE", ".", "shtml", ">", "_", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L397-L454
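A hedged sketch for MASTRADec (requires network access to the MAST archive; the coordinates are hypothetical and darcsec is the search half-width in arcseconds):

from everest.missions.k2.utils import MASTRADec

epicid, ra, dec, kepmag = MASTRADec(172.56, 7.59, 120., stars_only=True)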
rodluger/everest
everest/missions/k2/utils.py
sex2dec
def sex2dec(ra, dec): ''' Convert sexagesimal hours to decimal degrees. Adapted from `PyKE <http://keplergo.arc.nasa.gov/PyKE.shtml>`_. :param float ra: The right ascension :param float dec: The declination :returns: The same values, but in decimal degrees ''' ra = re.sub('\s+', '|', ra.strip()) ra = re.sub(':', '|', ra.strip()) ra = re.sub(';', '|', ra.strip()) ra = re.sub(',', '|', ra.strip()) ra = re.sub('-', '|', ra.strip()) ra = ra.split('|') outra = (float(ra[0]) + float(ra[1]) / 60. + float(ra[2]) / 3600.) * 15.0 dec = re.sub('\s+', '|', dec.strip()) dec = re.sub(':', '|', dec.strip()) dec = re.sub(';', '|', dec.strip()) dec = re.sub(',', '|', dec.strip()) dec = dec.split('|') if float(dec[0]) > 0.0: outdec = float(dec[0]) + float(dec[1]) / 60. + float(dec[2]) / 3600. else: outdec = float(dec[0]) - float(dec[1]) / 60. - float(dec[2]) / 3600. return outra, outdec
python
def sex2dec(ra, dec): ''' Convert sexagesimal hours to decimal degrees. Adapted from `PyKE <http://keplergo.arc.nasa.gov/PyKE.shtml>`_. :param float ra: The right ascension :param float dec: The declination :returns: The same values, but in decimal degrees ''' ra = re.sub('\s+', '|', ra.strip()) ra = re.sub(':', '|', ra.strip()) ra = re.sub(';', '|', ra.strip()) ra = re.sub(',', '|', ra.strip()) ra = re.sub('-', '|', ra.strip()) ra = ra.split('|') outra = (float(ra[0]) + float(ra[1]) / 60. + float(ra[2]) / 3600.) * 15.0 dec = re.sub('\s+', '|', dec.strip()) dec = re.sub(':', '|', dec.strip()) dec = re.sub(';', '|', dec.strip()) dec = re.sub(',', '|', dec.strip()) dec = dec.split('|') if float(dec[0]) > 0.0: outdec = float(dec[0]) + float(dec[1]) / 60. + float(dec[2]) / 3600. else: outdec = float(dec[0]) - float(dec[1]) / 60. - float(dec[2]) / 3600. return outra, outdec
[ "def", "sex2dec", "(", "ra", ",", "dec", ")", ":", "ra", "=", "re", ".", "sub", "(", "'\\s+'", ",", "'|'", ",", "ra", ".", "strip", "(", ")", ")", "ra", "=", "re", ".", "sub", "(", "':'", ",", "'|'", ",", "ra", ".", "strip", "(", ")", ")", "ra", "=", "re", ".", "sub", "(", "';'", ",", "'|'", ",", "ra", ".", "strip", "(", ")", ")", "ra", "=", "re", ".", "sub", "(", "','", ",", "'|'", ",", "ra", ".", "strip", "(", ")", ")", "ra", "=", "re", ".", "sub", "(", "'-'", ",", "'|'", ",", "ra", ".", "strip", "(", ")", ")", "ra", "=", "ra", ".", "split", "(", "'|'", ")", "outra", "=", "(", "float", "(", "ra", "[", "0", "]", ")", "+", "float", "(", "ra", "[", "1", "]", ")", "/", "60.", "+", "float", "(", "ra", "[", "2", "]", ")", "/", "3600.", ")", "*", "15.0", "dec", "=", "re", ".", "sub", "(", "'\\s+'", ",", "'|'", ",", "dec", ".", "strip", "(", ")", ")", "dec", "=", "re", ".", "sub", "(", "':'", ",", "'|'", ",", "dec", ".", "strip", "(", ")", ")", "dec", "=", "re", ".", "sub", "(", "';'", ",", "'|'", ",", "dec", ".", "strip", "(", ")", ")", "dec", "=", "re", ".", "sub", "(", "','", ",", "'|'", ",", "dec", ".", "strip", "(", ")", ")", "dec", "=", "dec", ".", "split", "(", "'|'", ")", "if", "float", "(", "dec", "[", "0", "]", ")", ">", "0.0", ":", "outdec", "=", "float", "(", "dec", "[", "0", "]", ")", "+", "float", "(", "dec", "[", "1", "]", ")", "/", "60.", "+", "float", "(", "dec", "[", "2", "]", ")", "/", "3600.", "else", ":", "outdec", "=", "float", "(", "dec", "[", "0", "]", ")", "-", "float", "(", "dec", "[", "1", "]", ")", "/", "60.", "-", "float", "(", "dec", "[", "2", "]", ")", "/", "3600.", "return", "outra", ",", "outdec" ]
Convert sexagesimal hours to decimal degrees. Adapted from `PyKE <http://keplergo.arc.nasa.gov/PyKE.shtml>`_. :param float ra: The right ascension :param float dec: The declination :returns: The same values, but in decimal degrees
[ "Convert", "sexadecimal", "hours", "to", "decimal", "degrees", ".", "Adapted", "from", "PyKE", "<http", ":", "//", "keplergo", ".", "arc", ".", "nasa", ".", "gov", "/", "PyKE", ".", "shtml", ">", "_", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L457-L488
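sex2dec is self-contained string parsing, so it can be checked directly:

from everest.missions.k2.utils import sex2dec

sex2dec('11 30 14.51', '+07 35 18.2')  # -> (approximately 172.56, 7.588)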
rodluger/everest
everest/missions/k2/utils.py
GetSources
def GetSources(ID, darcsec=None, stars_only=False): ''' Grabs the EPIC coordinates from the TPF and searches MAST for other EPIC targets within the same aperture. :param int ID: The 9-digit :py:obj:`EPIC` number of the target :param float darcsec: The search radius in arcseconds. \ Default is four times the largest dimension of the aperture. :param bool stars_only: If :py:obj:`True`, only returns objects \ explicitly designated as `"stars"` in MAST. Default :py:obj:`False` :returns: A list of :py:class:`Source` instances containing \ other :py:obj:`EPIC` targets within or close to this \ target's aperture ''' client = kplr.API() star = client.k2_star(ID) tpf = star.get_target_pixel_files()[0] with tpf.open() as f: crpix1 = f[2].header['CRPIX1'] crpix2 = f[2].header['CRPIX2'] crval1 = f[2].header['CRVAL1'] crval2 = f[2].header['CRVAL2'] cdelt1 = f[2].header['CDELT1'] cdelt2 = f[2].header['CDELT2'] pc1_1 = f[2].header['PC1_1'] pc1_2 = f[2].header['PC1_2'] pc2_1 = f[2].header['PC2_1'] pc2_2 = f[2].header['PC2_2'] pc = np.array([[pc1_1, pc1_2], [pc2_1, pc2_2]]) pc = np.linalg.inv(pc) crpix1p = f[2].header['CRPIX1P'] crpix2p = f[2].header['CRPIX2P'] crval1p = f[2].header['CRVAL1P'] crval2p = f[2].header['CRVAL2P'] cdelt1p = f[2].header['CDELT1P'] cdelt2p = f[2].header['CDELT2P'] if darcsec is None: darcsec = 4 * max(f[2].data.shape) epicid, ra, dec, kepmag = MASTRADec( star.k2_ra, star.k2_dec, darcsec, stars_only) sources = [] for i, epic in enumerate(epicid): dra = (ra[i] - crval1) * np.cos(np.radians(dec[i])) / cdelt1 ddec = (dec[i] - crval2) / cdelt2 sx = pc[0, 0] * dra + pc[0, 1] * ddec + crpix1 + crval1p - 1.0 sy = pc[1, 0] * dra + pc[1, 1] * ddec + crpix2 + crval2p - 1.0 sources.append(dict(ID=epic, x=sx, y=sy, mag=kepmag[i], x0=crval1p, y0=crval2p)) return sources
python
def GetSources(ID, darcsec=None, stars_only=False): ''' Grabs the EPIC coordinates from the TPF and searches MAST for other EPIC targets within the same aperture. :param int ID: The 9-digit :py:obj:`EPIC` number of the target :param float darcsec: The search radius in arcseconds. \ Default is four times the largest dimension of the aperture. :param bool stars_only: If :py:obj:`True`, only returns objects \ explicitly designated as `"stars"` in MAST. Default :py:obj:`False` :returns: A list of :py:class:`Source` instances containing \ other :py:obj:`EPIC` targets within or close to this \ target's aperture ''' client = kplr.API() star = client.k2_star(ID) tpf = star.get_target_pixel_files()[0] with tpf.open() as f: crpix1 = f[2].header['CRPIX1'] crpix2 = f[2].header['CRPIX2'] crval1 = f[2].header['CRVAL1'] crval2 = f[2].header['CRVAL2'] cdelt1 = f[2].header['CDELT1'] cdelt2 = f[2].header['CDELT2'] pc1_1 = f[2].header['PC1_1'] pc1_2 = f[2].header['PC1_2'] pc2_1 = f[2].header['PC2_1'] pc2_2 = f[2].header['PC2_2'] pc = np.array([[pc1_1, pc1_2], [pc2_1, pc2_2]]) pc = np.linalg.inv(pc) crpix1p = f[2].header['CRPIX1P'] crpix2p = f[2].header['CRPIX2P'] crval1p = f[2].header['CRVAL1P'] crval2p = f[2].header['CRVAL2P'] cdelt1p = f[2].header['CDELT1P'] cdelt2p = f[2].header['CDELT2P'] if darcsec is None: darcsec = 4 * max(f[2].data.shape) epicid, ra, dec, kepmag = MASTRADec( star.k2_ra, star.k2_dec, darcsec, stars_only) sources = [] for i, epic in enumerate(epicid): dra = (ra[i] - crval1) * np.cos(np.radians(dec[i])) / cdelt1 ddec = (dec[i] - crval2) / cdelt2 sx = pc[0, 0] * dra + pc[0, 1] * ddec + crpix1 + crval1p - 1.0 sy = pc[1, 0] * dra + pc[1, 1] * ddec + crpix2 + crval2p - 1.0 sources.append(dict(ID=epic, x=sx, y=sy, mag=kepmag[i], x0=crval1p, y0=crval2p)) return sources
[ "def", "GetSources", "(", "ID", ",", "darcsec", "=", "None", ",", "stars_only", "=", "False", ")", ":", "client", "=", "kplr", ".", "API", "(", ")", "star", "=", "client", ".", "k2_star", "(", "ID", ")", "tpf", "=", "star", ".", "get_target_pixel_files", "(", ")", "[", "0", "]", "with", "tpf", ".", "open", "(", ")", "as", "f", ":", "crpix1", "=", "f", "[", "2", "]", ".", "header", "[", "'CRPIX1'", "]", "crpix2", "=", "f", "[", "2", "]", ".", "header", "[", "'CRPIX2'", "]", "crval1", "=", "f", "[", "2", "]", ".", "header", "[", "'CRVAL1'", "]", "crval2", "=", "f", "[", "2", "]", ".", "header", "[", "'CRVAL2'", "]", "cdelt1", "=", "f", "[", "2", "]", ".", "header", "[", "'CDELT1'", "]", "cdelt2", "=", "f", "[", "2", "]", ".", "header", "[", "'CDELT2'", "]", "pc1_1", "=", "f", "[", "2", "]", ".", "header", "[", "'PC1_1'", "]", "pc1_2", "=", "f", "[", "2", "]", ".", "header", "[", "'PC1_2'", "]", "pc2_1", "=", "f", "[", "2", "]", ".", "header", "[", "'PC2_1'", "]", "pc2_2", "=", "f", "[", "2", "]", ".", "header", "[", "'PC2_2'", "]", "pc", "=", "np", ".", "array", "(", "[", "[", "pc1_1", ",", "pc1_2", "]", ",", "[", "pc2_1", ",", "pc2_2", "]", "]", ")", "pc", "=", "np", ".", "linalg", ".", "inv", "(", "pc", ")", "crpix1p", "=", "f", "[", "2", "]", ".", "header", "[", "'CRPIX1P'", "]", "crpix2p", "=", "f", "[", "2", "]", ".", "header", "[", "'CRPIX2P'", "]", "crval1p", "=", "f", "[", "2", "]", ".", "header", "[", "'CRVAL1P'", "]", "crval2p", "=", "f", "[", "2", "]", ".", "header", "[", "'CRVAL2P'", "]", "cdelt1p", "=", "f", "[", "2", "]", ".", "header", "[", "'CDELT1P'", "]", "cdelt2p", "=", "f", "[", "2", "]", ".", "header", "[", "'CDELT2P'", "]", "if", "darcsec", "is", "None", ":", "darcsec", "=", "4", "*", "max", "(", "f", "[", "2", "]", ".", "data", ".", "shape", ")", "epicid", ",", "ra", ",", "dec", ",", "kepmag", "=", "MASTRADec", "(", "star", ".", "k2_ra", ",", "star", ".", "k2_dec", ",", "darcsec", ",", "stars_only", ")", "sources", "=", "[", "]", "for", "i", ",", "epic", "in", "enumerate", "(", "epicid", ")", ":", "dra", "=", "(", "ra", "[", "i", "]", "-", "crval1", ")", "*", "np", ".", "cos", "(", "np", ".", "radians", "(", "dec", "[", "i", "]", ")", ")", "/", "cdelt1", "ddec", "=", "(", "dec", "[", "i", "]", "-", "crval2", ")", "/", "cdelt2", "sx", "=", "pc", "[", "0", ",", "0", "]", "*", "dra", "+", "pc", "[", "0", ",", "1", "]", "*", "ddec", "+", "crpix1", "+", "crval1p", "-", "1.0", "sy", "=", "pc", "[", "1", ",", "0", "]", "*", "dra", "+", "pc", "[", "1", ",", "1", "]", "*", "ddec", "+", "crpix2", "+", "crval2p", "-", "1.0", "sources", ".", "append", "(", "dict", "(", "ID", "=", "epic", ",", "x", "=", "sx", ",", "y", "=", "sy", ",", "mag", "=", "kepmag", "[", "i", "]", ",", "x0", "=", "crval1p", ",", "y0", "=", "crval2p", ")", ")", "return", "sources" ]
Grabs the EPIC coordinates from the TPF and searches MAST for other EPIC targets within the same aperture. :param int ID: The 9-digit :py:obj:`EPIC` number of the target :param float darcsec: The search radius in arcseconds. \ Default is four times the largest dimension of the aperture. :param bool stars_only: If :py:obj:`True`, only returns objects \ explicitly designated as `"stars"` in MAST. Default :py:obj:`False` :returns: A list of :py:class:`Source` instances containing \ other :py:obj:`EPIC` targets within or close to this \ target's aperture
[ "Grabs", "the", "EPIC", "coordinates", "from", "the", "TPF", "and", "searches", "MAST", "for", "other", "EPIC", "targets", "within", "the", "same", "aperture", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L491-L542
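A sketch for GetSources (requires kplr, a downloadable target pixel file, and MAST access; EPIC hypothetical). Despite the docstring's mention of Source instances, the function returns plain dicts:

from everest.missions.k2.utils import GetSources

for s in GetSources(201367065, stars_only=True):
    print(s['ID'], s['x'], s['y'], s['mag'])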
rodluger/everest
everest/missions/k2/utils.py
GetHiResImage
def GetHiResImage(ID): ''' Queries the Palomar Observatory Sky Survey II catalog to obtain a higher resolution optical image of the star with EPIC number :py:obj:`ID`. ''' # Get the TPF info client = kplr.API() star = client.k2_star(ID) k2ra = star.k2_ra k2dec = star.k2_dec tpf = star.get_target_pixel_files()[0] with tpf.open() as f: k2wcs = WCS(f[2].header) shape = np.array(f[1].data.field('FLUX'), dtype='float64')[0].shape # Get the POSS URL hou = int(k2ra * 24 / 360.) min = int(60 * (k2ra * 24 / 360. - hou)) sec = 60 * (60 * (k2ra * 24 / 360. - hou) - min) ra = '%02d+%02d+%.2f' % (hou, min, sec) sgn = '' if np.sign(k2dec) >= 0 else '-' deg = int(np.abs(k2dec)) min = int(60 * (np.abs(k2dec) - deg)) sec = 3600 * (np.abs(k2dec) - deg - min / 60) dec = '%s%02d+%02d+%.1f' % (sgn, deg, min, sec) url = 'https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&' + \ 'r=%s&d=%s&e=J2000&h=3&w=3&f=fits&c=none&fov=NONE&v3=' % (ra, dec) # Query the server r = urllib.request.Request(url) handler = urllib.request.urlopen(r) code = handler.getcode() if int(code) != 200: # Unavailable return None data = handler.read() # Atomically write to a temp file f = NamedTemporaryFile("wb", delete=False) f.write(data) f.flush() os.fsync(f.fileno()) f.close() # Now open the POSS fits file with pyfits.open(f.name) as ff: img = ff[0].data # Map POSS pixels onto K2 pixels xy = np.empty((img.shape[0] * img.shape[1], 2)) z = np.empty(img.shape[0] * img.shape[1]) pwcs = WCS(f.name) k = 0 for i in range(img.shape[0]): for j in range(img.shape[1]): ra, dec = pwcs.all_pix2world(float(j), float(i), 0) xy[k] = k2wcs.all_world2pix(ra, dec, 0) z[k] = img[i, j] k += 1 # Resample grid_x, grid_y = np.mgrid[-0.5:shape[1] - 0.5:0.1, -0.5:shape[0] - 0.5:0.1] resampled = griddata(xy, z, (grid_x, grid_y), method='cubic') # Rotate to align with K2 image. Not sure why, but it is necessary resampled = np.rot90(resampled) return resampled
python
def GetHiResImage(ID): ''' Queries the Palomar Observatory Sky Survey II catalog to obtain a higher resolution optical image of the star with EPIC number :py:obj:`ID`. ''' # Get the TPF info client = kplr.API() star = client.k2_star(ID) k2ra = star.k2_ra k2dec = star.k2_dec tpf = star.get_target_pixel_files()[0] with tpf.open() as f: k2wcs = WCS(f[2].header) shape = np.array(f[1].data.field('FLUX'), dtype='float64')[0].shape # Get the POSS URL hou = int(k2ra * 24 / 360.) min = int(60 * (k2ra * 24 / 360. - hou)) sec = 60 * (60 * (k2ra * 24 / 360. - hou) - min) ra = '%02d+%02d+%.2f' % (hou, min, sec) sgn = '' if np.sign(k2dec) >= 0 else '-' deg = int(np.abs(k2dec)) min = int(60 * (np.abs(k2dec) - deg)) sec = 3600 * (np.abs(k2dec) - deg - min / 60) dec = '%s%02d+%02d+%.1f' % (sgn, deg, min, sec) url = 'https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&' + \ 'r=%s&d=%s&e=J2000&h=3&w=3&f=fits&c=none&fov=NONE&v3=' % (ra, dec) # Query the server r = urllib.request.Request(url) handler = urllib.request.urlopen(r) code = handler.getcode() if int(code) != 200: # Unavailable return None data = handler.read() # Atomically write to a temp file f = NamedTemporaryFile("wb", delete=False) f.write(data) f.flush() os.fsync(f.fileno()) f.close() # Now open the POSS fits file with pyfits.open(f.name) as ff: img = ff[0].data # Map POSS pixels onto K2 pixels xy = np.empty((img.shape[0] * img.shape[1], 2)) z = np.empty(img.shape[0] * img.shape[1]) pwcs = WCS(f.name) k = 0 for i in range(img.shape[0]): for j in range(img.shape[1]): ra, dec = pwcs.all_pix2world(float(j), float(i), 0) xy[k] = k2wcs.all_world2pix(ra, dec, 0) z[k] = img[i, j] k += 1 # Resample grid_x, grid_y = np.mgrid[-0.5:shape[1] - 0.5:0.1, -0.5:shape[0] - 0.5:0.1] resampled = griddata(xy, z, (grid_x, grid_y), method='cubic') # Rotate to align with K2 image. Not sure why, but it is necessary resampled = np.rot90(resampled) return resampled
[ "def", "GetHiResImage", "(", "ID", ")", ":", "# Get the TPF info", "client", "=", "kplr", ".", "API", "(", ")", "star", "=", "client", ".", "k2_star", "(", "ID", ")", "k2ra", "=", "star", ".", "k2_ra", "k2dec", "=", "star", ".", "k2_dec", "tpf", "=", "star", ".", "get_target_pixel_files", "(", ")", "[", "0", "]", "with", "tpf", ".", "open", "(", ")", "as", "f", ":", "k2wcs", "=", "WCS", "(", "f", "[", "2", "]", ".", "header", ")", "shape", "=", "np", ".", "array", "(", "f", "[", "1", "]", ".", "data", ".", "field", "(", "'FLUX'", ")", ",", "dtype", "=", "'float64'", ")", "[", "0", "]", ".", "shape", "# Get the POSS URL", "hou", "=", "int", "(", "k2ra", "*", "24", "/", "360.", ")", "min", "=", "int", "(", "60", "*", "(", "k2ra", "*", "24", "/", "360.", "-", "hou", ")", ")", "sec", "=", "60", "*", "(", "60", "*", "(", "k2ra", "*", "24", "/", "360.", "-", "hou", ")", "-", "min", ")", "ra", "=", "'%02d+%02d+%.2f'", "%", "(", "hou", ",", "min", ",", "sec", ")", "sgn", "=", "''", "if", "np", ".", "sign", "(", "k2dec", ")", ">=", "0", "else", "'-'", "deg", "=", "int", "(", "np", ".", "abs", "(", "k2dec", ")", ")", "min", "=", "int", "(", "60", "*", "(", "np", ".", "abs", "(", "k2dec", ")", "-", "deg", ")", ")", "sec", "=", "3600", "*", "(", "np", ".", "abs", "(", "k2dec", ")", "-", "deg", "-", "min", "/", "60", ")", "dec", "=", "'%s%02d+%02d+%.1f'", "%", "(", "sgn", ",", "deg", ",", "min", ",", "sec", ")", "url", "=", "'https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&'", "+", "'r=%s&d=%s&e=J2000&h=3&w=3&f=fits&c=none&fov=NONE&v3='", "%", "(", "ra", ",", "dec", ")", "# Query the server", "r", "=", "urllib", ".", "request", ".", "Request", "(", "url", ")", "handler", "=", "urllib", ".", "request", ".", "urlopen", "(", "r", ")", "code", "=", "handler", ".", "getcode", "(", ")", "if", "int", "(", "code", ")", "!=", "200", ":", "# Unavailable", "return", "None", "data", "=", "handler", ".", "read", "(", ")", "# Atomically write to a temp file", "f", "=", "NamedTemporaryFile", "(", "\"wb\"", ",", "delete", "=", "False", ")", "f", ".", "write", "(", "data", ")", "f", ".", "flush", "(", ")", "os", ".", "fsync", "(", "f", ".", "fileno", "(", ")", ")", "f", ".", "close", "(", ")", "# Now open the POSS fits file", "with", "pyfits", ".", "open", "(", "f", ".", "name", ")", "as", "ff", ":", "img", "=", "ff", "[", "0", "]", ".", "data", "# Map POSS pixels onto K2 pixels", "xy", "=", "np", ".", "empty", "(", "(", "img", ".", "shape", "[", "0", "]", "*", "img", ".", "shape", "[", "1", "]", ",", "2", ")", ")", "z", "=", "np", ".", "empty", "(", "img", ".", "shape", "[", "0", "]", "*", "img", ".", "shape", "[", "1", "]", ")", "pwcs", "=", "WCS", "(", "f", ".", "name", ")", "k", "=", "0", "for", "i", "in", "range", "(", "img", ".", "shape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "img", ".", "shape", "[", "1", "]", ")", ":", "ra", ",", "dec", "=", "pwcs", ".", "all_pix2world", "(", "float", "(", "j", ")", ",", "float", "(", "i", ")", ",", "0", ")", "xy", "[", "k", "]", "=", "k2wcs", ".", "all_world2pix", "(", "ra", ",", "dec", ",", "0", ")", "z", "[", "k", "]", "=", "img", "[", "i", ",", "j", "]", "k", "+=", "1", "# Resample", "grid_x", ",", "grid_y", "=", "np", ".", "mgrid", "[", "-", "0.5", ":", "shape", "[", "1", "]", "-", "0.5", ":", "0.1", ",", "-", "0.5", ":", "shape", "[", "0", "]", "-", "0.5", ":", "0.1", "]", "resampled", "=", "griddata", "(", "xy", ",", "z", ",", "(", "grid_x", ",", "grid_y", ")", ",", "method", "=", "'cubic'", ")", "# Rotate to align with K2 image. 
Not sure why, but it is necessary", "resampled", "=", "np", ".", "rot90", "(", "resampled", ")", "return", "resampled" ]
Queries the Palomar Observatory Sky Survey II catalog to obtain a higher resolution optical image of the star with EPIC number :py:obj:`ID`.
[ "Queries", "the", "Palomar", "Observatory", "Sky", "Survey", "II", "catalog", "to", "obtain", "a", "higher", "resolution", "optical", "image", "of", "the", "star", "with", "EPIC", "number", ":", "py", ":", "obj", ":", "ID", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L545-L615
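A sketch for GetHiResImage (network-dependent; returns None if the POSS server is unavailable; EPIC hypothetical):

import matplotlib.pyplot as plt
from everest.missions.k2.utils import GetHiResImage

img = GetHiResImage(201367065)
if img is not None:
    plt.imshow(img)  # POSS II image resampled onto the K2 pixel grid
    plt.show()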
rodluger/everest
everest/missions/k2/utils.py
SaturationFlux
def SaturationFlux(EPIC, campaign=None, **kwargs): ''' Returns the well depth for the target. If any of the target's pixels have flux larger than this value, they are likely to be saturated and cause charge bleeding. The well depths were obtained from Table 13 of the Kepler instrument handbook. We assume an exposure time of 6.02s. ''' channel, well_depth = np.loadtxt(os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'well_depth.tsv'), unpack=True) satflx = well_depth[channel == Channel(EPIC, campaign=campaign)][0] / 6.02 return satflx
python
def SaturationFlux(EPIC, campaign=None, **kwargs): ''' Returns the well depth for the target. If any of the target's pixels have flux larger than this value, they are likely to be saturated and cause charge bleeding. The well depths were obtained from Table 13 of the Kepler instrument handbook. We assume an exposure time of 6.02s. ''' channel, well_depth = np.loadtxt(os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'well_depth.tsv'), unpack=True) satflx = well_depth[channel == Channel(EPIC, campaign=campaign)][0] / 6.02 return satflx
[ "def", "SaturationFlux", "(", "EPIC", ",", "campaign", "=", "None", ",", "*", "*", "kwargs", ")", ":", "channel", ",", "well_depth", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'well_depth.tsv'", ")", ",", "unpack", "=", "True", ")", "satflx", "=", "well_depth", "[", "channel", "==", "Channel", "(", "EPIC", ",", "campaign", "=", "campaign", ")", "]", "[", "0", "]", "/", "6.02", "return", "satflx" ]
Returns the well depth for the target. If any of the target's pixels have flux larger than this value, they are likely to be saturated and cause charge bleeding. The well depths were obtained from Table 13 of the Kepler instrument handbook. We assume an exposure time of 6.02s.
[ "Returns", "the", "well", "depth", "for", "the", "target", ".", "If", "any", "of", "the", "target", "s", "pixels", "have", "flux", "larger", "than", "this", "value", "they", "are", "likely", "to", "be", "saturated", "and", "cause", "charge", "bleeding", ".", "The", "well", "depths", "were", "obtained", "from", "Table", "13", "of", "the", "Kepler", "instrument", "handbook", ".", "We", "assume", "an", "exposure", "time", "of", "6", ".", "02s", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L627-L641
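A sketch for SaturationFlux (assumes the bundled well_depth.tsv table is present; EPIC hypothetical):

from everest.missions.k2.utils import SaturationFlux

satflx = SaturationFlux(201367065, campaign=1)  # flux above which pixels may saturate and bleed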
rodluger/everest
everest/missions/k2/sysrem.py
GetChunk
def GetChunk(time, breakpoints, b, mask=[]): ''' Returns the indices corresponding to a given light curve chunk. :param int b: The index of the chunk to return ''' M = np.delete(np.arange(len(time)), mask, axis=0) if b > 0: res = M[(M > breakpoints[b - 1]) & (M <= breakpoints[b])] else: res = M[M <= breakpoints[b]] return res
python
def GetChunk(time, breakpoints, b, mask=[]): ''' Returns the indices corresponding to a given light curve chunk. :param int b: The index of the chunk to return ''' M = np.delete(np.arange(len(time)), mask, axis=0) if b > 0: res = M[(M > breakpoints[b - 1]) & (M <= breakpoints[b])] else: res = M[M <= breakpoints[b]] return res
[ "def", "GetChunk", "(", "time", ",", "breakpoints", ",", "b", ",", "mask", "=", "[", "]", ")", ":", "M", "=", "np", ".", "delete", "(", "np", ".", "arange", "(", "len", "(", "time", ")", ")", ",", "mask", ",", "axis", "=", "0", ")", "if", "b", ">", "0", ":", "res", "=", "M", "[", "(", "M", ">", "breakpoints", "[", "b", "-", "1", "]", ")", "&", "(", "M", "<=", "breakpoints", "[", "b", "]", ")", "]", "else", ":", "res", "=", "M", "[", "M", "<=", "breakpoints", "[", "b", "]", "]", "return", "res" ]
Returns the indices corresponding to a given light curve chunk. :param int b: The index of the chunk to return
[ "Returns", "the", "indices", "corresponding", "to", "a", "given", "light", "curve", "chunk", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/sysrem.py#L25-L38
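GetChunk operates purely on indices, so a synthetic example illustrates the breakpoint logic:

import numpy as np
from everest.missions.k2.sysrem import GetChunk

time = np.arange(100.)
GetChunk(time, [49, 99], 0)             # indices 0..49
GetChunk(time, [49, 99], 1, mask=[60])  # indices 50..99, minus the masked cadence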
rodluger/everest
everest/missions/k2/sysrem.py
GetStars
def GetStars(campaign, module, model='nPLD', **kwargs): ''' Returns de-trended light curves for all stars on a given module in a given campaign. ''' # Get the channel numbers channels = Channels(module) assert channels is not None, "No channels available on this module." # Get the EPIC numbers all = GetK2Campaign(campaign) stars = np.array([s[0] for s in all if s[2] in channels and os.path.exists( os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % s[0])[:4] + '00000', ('%09d' % s[0])[4:], model + '.npz'))], dtype=int) N = len(stars) assert N > 0, "No light curves found for campaign %d, module %d." % ( campaign, module) # Loop over all stars and store the fluxes in a list fluxes = [] errors = [] kpars = [] for n in range(N): # De-trended light curve file name nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % stars[n])[:4] + '00000', ('%09d' % stars[n])[4:], model + '.npz') # Get the data data = np.load(nf) t = data['time'] if n == 0: time = t breakpoints = data['breakpoints'] # Get de-trended light curve y = data['fraw'] - data['model'] err = data['fraw_err'] # De-weight outliers and bad timestamps m = np.array(list(set(np.concatenate([data['outmask'], data['badmask'], data['nanmask'], data['transitmask']]))), dtype=int) # Interpolate over the outliers y = np.interp(t, np.delete(t, m), np.delete(y, m)) err = np.interp(t, np.delete(t, m), np.delete(err, m)) # Append to our running lists fluxes.append(y) errors.append(err) kpars.append(data['kernel_params']) return time, breakpoints, np.array(fluxes), \ np.array(errors), np.array(kpars)
python
def GetStars(campaign, module, model='nPLD', **kwargs): ''' Returns de-trended light curves for all stars on a given module in a given campaign. ''' # Get the channel numbers channels = Channels(module) assert channels is not None, "No channels available on this module." # Get the EPIC numbers all = GetK2Campaign(campaign) stars = np.array([s[0] for s in all if s[2] in channels and os.path.exists( os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % s[0])[:4] + '00000', ('%09d' % s[0])[4:], model + '.npz'))], dtype=int) N = len(stars) assert N > 0, "No light curves found for campaign %d, module %d." % ( campaign, module) # Loop over all stars and store the fluxes in a list fluxes = [] errors = [] kpars = [] for n in range(N): # De-trended light curve file name nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % stars[n])[:4] + '00000', ('%09d' % stars[n])[4:], model + '.npz') # Get the data data = np.load(nf) t = data['time'] if n == 0: time = t breakpoints = data['breakpoints'] # Get de-trended light curve y = data['fraw'] - data['model'] err = data['fraw_err'] # De-weight outliers and bad timestamps m = np.array(list(set(np.concatenate([data['outmask'], data['badmask'], data['nanmask'], data['transitmask']]))), dtype=int) # Interpolate over the outliers y = np.interp(t, np.delete(t, m), np.delete(y, m)) err = np.interp(t, np.delete(t, m), np.delete(err, m)) # Append to our running lists fluxes.append(y) errors.append(err) kpars.append(data['kernel_params']) return time, breakpoints, np.array(fluxes), \ np.array(errors), np.array(kpars)
[ "def", "GetStars", "(", "campaign", ",", "module", ",", "model", "=", "'nPLD'", ",", "*", "*", "kwargs", ")", ":", "# Get the channel numbers", "channels", "=", "Channels", "(", "module", ")", "assert", "channels", "is", "not", "None", ",", "\"No channels available on this module.\"", "# Get the EPIC numbers", "all", "=", "GetK2Campaign", "(", "campaign", ")", "stars", "=", "np", ".", "array", "(", "[", "s", "[", "0", "]", "for", "s", "in", "all", "if", "s", "[", "2", "]", "in", "channels", "and", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "int", "(", "campaign", ")", ",", "(", "'%09d'", "%", "s", "[", "0", "]", ")", "[", ":", "4", "]", "+", "'00000'", ",", "(", "'%09d'", "%", "s", "[", "0", "]", ")", "[", "4", ":", "]", ",", "model", "+", "'.npz'", ")", ")", "]", ",", "dtype", "=", "int", ")", "N", "=", "len", "(", "stars", ")", "assert", "N", ">", "0", ",", "\"No light curves found for campaign %d, module %d.\"", "%", "(", "campaign", ",", "module", ")", "# Loop over all stars and store the fluxes in a list", "fluxes", "=", "[", "]", "errors", "=", "[", "]", "kpars", "=", "[", "]", "for", "n", "in", "range", "(", "N", ")", ":", "# De-trended light curve file name", "nf", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "int", "(", "campaign", ")", ",", "(", "'%09d'", "%", "stars", "[", "n", "]", ")", "[", ":", "4", "]", "+", "'00000'", ",", "(", "'%09d'", "%", "stars", "[", "n", "]", ")", "[", "4", ":", "]", ",", "model", "+", "'.npz'", ")", "# Get the data", "data", "=", "np", ".", "load", "(", "nf", ")", "t", "=", "data", "[", "'time'", "]", "if", "n", "==", "0", ":", "time", "=", "t", "breakpoints", "=", "data", "[", "'breakpoints'", "]", "# Get de-trended light curve", "y", "=", "data", "[", "'fraw'", "]", "-", "data", "[", "'model'", "]", "err", "=", "data", "[", "'fraw_err'", "]", "# De-weight outliers and bad timestamps", "m", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "data", "[", "'outmask'", "]", ",", "data", "[", "'badmask'", "]", ",", "data", "[", "'nanmask'", "]", ",", "data", "[", "'transitmask'", "]", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "# Interpolate over the outliers", "y", "=", "np", ".", "interp", "(", "t", ",", "np", ".", "delete", "(", "t", ",", "m", ")", ",", "np", ".", "delete", "(", "y", ",", "m", ")", ")", "err", "=", "np", ".", "interp", "(", "t", ",", "np", ".", "delete", "(", "t", ",", "m", ")", ",", "np", ".", "delete", "(", "err", ",", "m", ")", ")", "# Append to our running lists", "fluxes", ".", "append", "(", "y", ")", "errors", ".", "append", "(", "err", ")", "kpars", ".", "append", "(", "data", "[", "'kernel_params'", "]", ")", "return", "time", ",", "breakpoints", ",", "np", ".", "array", "(", "fluxes", ")", ",", "np", ".", "array", "(", "errors", ")", ",", "np", ".", "array", "(", "kpars", ")" ]
Returns de-trended light curves for all stars on a given module in a given campaign.
[ "Returns", "de", "-", "trended", "light", "curves", "for", "all", "stars", "on", "a", "given", "module", "in", "a", "given", "campaign", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/sysrem.py#L41-L102
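A minimal usage sketch for the GetStars record above, assuming everest is installed and its de-trended K2 products exist locally under EVEREST_DAT; the campaign and module numbers are placeholders:

from everest.missions.k2.sysrem import GetStars

# One de-trended light curve per row; all stars share the same time grid
time, breakpoints, fluxes, errors, kpars = GetStars(6, 12, model='nPLD')
print(fluxes.shape)  # (nstars, ntime)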
rodluger/everest
everest/missions/k2/sysrem.py
SysRem
def SysRem(time, flux, err, ncbv=5, niter=50, sv_win=999, sv_order=3,
           **kwargs):
    '''
    Applies :py:obj:`SysRem` to a given set of light curves.

    :param array_like time: The time array for all of the light curves
    :param array_like flux: A 2D array of the fluxes for each of the light \
           curves, shape `(nfluxes, ntime)`
    :param array_like err: A 2D array of the flux errors for each of the \
           light curves, shape `(nfluxes, ntime)`
    :param int ncbv: The number of signals to recover. Default 5
    :param int niter: The number of :py:obj:`SysRem` iterations to perform. \
           Default 50
    :param int sv_win: The Savitsky-Golay filter window size. Default 999
    :param int sv_order: The Savitsky-Golay filter order. Default 3

    '''

    nflx, tlen = flux.shape

    # Get normalized fluxes
    med = np.nanmedian(flux, axis=1).reshape(-1, 1)
    y = flux - med

    # Compute the inverse of the variances
    invvar = 1. / err ** 2

    # The CBVs for this set of fluxes
    cbvs = np.zeros((ncbv, tlen))

    # Recover `ncbv` components
    for n in range(ncbv):

        # Initialize the weights and regressors
        c = np.zeros(nflx)
        a = np.ones(tlen)
        f = y * invvar

        # Perform `niter` iterations
        for i in range(niter):

            # Compute the `c` vector (the weights)
            c = np.dot(f, a) / np.dot(invvar, a ** 2)

            # Compute the `a` vector (the regressors)
            a = np.dot(c, f) / np.dot(c ** 2, invvar)

        # Remove this component from all light curves
        y -= np.outer(c, a)

        # Save this regressor after smoothing it a bit
        if sv_win >= len(a):
            sv_win = len(a) - 1
        if sv_win % 2 == 0:
            sv_win -= 1
        cbvs[n] = savgol_filter(a - np.nanmedian(a), sv_win, sv_order)

    return cbvs
python
def SysRem(time, flux, err, ncbv=5, niter=50, sv_win=999, sv_order=3,
           **kwargs):
    '''
    Applies :py:obj:`SysRem` to a given set of light curves.

    :param array_like time: The time array for all of the light curves
    :param array_like flux: A 2D array of the fluxes for each of the light \
           curves, shape `(nfluxes, ntime)`
    :param array_like err: A 2D array of the flux errors for each of the \
           light curves, shape `(nfluxes, ntime)`
    :param int ncbv: The number of signals to recover. Default 5
    :param int niter: The number of :py:obj:`SysRem` iterations to perform. \
           Default 50
    :param int sv_win: The Savitsky-Golay filter window size. Default 999
    :param int sv_order: The Savitsky-Golay filter order. Default 3

    '''

    nflx, tlen = flux.shape

    # Get normalized fluxes
    med = np.nanmedian(flux, axis=1).reshape(-1, 1)
    y = flux - med

    # Compute the inverse of the variances
    invvar = 1. / err ** 2

    # The CBVs for this set of fluxes
    cbvs = np.zeros((ncbv, tlen))

    # Recover `ncbv` components
    for n in range(ncbv):

        # Initialize the weights and regressors
        c = np.zeros(nflx)
        a = np.ones(tlen)
        f = y * invvar

        # Perform `niter` iterations
        for i in range(niter):

            # Compute the `c` vector (the weights)
            c = np.dot(f, a) / np.dot(invvar, a ** 2)

            # Compute the `a` vector (the regressors)
            a = np.dot(c, f) / np.dot(c ** 2, invvar)

        # Remove this component from all light curves
        y -= np.outer(c, a)

        # Save this regressor after smoothing it a bit
        if sv_win >= len(a):
            sv_win = len(a) - 1
        if sv_win % 2 == 0:
            sv_win -= 1
        cbvs[n] = savgol_filter(a - np.nanmedian(a), sv_win, sv_order)

    return cbvs
[ "def", "SysRem", "(", "time", ",", "flux", ",", "err", ",", "ncbv", "=", "5", ",", "niter", "=", "50", ",", "sv_win", "=", "999", ",", "sv_order", "=", "3", ",", "*", "*", "kwargs", ")", ":", "nflx", ",", "tlen", "=", "flux", ".", "shape", "# Get normalized fluxes", "med", "=", "np", ".", "nanmedian", "(", "flux", ",", "axis", "=", "1", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "y", "=", "flux", "-", "med", "# Compute the inverse of the variances", "invvar", "=", "1.", "/", "err", "**", "2", "# The CBVs for this set of fluxes", "cbvs", "=", "np", ".", "zeros", "(", "(", "ncbv", ",", "tlen", ")", ")", "# Recover `ncbv` components", "for", "n", "in", "range", "(", "ncbv", ")", ":", "# Initialize the weights and regressors", "c", "=", "np", ".", "zeros", "(", "nflx", ")", "a", "=", "np", ".", "ones", "(", "tlen", ")", "f", "=", "y", "*", "invvar", "# Perform `niter` iterations", "for", "i", "in", "range", "(", "niter", ")", ":", "# Compute the `c` vector (the weights)", "c", "=", "np", ".", "dot", "(", "f", ",", "a", ")", "/", "np", ".", "dot", "(", "invvar", ",", "a", "**", "2", ")", "# Compute the `a` vector (the regressors)", "a", "=", "np", ".", "dot", "(", "c", ",", "f", ")", "/", "np", ".", "dot", "(", "c", "**", "2", ",", "invvar", ")", "# Remove this component from all light curves", "y", "-=", "np", ".", "outer", "(", "c", ",", "a", ")", "# Save this regressor after smoothing it a bit", "if", "sv_win", ">=", "len", "(", "a", ")", ":", "sv_win", "=", "len", "(", "a", ")", "-", "1", "if", "sv_win", "%", "2", "==", "0", ":", "sv_win", "-=", "1", "cbvs", "[", "n", "]", "=", "savgol_filter", "(", "a", "-", "np", ".", "nanmedian", "(", "a", ")", ",", "sv_win", ",", "sv_order", ")", "return", "cbvs" ]
Applies :py:obj:`SysRem` to a given set of light curves. :param array_like time: The time array for all of the light curves :param array_like flux: A 2D array of the fluxes for each of the light \ curves, shape `(nfluxes, ntime)` :param array_like err: A 2D array of the flux errors for each of the \ light curves, shape `(nfluxes, ntime)` :param int ncbv: The number of signals to recover. Default 5 :param int niter: The number of :py:obj:`SysRem` iterations to perform. \ Default 50 :param int sv_win: The Savitsky-Golay filter window size. Default 999 :param int sv_order: The Savitsky-Golay filter order. Default 3
[ "Applies", ":", "py", ":", "obj", ":", "SysRem", "to", "a", "given", "set", "of", "light", "curves", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/sysrem.py#L105-L162
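Because the SysRem iteration above is pure numpy/scipy, it can be sanity-checked on synthetic light curves that all share one injected systematic trend; a sketch assuming the SysRem function above is in scope (the trend shape and noise levels are arbitrary):

import numpy as np
from scipy.signal import savgol_filter

rng = np.random.default_rng(0)
time = np.linspace(0., 80., 1000)
trend = np.sin(2 * np.pi * time / 40.)               # one shared systematic
flux = 1. + 0.05 * rng.standard_normal((20, 1000))   # 20 noisy light curves
flux += np.outer(rng.uniform(0.5, 2., 20), trend)    # scaled copy per star
err = 0.05 * np.ones_like(flux)

cbvs = SysRem(time, flux, err, ncbv=2, sv_win=301)
print(abs(np.corrcoef(cbvs[0], trend)[0, 1]))        # close to 1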
rodluger/everest
everest/missions/k2/sysrem.py
GetCBVs
def GetCBVs(campaign, model='nPLD', clobber=False, **kwargs):
    '''
    Computes the CBVs for a given campaign.

    :param int campaign: The campaign number
    :param str model: The name of the :py:obj:`everest` model. Default `nPLD`
    :param bool clobber: Overwrite existing files? Default `False`

    '''

    # Initialize logging?
    if len(logging.getLogger().handlers) == 0:
        InitLog(file_name=None, screen_level=logging.DEBUG)
    log.info('Computing CBVs for campaign %d...' % (campaign))

    # Output path
    path = os.path.join(EVEREST_DAT, 'k2', 'cbv', 'c%02d' % campaign)
    if not os.path.exists(path):
        os.makedirs(path)

    # Get the design matrix
    xfile = os.path.join(path, 'X.npz')
    if clobber or not os.path.exists(xfile):

        log.info('Obtaining light curves...')
        time = None
        for module in range(2, 25):

            # Get the light curves
            lcfile = os.path.join(path, '%d.npz' % module)
            if clobber or not os.path.exists(lcfile):
                try:
                    time, breakpoints, fluxes, errors, kpars = GetStars(
                        campaign, module, model=model, **kwargs)
                except AssertionError:
                    continue
                np.savez(lcfile, time=time, breakpoints=breakpoints,
                         fluxes=fluxes, errors=errors, kpars=kpars)

            # Load the light curves
            lcs = np.load(lcfile)
            if time is None:
                time = lcs['time']
                breakpoints = lcs['breakpoints']
                fluxes = lcs['fluxes']
                errors = lcs['errors']
                kpars = lcs['kpars']
            else:
                fluxes = np.vstack([fluxes, lcs['fluxes']])
                errors = np.vstack([errors, lcs['errors']])
                kpars = np.vstack([kpars, lcs['kpars']])

        # Compute the design matrix
        log.info('Running SysRem...')
        X = np.ones((len(time), 1 + kwargs.get('ncbv', 5)))

        # Loop over the segments
        new_fluxes = np.zeros_like(fluxes)
        for b in range(len(breakpoints)):

            # Get the current segment's indices
            inds = GetChunk(time, breakpoints, b)

            # Update the error arrays with the white GP component
            for j in range(len(errors)):
                errors[j] = np.sqrt(errors[j] ** 2 + kpars[j][0] ** 2)

            # Get de-trended fluxes
            X[inds, 1:] = SysRem(time[inds], fluxes[:, inds],
                                 errors[:, inds], **kwargs).T

        # Save
        np.savez(xfile, X=X, time=time, breakpoints=breakpoints)

    else:

        # Load from disk
        data = np.load(xfile)
        X = data['X'][()]
        time = data['time'][()]
        breakpoints = data['breakpoints'][()]

    # Plot
    plotfile = os.path.join(path, 'X.pdf')
    if clobber or not os.path.exists(plotfile):
        fig, ax = pl.subplots(2, 3, figsize=(12, 8))
        fig.subplots_adjust(left=0.05, right=0.95)
        ax = ax.flatten()
        for axis in ax:
            axis.set_xticks([])
            axis.set_yticks([])
        for b in range(len(breakpoints)):
            inds = GetChunk(time, breakpoints, b)
            for n in range(min(6, X.shape[1])):
                ax[n].plot(time[inds], X[inds, n])
                ax[n].set_title(n, fontsize=14)
        fig.savefig(plotfile, bbox_inches='tight')

    return X
python
def GetCBVs(campaign, model='nPLD', clobber=False, **kwargs):
    '''
    Computes the CBVs for a given campaign.

    :param int campaign: The campaign number
    :param str model: The name of the :py:obj:`everest` model. Default `nPLD`
    :param bool clobber: Overwrite existing files? Default `False`

    '''

    # Initialize logging?
    if len(logging.getLogger().handlers) == 0:
        InitLog(file_name=None, screen_level=logging.DEBUG)
    log.info('Computing CBVs for campaign %d...' % (campaign))

    # Output path
    path = os.path.join(EVEREST_DAT, 'k2', 'cbv', 'c%02d' % campaign)
    if not os.path.exists(path):
        os.makedirs(path)

    # Get the design matrix
    xfile = os.path.join(path, 'X.npz')
    if clobber or not os.path.exists(xfile):

        log.info('Obtaining light curves...')
        time = None
        for module in range(2, 25):

            # Get the light curves
            lcfile = os.path.join(path, '%d.npz' % module)
            if clobber or not os.path.exists(lcfile):
                try:
                    time, breakpoints, fluxes, errors, kpars = GetStars(
                        campaign, module, model=model, **kwargs)
                except AssertionError:
                    continue
                np.savez(lcfile, time=time, breakpoints=breakpoints,
                         fluxes=fluxes, errors=errors, kpars=kpars)

            # Load the light curves
            lcs = np.load(lcfile)
            if time is None:
                time = lcs['time']
                breakpoints = lcs['breakpoints']
                fluxes = lcs['fluxes']
                errors = lcs['errors']
                kpars = lcs['kpars']
            else:
                fluxes = np.vstack([fluxes, lcs['fluxes']])
                errors = np.vstack([errors, lcs['errors']])
                kpars = np.vstack([kpars, lcs['kpars']])

        # Compute the design matrix
        log.info('Running SysRem...')
        X = np.ones((len(time), 1 + kwargs.get('ncbv', 5)))

        # Loop over the segments
        new_fluxes = np.zeros_like(fluxes)
        for b in range(len(breakpoints)):

            # Get the current segment's indices
            inds = GetChunk(time, breakpoints, b)

            # Update the error arrays with the white GP component
            for j in range(len(errors)):
                errors[j] = np.sqrt(errors[j] ** 2 + kpars[j][0] ** 2)

            # Get de-trended fluxes
            X[inds, 1:] = SysRem(time[inds], fluxes[:, inds],
                                 errors[:, inds], **kwargs).T

        # Save
        np.savez(xfile, X=X, time=time, breakpoints=breakpoints)

    else:

        # Load from disk
        data = np.load(xfile)
        X = data['X'][()]
        time = data['time'][()]
        breakpoints = data['breakpoints'][()]

    # Plot
    plotfile = os.path.join(path, 'X.pdf')
    if clobber or not os.path.exists(plotfile):
        fig, ax = pl.subplots(2, 3, figsize=(12, 8))
        fig.subplots_adjust(left=0.05, right=0.95)
        ax = ax.flatten()
        for axis in ax:
            axis.set_xticks([])
            axis.set_yticks([])
        for b in range(len(breakpoints)):
            inds = GetChunk(time, breakpoints, b)
            for n in range(min(6, X.shape[1])):
                ax[n].plot(time[inds], X[inds, n])
                ax[n].set_title(n, fontsize=14)
        fig.savefig(plotfile, bbox_inches='tight')

    return X
[ "def", "GetCBVs", "(", "campaign", ",", "model", "=", "'nPLD'", ",", "clobber", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Initialize logging?", "if", "len", "(", "logging", ".", "getLogger", "(", ")", ".", "handlers", ")", "==", "0", ":", "InitLog", "(", "file_name", "=", "None", ",", "screen_level", "=", "logging", ".", "DEBUG", ")", "log", ".", "info", "(", "'Computing CBVs for campaign %d...'", "%", "(", "campaign", ")", ")", "# Output path", "path", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'cbv'", ",", "'c%02d'", "%", "campaign", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "# Get the design matrix", "xfile", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'X.npz'", ")", "if", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "xfile", ")", ":", "log", ".", "info", "(", "'Obtaining light curves...'", ")", "time", "=", "None", "for", "module", "in", "range", "(", "2", ",", "25", ")", ":", "# Get the light curves", "lcfile", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'%d.npz'", "%", "module", ")", "if", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "lcfile", ")", ":", "try", ":", "time", ",", "breakpoints", ",", "fluxes", ",", "errors", ",", "kpars", "=", "GetStars", "(", "campaign", ",", "module", ",", "model", "=", "model", ",", "*", "*", "kwargs", ")", "except", "AssertionError", ":", "continue", "np", ".", "savez", "(", "lcfile", ",", "time", "=", "time", ",", "breakpoints", "=", "breakpoints", ",", "fluxes", "=", "fluxes", ",", "errors", "=", "errors", ",", "kpars", "=", "kpars", ")", "# Load the light curves", "lcs", "=", "np", ".", "load", "(", "lcfile", ")", "if", "time", "is", "None", ":", "time", "=", "lcs", "[", "'time'", "]", "breakpoints", "=", "lcs", "[", "'breakpoints'", "]", "fluxes", "=", "lcs", "[", "'fluxes'", "]", "errors", "=", "lcs", "[", "'errors'", "]", "kpars", "=", "lcs", "[", "'kpars'", "]", "else", ":", "fluxes", "=", "np", ".", "vstack", "(", "[", "fluxes", ",", "lcs", "[", "'fluxes'", "]", "]", ")", "errors", "=", "np", ".", "vstack", "(", "[", "errors", ",", "lcs", "[", "'errors'", "]", "]", ")", "kpars", "=", "np", ".", "vstack", "(", "[", "kpars", ",", "lcs", "[", "'kpars'", "]", "]", ")", "# Compute the design matrix", "log", ".", "info", "(", "'Running SysRem...'", ")", "X", "=", "np", ".", "ones", "(", "(", "len", "(", "time", ")", ",", "1", "+", "kwargs", ".", "get", "(", "'ncbv'", ",", "5", ")", ")", ")", "# Loop over the segments", "new_fluxes", "=", "np", ".", "zeros_like", "(", "fluxes", ")", "for", "b", "in", "range", "(", "len", "(", "breakpoints", ")", ")", ":", "# Get the current segment's indices", "inds", "=", "GetChunk", "(", "time", ",", "breakpoints", ",", "b", ")", "# Update the error arrays with the white GP component", "for", "j", "in", "range", "(", "len", "(", "errors", ")", ")", ":", "errors", "[", "j", "]", "=", "np", ".", "sqrt", "(", "errors", "[", "j", "]", "**", "2", "+", "kpars", "[", "j", "]", "[", "0", "]", "**", "2", ")", "# Get de-trended fluxes", "X", "[", "inds", ",", "1", ":", "]", "=", "SysRem", "(", "time", "[", "inds", "]", ",", "fluxes", "[", ":", ",", "inds", "]", ",", "errors", "[", ":", ",", "inds", "]", ",", "*", "*", "kwargs", ")", ".", "T", "# Save", "np", ".", "savez", "(", "xfile", ",", "X", "=", "X", ",", "time", "=", "time", ",", "breakpoints", "=", "breakpoints", ")", "else", ":", "# Load from disk", "data", "=", "np", ".", "load", "(", 
"xfile", ")", "X", "=", "data", "[", "'X'", "]", "[", "(", ")", "]", "time", "=", "data", "[", "'time'", "]", "[", "(", ")", "]", "breakpoints", "=", "data", "[", "'breakpoints'", "]", "[", "(", ")", "]", "# Plot", "plotfile", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'X.pdf'", ")", "if", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "plotfile", ")", ":", "fig", ",", "ax", "=", "pl", ".", "subplots", "(", "2", ",", "3", ",", "figsize", "=", "(", "12", ",", "8", ")", ")", "fig", ".", "subplots_adjust", "(", "left", "=", "0.05", ",", "right", "=", "0.95", ")", "ax", "=", "ax", ".", "flatten", "(", ")", "for", "axis", "in", "ax", ":", "axis", ".", "set_xticks", "(", "[", "]", ")", "axis", ".", "set_yticks", "(", "[", "]", ")", "for", "b", "in", "range", "(", "len", "(", "breakpoints", ")", ")", ":", "inds", "=", "GetChunk", "(", "time", ",", "breakpoints", ",", "b", ")", "for", "n", "in", "range", "(", "min", "(", "6", ",", "X", ".", "shape", "[", "1", "]", ")", ")", ":", "ax", "[", "n", "]", ".", "plot", "(", "time", "[", "inds", "]", ",", "X", "[", "inds", ",", "n", "]", ")", "ax", "[", "n", "]", ".", "set_title", "(", "n", ",", "fontsize", "=", "14", ")", "fig", ".", "savefig", "(", "plotfile", ",", "bbox_inches", "=", "'tight'", ")", "return", "X" ]
Computes the CBVs for a given campaign. :param int campaign: The campaign number :param str model: The name of the :py:obj:`everest` model. Default `nPLD` :param bool clobber: Overwrite existing files? Default `False`
[ "Computes", "the", "CBVs", "for", "a", "given", "campaign", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/sysrem.py#L165-L263
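A call sketch for GetCBVs, assuming the de-trended light curves it reads already live under EVEREST_DAT (the campaign number is a placeholder); note that the function caches both X.npz and the diagnostic plot on disk:

from everest.missions.k2.sysrem import GetCBVs

X = GetCBVs(6, model='nPLD', ncbv=5)
print(X.shape)  # (ntime, 1 + ncbv): a constant column followed by the CBVs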
lsbardel/python-stdnet
stdnet/backends/redisb/client/extensions.py
read_lua_file
def read_lua_file(dotted_module, path=None, context=None):
    '''Load lua script from the stdnet/lib/lua directory'''
    path = path or DEFAULT_LUA_PATH
    bits = dotted_module.split('.')
    bits[-1] += '.lua'
    name = os.path.join(path, *bits)
    with open(name) as f:
        data = f.read()
        if context:
            data = data.format(context)
        return data
python
def read_lua_file(dotted_module, path=None, context=None):
    '''Load lua script from the stdnet/lib/lua directory'''
    path = path or DEFAULT_LUA_PATH
    bits = dotted_module.split('.')
    bits[-1] += '.lua'
    name = os.path.join(path, *bits)
    with open(name) as f:
        data = f.read()
        if context:
            data = data.format(context)
        return data
[ "def", "read_lua_file", "(", "dotted_module", ",", "path", "=", "None", ",", "context", "=", "None", ")", ":", "path", "=", "path", "or", "DEFAULT_LUA_PATH", "bits", "=", "dotted_module", ".", "split", "(", "'.'", ")", "bits", "[", "-", "1", "]", "+=", "'.lua'", "name", "=", "os", ".", "path", ".", "join", "(", "path", ",", "*", "bits", ")", "with", "open", "(", "name", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "if", "context", ":", "data", "=", "data", ".", "format", "(", "context", ")", "return", "data" ]
Load lua script from the stdnet/lib/lua directory
[ "Load", "lua", "script", "from", "the", "stdnet", "/", "lib", "/", "lua", "directory" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/extensions.py#L46-L56
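A usage sketch for read_lua_file; the dotted name below is a placeholder, not a guaranteed script in the stdnet tree:

from stdnet.backends.redisb.client.extensions import read_lua_file

# 'commands.zpop' stands in for any <lua path>/commands/zpop.lua on disk
script = read_lua_file('commands.zpop')
print(script.splitlines()[0])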
lsbardel/python-stdnet
stdnet/backends/redisb/client/extensions.py
parse_info
def parse_info(response):
    '''Parse the response of Redis's INFO command into a Python dict.

    In doing so, convert byte data into unicode.'''
    info = {}
    response = response.decode('utf-8')

    def get_value(value):
        if ',' and '=' not in value:
            return value
        sub_dict = {}
        for item in value.split(','):
            k, v = item.split('=')
            try:
                sub_dict[k] = int(v)
            except ValueError:
                sub_dict[k] = v
        return sub_dict
    data = info
    for line in response.splitlines():
        keyvalue = line.split(':')
        if len(keyvalue) == 2:
            key, value = keyvalue
            try:
                data[key] = int(value)
            except ValueError:
                data[key] = get_value(value)
        else:
            data = {}
            info[line[2:]] = data
    return info
python
def parse_info(response):
    '''Parse the response of Redis's INFO command into a Python dict.

    In doing so, convert byte data into unicode.'''
    info = {}
    response = response.decode('utf-8')

    def get_value(value):
        if ',' and '=' not in value:
            return value
        sub_dict = {}
        for item in value.split(','):
            k, v = item.split('=')
            try:
                sub_dict[k] = int(v)
            except ValueError:
                sub_dict[k] = v
        return sub_dict
    data = info
    for line in response.splitlines():
        keyvalue = line.split(':')
        if len(keyvalue) == 2:
            key, value = keyvalue
            try:
                data[key] = int(value)
            except ValueError:
                data[key] = get_value(value)
        else:
            data = {}
            info[line[2:]] = data
    return info
[ "def", "parse_info", "(", "response", ")", ":", "info", "=", "{", "}", "response", "=", "response", ".", "decode", "(", "'utf-8'", ")", "def", "get_value", "(", "value", ")", ":", "if", "','", "and", "'='", "not", "in", "value", ":", "return", "value", "sub_dict", "=", "{", "}", "for", "item", "in", "value", ".", "split", "(", "','", ")", ":", "k", ",", "v", "=", "item", ".", "split", "(", "'='", ")", "try", ":", "sub_dict", "[", "k", "]", "=", "int", "(", "v", ")", "except", "ValueError", ":", "sub_dict", "[", "k", "]", "=", "v", "return", "sub_dict", "data", "=", "info", "for", "line", "in", "response", ".", "splitlines", "(", ")", ":", "keyvalue", "=", "line", ".", "split", "(", "':'", ")", "if", "len", "(", "keyvalue", ")", "==", "2", ":", "key", ",", "value", "=", "keyvalue", "try", ":", "data", "[", "key", "]", "=", "int", "(", "value", ")", "except", "ValueError", ":", "data", "[", "key", "]", "=", "get_value", "(", "value", ")", "else", ":", "data", "=", "{", "}", "info", "[", "line", "[", "2", ":", "]", "]", "=", "data", "return", "info" ]
Parse the response of Redis's INFO command into a Python dict. In doing so, convert byte data into unicode.
[ "Parse", "the", "response", "of", "Redis", "s", "INFO", "command", "into", "a", "Python", "dict", ".", "In", "doing", "so", "convert", "byte", "data", "into", "unicode", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/extensions.py#L59-L88
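Since parse_info only needs a byte string, it can be exercised without a Redis server; a sketch with a fabricated INFO-style payload, assuming the function above is in scope:

sample = b"# Memory\r\nused_memory:1024\r\ndb0:keys=10,expires=0\r\n"
print(parse_info(sample))
# {'Memory': {'used_memory': 1024, 'db0': {'keys': 10, 'expires': 0}}}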
lsbardel/python-stdnet
stdnet/backends/redisb/client/extensions.py
RedisExtensionsMixin.zdiffstore
def zdiffstore(self, dest, keys, withscores=False):
    '''Compute the difference of multiple sorted.

    The difference of sets specified by ``keys`` into a new
    sorted set in ``dest``.
    '''
    keys = (dest,) + tuple(keys)
    wscores = 'withscores' if withscores else ''
    return self.execute_script('zdiffstore', keys, wscores,
                               withscores=withscores)
python
def zdiffstore(self, dest, keys, withscores=False):
    '''Compute the difference of multiple sorted.

    The difference of sets specified by ``keys`` into a new
    sorted set in ``dest``.
    '''
    keys = (dest,) + tuple(keys)
    wscores = 'withscores' if withscores else ''
    return self.execute_script('zdiffstore', keys, wscores,
                               withscores=withscores)
[ "def", "zdiffstore", "(", "self", ",", "dest", ",", "keys", ",", "withscores", "=", "False", ")", ":", "keys", "=", "(", "dest", ",", ")", "+", "tuple", "(", "keys", ")", "wscores", "=", "'withscores'", "if", "withscores", "else", "''", "return", "self", ".", "execute_script", "(", "'zdiffstore'", ",", "keys", ",", "wscores", ",", "withscores", "=", "withscores", ")" ]
Compute the difference of multiple sorted. The difference of sets specified by ``keys`` into a new sorted set in ``dest``.
[ "Compute", "the", "difference", "of", "multiple", "sorted", ".", "The", "difference", "of", "sets", "specified", "by", "keys", "into", "a", "new", "sorted", "set", "in", "dest", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/extensions.py#L156-L165
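A call sketch for zdiffstore, assuming `client` is a redis client built with this mixin and the source sorted sets already exist; the exact difference semantics live in the bundled zdiffstore lua script:

# store members of 'zset:a' minus those of 'zset:b' into 'zset:dest'
client.zdiffstore('zset:dest', ('zset:a', 'zset:b'))

# same, but carry scores through to the destination set
client.zdiffstore('zset:dest', ('zset:a', 'zset:b'), withscores=True)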
lsbardel/python-stdnet
stdnet/backends/redisb/client/extensions.py
RedisExtensionsMixin.zpopbyrank
def zpopbyrank(self, name, start, stop=None, withscores=False, desc=False):
    '''Pop a range by rank.
    '''
    stop = stop if stop is not None else start
    return self.execute_script('zpop', (name,), 'rank', start,
                               stop, int(desc), int(withscores),
                               withscores=withscores)
python
def zpopbyrank(self, name, start, stop=None, withscores=False, desc=False):
    '''Pop a range by rank.
    '''
    stop = stop if stop is not None else start
    return self.execute_script('zpop', (name,), 'rank', start,
                               stop, int(desc), int(withscores),
                               withscores=withscores)
[ "def", "zpopbyrank", "(", "self", ",", "name", ",", "start", ",", "stop", "=", "None", ",", "withscores", "=", "False", ",", "desc", "=", "False", ")", ":", "stop", "=", "stop", "if", "stop", "is", "not", "None", "else", "start", "return", "self", ".", "execute_script", "(", "'zpop'", ",", "(", "name", ",", ")", ",", "'rank'", ",", "start", ",", "stop", ",", "int", "(", "desc", ")", ",", "int", "(", "withscores", ")", ",", "withscores", "=", "withscores", ")" ]
Pop a range by rank.
[ "Pop", "a", "range", "by", "rank", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/extensions.py#L167-L173
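A call sketch for zpopbyrank under the same mixin assumption; note `stop` defaults to `start`, so a single rank argument pops one member:

client.zpopbyrank('myzset', 0)  # pop the lowest-ranked member
# pop the ten highest-ranked members together with their scores
client.zpopbyrank('myzset', 0, 9, desc=True, withscores=True)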
lsbardel/python-stdnet
stdnet/backends/redisb/client/extensions.py
RedisDbManager.delete
def delete(self, instance):
    '''Delete an instance'''
    flushdb(self.client) if flushdb else self.client.flushdb()
python
def delete(self, instance):
    '''Delete an instance'''
    flushdb(self.client) if flushdb else self.client.flushdb()
[ "def", "delete", "(", "self", ",", "instance", ")", ":", "flushdb", "(", "self", ".", "client", ")", "if", "flushdb", "else", "self", ".", "client", ".", "flushdb", "(", ")" ]
Delete an instance
[ "Delete", "an", "instance" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/extensions.py#L432-L434
rodluger/everest
docs/mcmc.py
lnprior
def lnprior(x):
    """Return the log prior given parameter vector `x`."""
    per, t0, b = x
    if b < -1 or b > 1:
        return -np.inf
    elif per < 7 or per > 10:
        return -np.inf
    elif t0 < 1978 or t0 > 1979:
        return -np.inf
    else:
        return 0.
python
def lnprior(x):
    """Return the log prior given parameter vector `x`."""
    per, t0, b = x
    if b < -1 or b > 1:
        return -np.inf
    elif per < 7 or per > 10:
        return -np.inf
    elif t0 < 1978 or t0 > 1979:
        return -np.inf
    else:
        return 0.
[ "def", "lnprior", "(", "x", ")", ":", "per", ",", "t0", ",", "b", "=", "x", "if", "b", "<", "-", "1", "or", "b", ">", "1", ":", "return", "-", "np", ".", "inf", "elif", "per", "<", "7", "or", "per", ">", "10", ":", "return", "-", "np", ".", "inf", "elif", "t0", "<", "1978", "or", "t0", ">", "1979", ":", "return", "-", "np", ".", "inf", "else", ":", "return", "0." ]
Return the log prior given parameter vector `x`.
[ "Return", "the", "log", "prior", "given", "parameter", "vector", "x", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/docs/mcmc.py#L11-L21
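The prior above is a uniform box in (per, t0, b): log prior 0 inside, -inf outside, so out-of-bounds samples are rejected outright. A quick sketch, assuming lnprior and numpy are in scope:

import numpy as np

print(lnprior([8.5, 1978.5, 0.3]))  # 0.0: inside all three bounds
print(lnprior([6.0, 1978.5, 0.3]))  # -inf: period below the 7-day floor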
rodluger/everest
docs/mcmc.py
lnlike
def lnlike(x, star):
    """Return the log likelihood given parameter vector `x`."""
    ll = lnprior(x)
    if np.isinf(ll):
        return ll, (np.nan, np.nan)
    per, t0, b = x
    model = TransitModel('b', per=per, t0=t0, b=b, rhos=10.)(star.time)
    like, d, vard = star.lnlike(model, full_output=True)
    ll += like
    return ll, (d,)
python
def lnlike(x, star):
    """Return the log likelihood given parameter vector `x`."""
    ll = lnprior(x)
    if np.isinf(ll):
        return ll, (np.nan, np.nan)
    per, t0, b = x
    model = TransitModel('b', per=per, t0=t0, b=b, rhos=10.)(star.time)
    like, d, vard = star.lnlike(model, full_output=True)
    ll += like
    return ll, (d,)
[ "def", "lnlike", "(", "x", ",", "star", ")", ":", "ll", "=", "lnprior", "(", "x", ")", "if", "np", ".", "isinf", "(", "ll", ")", ":", "return", "ll", ",", "(", "np", ".", "nan", ",", "np", ".", "nan", ")", "per", ",", "t0", ",", "b", "=", "x", "model", "=", "TransitModel", "(", "'b'", ",", "per", "=", "per", ",", "t0", "=", "t0", ",", "b", "=", "b", ",", "rhos", "=", "10.", ")", "(", "star", ".", "time", ")", "like", ",", "d", ",", "vard", "=", "star", ".", "lnlike", "(", "model", ",", "full_output", "=", "True", ")", "ll", "+=", "like", "return", "ll", ",", "(", "d", ",", ")" ]
Return the log likelihood given parameter vector `x`.
[ "Return", "the", "log", "likelihood", "given", "parameter", "vector", "x", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/docs/mcmc.py#L24-L33
lsbardel/python-stdnet
examples/permissions.py
GroupManager.check_user
def check_user(self, username, email):
    '''username and email (if provided) must be unique.'''
    users = self.router.user
    avail = yield users.filter(username=username).count()
    if avail:
        raise FieldError('Username %s not available' % username)
    if email:
        avail = yield users.filter(email=email).count()
        if avail:
            raise FieldError('Email %s not available' % email)
python
def check_user(self, username, email):
    '''username and email (if provided) must be unique.'''
    users = self.router.user
    avail = yield users.filter(username=username).count()
    if avail:
        raise FieldError('Username %s not available' % username)
    if email:
        avail = yield users.filter(email=email).count()
        if avail:
            raise FieldError('Email %s not available' % email)
[ "def", "check_user", "(", "self", ",", "username", ",", "email", ")", ":", "users", "=", "self", ".", "router", ".", "user", "avail", "=", "yield", "users", ".", "filter", "(", "username", "=", "username", ")", ".", "count", "(", ")", "if", "avail", ":", "raise", "FieldError", "(", "'Username %s not available'", "%", "username", ")", "if", "email", ":", "avail", "=", "yield", "users", ".", "filter", "(", "email", "=", "email", ")", ".", "count", "(", ")", "if", "avail", ":", "raise", "FieldError", "(", "'Email %s not available'", "%", "email", ")" ]
username and email (if provided) must be unique.
[ "username", "and", "email", "(", "if", "provided", ")", "must", "be", "unique", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/examples/permissions.py#L150-L159
lsbardel/python-stdnet
examples/permissions.py
GroupManager.permitted_query
def permitted_query(self, query, group, operations):
    '''Change the ``query`` so that only instances for which ``group``
    has roles with permission on ``operations`` are returned.'''
    session = query.session
    models = session.router
    user = group.user
    if user.is_superuser:    # super-users have all permissions
        return query
    roles = group.roles.query()
    roles = group.roles.query()  # query on all roles for group
    # The throgh model for Role/Permission relationship
    throgh_model = models.role.permissions.model
    models[throgh_model].filter(role=roles,
                                permission__model_type=query.model,
                                permission__operations=operations)
    # query on all relevant permissions
    permissions = router.permission.filter(model_type=query.model,
                                           level=operations)
    owner_query = query.filter(user=user)
    # all roles for the query model with appropriate permission level
    roles = models.role.filter(model_type=query.model, level__ge=level)
    # Now we need groups which have these roles
    groups = Role.groups.throughquery(
        session).filter(role=roles).get_field('group')
    # I need to know if user is in any of these groups
    if user.groups.filter(id=groups).count():
        # it is, lets get the model with permissions less
        # or equal permission level
        permitted = models.instancerole.filter(
            role=roles).get_field('object_id')
        return owner_query.union(model.objects.filter(id=permitted))
    else:
        return owner_query
python
def permitted_query(self, query, group, operations):
    '''Change the ``query`` so that only instances for which ``group``
    has roles with permission on ``operations`` are returned.'''
    session = query.session
    models = session.router
    user = group.user
    if user.is_superuser:    # super-users have all permissions
        return query
    roles = group.roles.query()
    roles = group.roles.query()  # query on all roles for group
    # The throgh model for Role/Permission relationship
    throgh_model = models.role.permissions.model
    models[throgh_model].filter(role=roles,
                                permission__model_type=query.model,
                                permission__operations=operations)
    # query on all relevant permissions
    permissions = router.permission.filter(model_type=query.model,
                                           level=operations)
    owner_query = query.filter(user=user)
    # all roles for the query model with appropriate permission level
    roles = models.role.filter(model_type=query.model, level__ge=level)
    # Now we need groups which have these roles
    groups = Role.groups.throughquery(
        session).filter(role=roles).get_field('group')
    # I need to know if user is in any of these groups
    if user.groups.filter(id=groups).count():
        # it is, lets get the model with permissions less
        # or equal permission level
        permitted = models.instancerole.filter(
            role=roles).get_field('object_id')
        return owner_query.union(model.objects.filter(id=permitted))
    else:
        return owner_query
[ "def", "permitted_query", "(", "self", ",", "query", ",", "group", ",", "operations", ")", ":", "session", "=", "query", ".", "session", "models", "=", "session", ".", "router", "user", "=", "group", ".", "user", "if", "user", ".", "is_superuser", ":", "# super-users have all permissions", "return", "query", "roles", "=", "group", ".", "roles", ".", "query", "(", ")", "roles", "=", "group", ".", "roles", ".", "query", "(", ")", "# query on all roles for group", "# The throgh model for Role/Permission relationship", "throgh_model", "=", "models", ".", "role", ".", "permissions", ".", "model", "models", "[", "throgh_model", "]", ".", "filter", "(", "role", "=", "roles", ",", "permission__model_type", "=", "query", ".", "model", ",", "permission__operations", "=", "operations", ")", "# query on all relevant permissions", "permissions", "=", "router", ".", "permission", ".", "filter", "(", "model_type", "=", "query", ".", "model", ",", "level", "=", "operations", ")", "owner_query", "=", "query", ".", "filter", "(", "user", "=", "user", ")", "# all roles for the query model with appropriate permission level", "roles", "=", "models", ".", "role", ".", "filter", "(", "model_type", "=", "query", ".", "model", ",", "level__ge", "=", "level", ")", "# Now we need groups which have these roles", "groups", "=", "Role", ".", "groups", ".", "throughquery", "(", "session", ")", ".", "filter", "(", "role", "=", "roles", ")", ".", "get_field", "(", "'group'", ")", "# I need to know if user is in any of these groups", "if", "user", ".", "groups", ".", "filter", "(", "id", "=", "groups", ")", ".", "count", "(", ")", ":", "# it is, lets get the model with permissions less", "# or equal permission level", "permitted", "=", "models", ".", "instancerole", ".", "filter", "(", "role", "=", "roles", ")", ".", "get_field", "(", "'object_id'", ")", "return", "owner_query", ".", "union", "(", "model", ".", "objects", ".", "filter", "(", "id", "=", "permitted", ")", ")", "else", ":", "return", "owner_query" ]
Change the ``query`` so that only instances for which ``group`` has roles with permission on ``operations`` are returned.
[ "Change", "the", "query", "so", "that", "only", "instances", "for", "which", "group", "has", "roles", "with", "permission", "on", "operations", "are", "returned", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/examples/permissions.py#L168-L202
lsbardel/python-stdnet
examples/permissions.py
Subject.create_role
def create_role(self, name):
    '''Create a new :class:`Role` owned by this :class:`Subject`'''
    models = self.session.router
    return models.role.new(name=name, owner=self)
python
def create_role(self, name):
    '''Create a new :class:`Role` owned by this :class:`Subject`'''
    models = self.session.router
    return models.role.new(name=name, owner=self)
[ "def", "create_role", "(", "self", ",", "name", ")", ":", "models", "=", "self", ".", "session", ".", "router", "return", "models", ".", "role", ".", "new", "(", "name", "=", "name", ",", "owner", "=", "self", ")" ]
Create a new :class:`Role` owned by this :class:`Subject`
[ "Create", "a", "new", ":", "class", ":", "Role", "owned", "by", "this", ":", "class", ":", "Subject" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/examples/permissions.py#L208-L211
lsbardel/python-stdnet
examples/permissions.py
Subject.assign
def assign(self, role):
    '''Assign :class:`Role` ``role`` to this :class:`Subject`.

    If this :class:`Subject` is the :attr:`Role.owner`,
    this method does nothing.'''
    if role.owner_id != self.id:
        return self.roles.add(role)
python
def assign(self, role):
    '''Assign :class:`Role` ``role`` to this :class:`Subject`.

    If this :class:`Subject` is the :attr:`Role.owner`,
    this method does nothing.'''
    if role.owner_id != self.id:
        return self.roles.add(role)
[ "def", "assign", "(", "self", ",", "role", ")", ":", "if", "role", ".", "owner_id", "!=", "self", ".", "id", ":", "return", "self", ".", "roles", ".", "add", "(", "role", ")" ]
Assign :class:`Role` ``role`` to this :class:`Subject`. If this :class:`Subject` is the :attr:`Role.owner`, this method does nothing.
[ "Assign", ":", "class", ":", "Role", "role", "to", "this", ":", "class", ":", "Subject", ".", "If", "this", ":", "class", ":", "Subject", "is", "the", ":", "attr", ":", "Role", ".", "owner", "this", "method", "does", "nothing", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/examples/permissions.py#L213-L217
lsbardel/python-stdnet
examples/permissions.py
Subject.has_permissions
def has_permissions(self, object, group, operations):
    '''Check if this :class:`Subject` has permissions for ``operations``
    on an ``object``. It returns the number of valid permissions.'''
    if self.is_superuser:
        return 1
    else:
        models = self.session.router
        # valid permissions
        query = models.permission.for_object(object, operation=operations)
        objects = models[models.role.permissions.model]
        return objects.filter(role=self.role.query(),
                              permission=query).count()
python
def has_permissions(self, object, group, operations):
    '''Check if this :class:`Subject` has permissions for ``operations``
    on an ``object``. It returns the number of valid permissions.'''
    if self.is_superuser:
        return 1
    else:
        models = self.session.router
        # valid permissions
        query = models.permission.for_object(object, operation=operations)
        objects = models[models.role.permissions.model]
        return objects.filter(role=self.role.query(),
                              permission=query).count()
[ "def", "has_permissions", "(", "self", ",", "object", ",", "group", ",", "operations", ")", ":", "if", "self", ".", "is_superuser", ":", "return", "1", "else", ":", "models", "=", "self", ".", "session", ".", "router", "# valid permissions", "query", "=", "models", ".", "permission", ".", "for_object", "(", "object", ",", "operation", "=", "operations", ")", "objects", "=", "models", "[", "models", ".", "role", ".", "permissions", ".", "model", "]", "return", "objects", ".", "filter", "(", "role", "=", "self", ".", "role", ".", "query", "(", ")", ",", "permission", "=", "query", ")", ".", "count", "(", ")" ]
Check if this :class:`Subject` has permissions for ``operations`` on an ``object``. It returns the number of valid permissions.
[ "Check", "if", "this", ":", "class", ":", "Subject", "has", "permissions", "for", "operations", "on", "an", "object", ".", "It", "returns", "the", "number", "of", "valid", "permissions", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/examples/permissions.py#L219-L230
lsbardel/python-stdnet
examples/permissions.py
Role.add_permission
def add_permission(self, resource, operation):
    '''Add a new :class:`Permission` for ``resource`` to perform an
    ``operation``. The resource can be either an object or a model.'''
    if isclass(resource):
        model_type = resource
        pk = ''
    else:
        model_type = resource.__class__
        pk = resource.pkvalue()
    p = Permission(model_type=model_type, object_pk=pk,
                   operation=operation)
    session = self.session
    if session.transaction:
        session.add(p)
        self.permissions.add(p)
        return p
    else:
        with session.begin() as t:
            t.add(p)
            self.permissions.add(p)
            return t.add_callback(lambda r: p)
python
def add_permission(self, resource, operation):
    '''Add a new :class:`Permission` for ``resource`` to perform an
    ``operation``. The resource can be either an object or a model.'''
    if isclass(resource):
        model_type = resource
        pk = ''
    else:
        model_type = resource.__class__
        pk = resource.pkvalue()
    p = Permission(model_type=model_type, object_pk=pk,
                   operation=operation)
    session = self.session
    if session.transaction:
        session.add(p)
        self.permissions.add(p)
        return p
    else:
        with session.begin() as t:
            t.add(p)
            self.permissions.add(p)
            return t.add_callback(lambda r: p)
[ "def", "add_permission", "(", "self", ",", "resource", ",", "operation", ")", ":", "if", "isclass", "(", "resource", ")", ":", "model_type", "=", "resource", "pk", "=", "''", "else", ":", "model_type", "=", "resource", ".", "__class__", "pk", "=", "resource", ".", "pkvalue", "(", ")", "p", "=", "Permission", "(", "model_type", "=", "model_type", ",", "object_pk", "=", "pk", ",", "operation", "=", "operation", ")", "session", "=", "self", ".", "session", "if", "session", ".", "transaction", ":", "session", ".", "add", "(", "p", ")", "self", ".", "permissions", ".", "add", "(", "p", ")", "return", "p", "else", ":", "with", "session", ".", "begin", "(", ")", "as", "t", ":", "t", ".", "add", "(", "p", ")", "self", ".", "permissions", ".", "add", "(", "p", ")", "return", "t", ".", "add_callback", "(", "lambda", "r", ":", "p", ")" ]
Add a new :class:`Permission` for ``resource`` to perform an ``operation``. The resource can be either an object or a model.
[ "Add", "a", "new", ":", "class", ":", "Permission", "for", "resource", "to", "perform", "an", "operation", ".", "The", "resource", "can", "be", "either", "an", "object", "or", "a", "model", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/examples/permissions.py#L305-L325
rbw/flask-snow
flask_snow/__init__.py
Snow.init_app
def init_app(self, app, session=None, parameters=None):
    """Initializes snow extension

    Set config default and find out which client type to use

    :param app: App passed from constructor or directly to init_app (factory)
    :param session: requests-compatible session to pass along to init_app
    :param parameters: `ParamsBuilder` object passed to `Client` after instantiation
    :raises:
        - ConfigError - if unable to determine client type
    """

    if parameters is not None and not isinstance(parameters, ParamsBuilder):
        raise InvalidUsage("parameters should be a pysnow.ParamsBuilder object, not %r" % type(parameters).__name__)

    self._session = session
    self._parameters = parameters

    app.config.setdefault('SNOW_INSTANCE', None)
    app.config.setdefault('SNOW_HOST', None)
    app.config.setdefault('SNOW_USER', None)
    app.config.setdefault('SNOW_PASSWORD', None)
    app.config.setdefault('SNOW_OAUTH_CLIENT_ID', None)
    app.config.setdefault('SNOW_OAUTH_CLIENT_SECRET', None)
    app.config.setdefault('SNOW_USE_SSL', True)

    if app.config['SNOW_OAUTH_CLIENT_ID'] and app.config['SNOW_OAUTH_CLIENT_SECRET']:
        self._client_type_oauth = True
    elif self._session or (app.config['SNOW_USER'] and app.config['SNOW_PASSWORD']):
        self._client_type_basic = True
    else:
        raise ConfigError("You must supply user credentials, a session or OAuth credentials to use flask-snow")
python
def init_app(self, app, session=None, parameters=None):
    """Initializes snow extension

    Set config default and find out which client type to use

    :param app: App passed from constructor or directly to init_app (factory)
    :param session: requests-compatible session to pass along to init_app
    :param parameters: `ParamsBuilder` object passed to `Client` after instantiation
    :raises:
        - ConfigError - if unable to determine client type
    """

    if parameters is not None and not isinstance(parameters, ParamsBuilder):
        raise InvalidUsage("parameters should be a pysnow.ParamsBuilder object, not %r" % type(parameters).__name__)

    self._session = session
    self._parameters = parameters

    app.config.setdefault('SNOW_INSTANCE', None)
    app.config.setdefault('SNOW_HOST', None)
    app.config.setdefault('SNOW_USER', None)
    app.config.setdefault('SNOW_PASSWORD', None)
    app.config.setdefault('SNOW_OAUTH_CLIENT_ID', None)
    app.config.setdefault('SNOW_OAUTH_CLIENT_SECRET', None)
    app.config.setdefault('SNOW_USE_SSL', True)

    if app.config['SNOW_OAUTH_CLIENT_ID'] and app.config['SNOW_OAUTH_CLIENT_SECRET']:
        self._client_type_oauth = True
    elif self._session or (app.config['SNOW_USER'] and app.config['SNOW_PASSWORD']):
        self._client_type_basic = True
    else:
        raise ConfigError("You must supply user credentials, a session or OAuth credentials to use flask-snow")
[ "def", "init_app", "(", "self", ",", "app", ",", "session", "=", "None", ",", "parameters", "=", "None", ")", ":", "if", "parameters", "is", "not", "None", "and", "not", "isinstance", "(", "parameters", ",", "ParamsBuilder", ")", ":", "raise", "InvalidUsage", "(", "\"parameters should be a pysnow.ParamsBuilder object, not %r\"", "%", "type", "(", "parameters", ")", ".", "__name__", ")", "self", ".", "_session", "=", "session", "self", ".", "_parameters", "=", "parameters", "app", ".", "config", ".", "setdefault", "(", "'SNOW_INSTANCE'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SNOW_HOST'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SNOW_USER'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SNOW_PASSWORD'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SNOW_OAUTH_CLIENT_ID'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SNOW_OAUTH_CLIENT_SECRET'", ",", "None", ")", "app", ".", "config", ".", "setdefault", "(", "'SNOW_USE_SSL'", ",", "True", ")", "if", "app", ".", "config", "[", "'SNOW_OAUTH_CLIENT_ID'", "]", "and", "app", ".", "config", "[", "'SNOW_OAUTH_CLIENT_SECRET'", "]", ":", "self", ".", "_client_type_oauth", "=", "True", "elif", "self", ".", "_session", "or", "(", "app", ".", "config", "[", "'SNOW_USER'", "]", "and", "app", ".", "config", "[", "'SNOW_PASSWORD'", "]", ")", ":", "self", ".", "_client_type_basic", "=", "True", "else", ":", "raise", "ConfigError", "(", "\"You must supply user credentials, a session or OAuth credentials to use flask-snow\"", ")" ]
Initializes snow extension Set config default and find out which client type to use :param app: App passed from constructor or directly to init_app (factory) :param session: requests-compatible session to pass along to init_app :param parameters: `ParamsBuilder` object passed to `Client` after instantiation :raises: - ConfigError - if unable to determine client type
[ "Initializes", "snow", "extension" ]
train
https://github.com/rbw/flask-snow/blob/8efae4c276c95044e67e41bccba1f24f9f8ef75e/flask_snow/__init__.py#L43-L74
rbw/flask-snow
flask_snow/__init__.py
Snow.connection
def connection(self):
    """Snow connection instance, stores a `pysnow.Client` instance
    and `pysnow.Resource` instances

    Creates a new :class:`pysnow.Client` object if it doesn't exist
    in the app slice of the context stack

    :returns: :class:`pysnow.Client` object
    """

    ctx = stack.top.app
    if ctx is not None:
        if not hasattr(ctx, 'snow'):
            if self._client_type_oauth:
                if not self._token_updater:
                    warnings.warn("No token updater has been set. Token refreshes will be ignored.")

                client = self._get_oauth_client()
            else:
                client = self._get_basic_client()

            if self._parameters:
                # Set parameters passed on app init
                client.parameters = self._parameters

            ctx.snow = client

        return ctx.snow
python
def connection(self):
    """Snow connection instance, stores a `pysnow.Client` instance
    and `pysnow.Resource` instances

    Creates a new :class:`pysnow.Client` object if it doesn't exist
    in the app slice of the context stack

    :returns: :class:`pysnow.Client` object
    """

    ctx = stack.top.app
    if ctx is not None:
        if not hasattr(ctx, 'snow'):
            if self._client_type_oauth:
                if not self._token_updater:
                    warnings.warn("No token updater has been set. Token refreshes will be ignored.")

                client = self._get_oauth_client()
            else:
                client = self._get_basic_client()

            if self._parameters:
                # Set parameters passed on app init
                client.parameters = self._parameters

            ctx.snow = client

        return ctx.snow
[ "def", "connection", "(", "self", ")", ":", "ctx", "=", "stack", ".", "top", ".", "app", "if", "ctx", "is", "not", "None", ":", "if", "not", "hasattr", "(", "ctx", ",", "'snow'", ")", ":", "if", "self", ".", "_client_type_oauth", ":", "if", "not", "self", ".", "_token_updater", ":", "warnings", ".", "warn", "(", "\"No token updater has been set. Token refreshes will be ignored.\"", ")", "client", "=", "self", ".", "_get_oauth_client", "(", ")", "else", ":", "client", "=", "self", ".", "_get_basic_client", "(", ")", "if", "self", ".", "_parameters", ":", "# Set parameters passed on app init", "client", ".", "parameters", "=", "self", ".", "_parameters", "ctx", ".", "snow", "=", "client", "return", "ctx", ".", "snow" ]
Snow connection instance, stores a `pysnow.Client` instance and `pysnow.Resource` instances Creates a new :class:`pysnow.Client` object if it doesn't exist in the app slice of the context stack :returns: :class:`pysnow.Client` object
[ "Snow", "connection", "instance", "stores", "a", "pysnow", ".", "Client", "instance", "and", "pysnow", ".", "Resource", "instances" ]
train
https://github.com/rbw/flask-snow/blob/8efae4c276c95044e67e41bccba1f24f9f8ef75e/flask_snow/__init__.py#L126-L151
behalf-oss/behave2cucumber
behave2cucumber/__main__.py
usage
def usage():
    """Print out a usage message"""
    global options
    l = len(options['long'])
    options['shortlist'] = [s for s in options['short'] if s is not ":"]
    print("python -m behave2cucumber [-h] [-d level|--debug=level]")
    for i in range(l):
        print(" -{0}|--{1:20} {2}".format(options['shortlist'][i],
                                          options['long'][i],
                                          options['descriptions'][i]))
python
def usage():
    """Print out a usage message"""
    global options
    l = len(options['long'])
    options['shortlist'] = [s for s in options['short'] if s is not ":"]
    print("python -m behave2cucumber [-h] [-d level|--debug=level]")
    for i in range(l):
        print(" -{0}|--{1:20} {2}".format(options['shortlist'][i],
                                          options['long'][i],
                                          options['descriptions'][i]))
[ "def", "usage", "(", ")", ":", "global", "options", "l", "=", "len", "(", "options", "[", "'long'", "]", ")", "options", "[", "'shortlist'", "]", "=", "[", "s", "for", "s", "in", "options", "[", "'short'", "]", "if", "s", "is", "not", "\":\"", "]", "print", "(", "\"python -m behave2cucumber [-h] [-d level|--debug=level]\"", ")", "for", "i", "in", "range", "(", "l", ")", ":", "print", "(", "\" -{0}|--{1:20} {2}\"", ".", "format", "(", "options", "[", "'shortlist'", "]", "[", "i", "]", ",", "options", "[", "'long'", "]", "[", "i", "]", ",", "options", "[", "'descriptions'", "]", "[", "i", "]", ")", ")" ]
Print out a usage message
[ "Print", "out", "a", "usage", "message" ]
train
https://github.com/behalf-oss/behave2cucumber/blob/a0ce3ebb8b2cd9bff5ba14c81e73dba7be36e657/behave2cucumber/__main__.py#L44-L53
behalf-oss/behave2cucumber
behave2cucumber/__main__.py
main
def main(argv):
    """Main"""
    global options
    opts = None
    try:
        opts, args = getopt.getopt(argv, options['short'], options['long'])
    except getopt.GetoptError:
        usage()
        exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            exit()
        elif opt in ("-d", "--debug"):
            try:
                arg = int(arg)
                log.debug("Debug level received: " + str(arg))
            except ValueError:
                log.warning("Invalid log level: " + arg)
                continue
            if 0 <= arg <= 5:
                log.setLevel(60 - (arg*10))
                log.critical("Log level changed to: " +
                             str(logging.getLevelName(60 - (arg*10))))
            else:
                log.warning("Invalid log level: " + str(arg))

    infile = None
    outfile = None
    remove_background = False
    duration_format = False
    deduplicate = False

    for opt, arg in opts:
        if opt in ("-i", "--infile"):
            log.info("Input File: " + arg)
            infile = arg
        if opt in ("-o", "--outfile"):
            log.info("Output File: " + arg)
            outfile = arg
        if opt in ("-r", "--remove-background"):
            log.info("Remove Background: Enabled")
            remove_background = True
        if opt in ("-f", "--format-duration"):
            log.info("Format Duration: Enabled")
            duration_format = True
        if opt in ("-D", "--deduplicate"):
            log.info("Deduplicate: Enabled")
            deduplicate = True

    if infile is None:
        log.critical("No input JSON provided.")
        usage()
        exit(3)

    with open(infile) as f:
        cucumber_output = convert(json.load(f),
                                  remove_background=remove_background,
                                  duration_format=duration_format,
                                  deduplicate=deduplicate)

    if outfile is not None:
        with open(outfile, 'w') as f:
            json.dump(cucumber_output, f, indent=4, separators=(',', ': '))
    else:
        pprint(cucumber_output)
python
def main(argv):
    """Main"""
    global options
    opts = None
    try:
        opts, args = getopt.getopt(argv, options['short'], options['long'])
    except getopt.GetoptError:
        usage()
        exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            exit()
        elif opt in ("-d", "--debug"):
            try:
                arg = int(arg)
                log.debug("Debug level received: " + str(arg))
            except ValueError:
                log.warning("Invalid log level: " + arg)
                continue
            if 0 <= arg <= 5:
                log.setLevel(60 - (arg*10))
                log.critical("Log level changed to: " +
                             str(logging.getLevelName(60 - (arg*10))))
            else:
                log.warning("Invalid log level: " + str(arg))

    infile = None
    outfile = None
    remove_background = False
    duration_format = False
    deduplicate = False

    for opt, arg in opts:
        if opt in ("-i", "--infile"):
            log.info("Input File: " + arg)
            infile = arg
        if opt in ("-o", "--outfile"):
            log.info("Output File: " + arg)
            outfile = arg
        if opt in ("-r", "--remove-background"):
            log.info("Remove Background: Enabled")
            remove_background = True
        if opt in ("-f", "--format-duration"):
            log.info("Format Duration: Enabled")
            duration_format = True
        if opt in ("-D", "--deduplicate"):
            log.info("Deduplicate: Enabled")
            deduplicate = True

    if infile is None:
        log.critical("No input JSON provided.")
        usage()
        exit(3)

    with open(infile) as f:
        cucumber_output = convert(json.load(f),
                                  remove_background=remove_background,
                                  duration_format=duration_format,
                                  deduplicate=deduplicate)

    if outfile is not None:
        with open(outfile, 'w') as f:
            json.dump(cucumber_output, f, indent=4, separators=(',', ': '))
    else:
        pprint(cucumber_output)
[ "def", "main", "(", "argv", ")", ":", "global", "options", "opts", "=", "None", "try", ":", "opts", ",", "args", "=", "getopt", ".", "getopt", "(", "argv", ",", "options", "[", "'short'", "]", ",", "options", "[", "'long'", "]", ")", "except", "getopt", ".", "GetoptError", ":", "usage", "(", ")", "exit", "(", "2", ")", "for", "opt", ",", "arg", "in", "opts", ":", "if", "opt", "in", "(", "\"-h\"", ",", "\"--help\"", ")", ":", "usage", "(", ")", "exit", "(", ")", "elif", "opt", "in", "(", "\"-d\"", ",", "\"--debug\"", ")", ":", "try", ":", "arg", "=", "int", "(", "arg", ")", "log", ".", "debug", "(", "\"Debug level received: \"", "+", "str", "(", "arg", ")", ")", "except", "ValueError", ":", "log", ".", "warning", "(", "\"Invalid log level: \"", "+", "arg", ")", "continue", "if", "0", "<=", "arg", "<=", "5", ":", "log", ".", "setLevel", "(", "60", "-", "(", "arg", "*", "10", ")", ")", "log", ".", "critical", "(", "\"Log level changed to: \"", "+", "str", "(", "logging", ".", "getLevelName", "(", "60", "-", "(", "arg", "*", "10", ")", ")", ")", ")", "else", ":", "log", ".", "warning", "(", "\"Invalid log level: \"", "+", "str", "(", "arg", ")", ")", "infile", "=", "None", "outfile", "=", "None", "remove_background", "=", "False", "duration_format", "=", "False", "deduplicate", "=", "False", "for", "opt", ",", "arg", "in", "opts", ":", "if", "opt", "in", "(", "\"-i\"", ",", "\"--infile\"", ")", ":", "log", ".", "info", "(", "\"Input File: \"", "+", "arg", ")", "infile", "=", "arg", "if", "opt", "in", "(", "\"-o\"", ",", "\"--outfile\"", ")", ":", "log", ".", "info", "(", "\"Output File: \"", "+", "arg", ")", "outfile", "=", "arg", "if", "opt", "in", "(", "\"-r\"", ",", "\"--remove-background\"", ")", ":", "log", ".", "info", "(", "\"Remove Background: Enabled\"", ")", "remove_background", "=", "True", "if", "opt", "in", "(", "\"-f\"", ",", "\"--format-duration\"", ")", ":", "log", ".", "info", "(", "\"Format Duration: Enabled\"", ")", "duration_format", "=", "True", "if", "opt", "in", "(", "\"-D\"", ",", "\"--deduplicate\"", ")", ":", "log", ".", "info", "(", "\"Deduplicate: Enabled\"", ")", "deduplicate", "=", "True", "if", "infile", "is", "None", ":", "log", ".", "critical", "(", "\"No input JSON provided.\"", ")", "usage", "(", ")", "exit", "(", "3", ")", "with", "open", "(", "infile", ")", "as", "f", ":", "cucumber_output", "=", "convert", "(", "json", ".", "load", "(", "f", ")", ",", "remove_background", "=", "remove_background", ",", "duration_format", "=", "duration_format", ",", "deduplicate", "=", "deduplicate", ")", "if", "outfile", "is", "not", "None", ":", "with", "open", "(", "outfile", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "cucumber_output", ",", "f", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", "else", ":", "pprint", "(", "cucumber_output", ")" ]
Main
[ "Main" ]
train
https://github.com/behalf-oss/behave2cucumber/blob/a0ce3ebb8b2cd9bff5ba14c81e73dba7be36e657/behave2cucumber/__main__.py#L56-L123
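An invocation sketch for the converter above, driving main() programmatically with an argv list; the file names are placeholders, and the equivalent shell form is `python -m behave2cucumber -i behave.json -o cucumber.json -d 4`:

from behave2cucumber.__main__ import main

main(['-d', '4', '-i', 'behave.json', '-o', 'cucumber.json'])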
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
direction
def direction(theta, phi):
    '''Return the direction vector of a cylinder defined
    by the spherical coordinates theta and phi.
    '''
    return np.array([np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta),
                     np.cos(theta)])
python
def direction(theta, phi):
    '''Return the direction vector of a cylinder defined
    by the spherical coordinates theta and phi.
    '''
    return np.array([np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta),
                     np.cos(theta)])
[ "def", "direction", "(", "theta", ",", "phi", ")", ":", "return", "np", ".", "array", "(", "[", "np", ".", "cos", "(", "phi", ")", "*", "np", ".", "sin", "(", "theta", ")", ",", "np", ".", "sin", "(", "phi", ")", "*", "np", ".", "sin", "(", "theta", ")", ",", "np", ".", "cos", "(", "theta", ")", "]", ")" ]
Return the direction vector of a cylinder defined by the spherical coordinates theta and phi.
[ "Return", "the", "direction", "vector", "of", "a", "cylinder", "defined", "by", "the", "spherical", "coordinates", "theta", "and", "phi", "." ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L4-L9
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
projection_matrix
def projection_matrix(w): '''Return the projection matrix of a direction w.''' return np.identity(3) - np.dot(np.reshape(w, (3,1)), np.reshape(w, (1, 3)))
python
def projection_matrix(w): '''Return the projection matrix of a direction w.''' return np.identity(3) - np.dot(np.reshape(w, (3,1)), np.reshape(w, (1, 3)))
[ "def", "projection_matrix", "(", "w", ")", ":", "return", "np", ".", "identity", "(", "3", ")", "-", "np", ".", "dot", "(", "np", ".", "reshape", "(", "w", ",", "(", "3", ",", "1", ")", ")", ",", "np", ".", "reshape", "(", "w", ",", "(", "1", ",", "3", ")", ")", ")" ]
Return the projection matrix of a direction w.
[ "Return", "the", "projection", "matrix", "of", "a", "direction", "w", "." ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L11-L13
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
skew_matrix
def skew_matrix(w): '''Return the skew matrix of a direction w.''' return np.array([[0, -w[2], w[1]], [w[2], 0, -w[0]], [-w[1], w[0], 0]])
python
def skew_matrix(w): '''Return the skew matrix of a direction w.''' return np.array([[0, -w[2], w[1]], [w[2], 0, -w[0]], [-w[1], w[0], 0]])
[ "def", "skew_matrix", "(", "w", ")", ":", "return", "np", ".", "array", "(", "[", "[", "0", ",", "-", "w", "[", "2", "]", ",", "w", "[", "1", "]", "]", ",", "[", "w", "[", "2", "]", ",", "0", ",", "-", "w", "[", "0", "]", "]", ",", "[", "-", "w", "[", "1", "]", ",", "w", "[", "0", "]", ",", "0", "]", "]", ")" ]
Return the skew matrix of a direction w.
[ "Return", "the", "skew", "matrix", "of", "a", "direction", "w", "." ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L15-L19
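The three helpers above are the linear-algebra building blocks of the fit: direction() maps spherical angles to a unit axis, projection_matrix() builds the orthogonal projector onto the plane normal to that axis, and skew_matrix() encodes the cross product. A quick numeric sanity check (a sketch, not part of the repository):

import numpy as np

w = direction(0.3, 1.1)        # unit vector, since the spherical radius is 1
P = projection_matrix(w)       # P = I - w w^T
S = skew_matrix(w)             # S @ v == np.cross(w, v)

assert np.allclose(P @ w, 0)   # the projector annihilates the axis itself
assert np.allclose(P @ P, P)   # orthogonal projectors are idempotent
v = np.array([1.0, -2.0, 0.5])
assert np.allclose(S @ v, np.cross(w, v))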
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
calc_A
def calc_A(Ys): '''Return the matrix A from a list of Y vectors.''' return sum(np.dot(np.reshape(Y, (3,1)), np.reshape(Y, (1, 3))) for Y in Ys)
python
def calc_A(Ys): '''Return the matrix A from a list of Y vectors.''' return sum(np.dot(np.reshape(Y, (3,1)), np.reshape(Y, (1, 3))) for Y in Ys)
[ "def", "calc_A", "(", "Ys", ")", ":", "return", "sum", "(", "np", ".", "dot", "(", "np", ".", "reshape", "(", "Y", ",", "(", "3", ",", "1", ")", ")", ",", "np", ".", "reshape", "(", "Y", ",", "(", "1", ",", "3", ")", ")", ")", "for", "Y", "in", "Ys", ")" ]
Return the matrix A from a list of Y vectors.
[ "Return", "the", "matrix", "A", "from", "a", "list", "of", "Y", "vectors", "." ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L21-L24
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
calc_A_hat
def calc_A_hat(A, S): '''Return the A_hat matrix of A given the skew matrix S''' return np.dot(S, np.dot(A, np.transpose(S)))
python
def calc_A_hat(A, S): '''Return the A_hat matrix of A given the skew matrix S''' return np.dot(S, np.dot(A, np.transpose(S)))
[ "def", "calc_A_hat", "(", "A", ",", "S", ")", ":", "return", "np", ".", "dot", "(", "S", ",", "np", ".", "dot", "(", "A", ",", "np", ".", "transpose", "(", "S", ")", ")", ")" ]
Return the A_hat matrix of A given the skew matrix S
[ "Return", "the", "A_hat", "matrix", "of", "A", "given", "the", "skew", "matrix", "S" ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L26-L28
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
preprocess_data
def preprocess_data(Xs_raw): '''Translate the center of mass (COM) of the data to the origin. Return the processed data and the shift of the COM''' n = len(Xs_raw) Xs_raw_mean = sum(X for X in Xs_raw) / n return [X - Xs_raw_mean for X in Xs_raw], Xs_raw_mean
python
def preprocess_data(Xs_raw): '''Translate the center of mass (COM) of the data to the origin. Return the processed data and the shift of the COM''' n = len(Xs_raw) Xs_raw_mean = sum(X for X in Xs_raw) / n return [X - Xs_raw_mean for X in Xs_raw], Xs_raw_mean
[ "def", "preprocess_data", "(", "Xs_raw", ")", ":", "n", "=", "len", "(", "Xs_raw", ")", "Xs_raw_mean", "=", "sum", "(", "X", "for", "X", "in", "Xs_raw", ")", "/", "n", "return", "[", "X", "-", "Xs_raw_mean", "for", "X", "in", "Xs_raw", "]", ",", "Xs_raw_mean" ]
Translate the center of mass (COM) of the data to the origin. Return the processed data and the shift of the COM
[ "Translate", "the", "center", "of", "mass", "(", "COM", ")", "of", "the", "data", "to", "the", "origin", ".", "Return", "the", "prossed", "data", "and", "the", "shift", "of", "the", "COM" ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L30-L36
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
G
def G(w, Xs): '''Calculate the G function given a cylinder direction w and a list of data points Xs to be fitted.''' n = len(Xs) P = projection_matrix(w) Ys = [np.dot(P, X) for X in Xs] A = calc_A(Ys) A_hat = calc_A_hat(A, skew_matrix(w)) u = sum(np.dot(Y, Y) for Y in Ys) / n v = np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A)) return sum((np.dot(Y, Y) - u - 2 * np.dot(Y, v)) ** 2 for Y in Ys)
python
def G(w, Xs): '''Calculate the G function given a cylinder direction w and a list of data points Xs to be fitted.''' n = len(Xs) P = projection_matrix(w) Ys = [np.dot(P, X) for X in Xs] A = calc_A(Ys) A_hat = calc_A_hat(A, skew_matrix(w)) u = sum(np.dot(Y, Y) for Y in Ys) / n v = np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A)) return sum((np.dot(Y, Y) - u - 2 * np.dot(Y, v)) ** 2 for Y in Ys)
[ "def", "G", "(", "w", ",", "Xs", ")", ":", "n", "=", "len", "(", "Xs", ")", "P", "=", "projection_matrix", "(", "w", ")", "Ys", "=", "[", "np", ".", "dot", "(", "P", ",", "X", ")", "for", "X", "in", "Xs", "]", "A", "=", "calc_A", "(", "Ys", ")", "A_hat", "=", "calc_A_hat", "(", "A", ",", "skew_matrix", "(", "w", ")", ")", "u", "=", "sum", "(", "np", ".", "dot", "(", "Y", ",", "Y", ")", "for", "Y", "in", "Ys", ")", "/", "n", "v", "=", "np", ".", "dot", "(", "A_hat", ",", "sum", "(", "np", ".", "dot", "(", "Y", ",", "Y", ")", "*", "Y", "for", "Y", "in", "Ys", ")", ")", "/", "np", ".", "trace", "(", "np", ".", "dot", "(", "A_hat", ",", "A", ")", ")", "return", "sum", "(", "(", "np", ".", "dot", "(", "Y", ",", "Y", ")", "-", "u", "-", "2", "*", "np", ".", "dot", "(", "Y", ",", "v", ")", ")", "**", "2", "for", "Y", "in", "Ys", ")" ]
Calculate the G function given a cylinder direction w and a list of data points Xs to be fitted.
[ "Calculate", "the", "G", "function", "given", "a", "cylinder", "direction", "w", "and", "a", "list", "of", "data", "points", "Xs", "to", "be", "fitted", "." ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L38-L51
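For reference, the objective implemented above — from Eberly's cylinder-fitting derivation — reads, with Y_i = P X_i the pre-centered points projected onto the plane orthogonal to the candidate axis w:

G(w) = \sum_{i=1}^{n} \left( \lVert Y_i \rVert^2 - \bar{u} - 2\, Y_i^{\top} v \right)^2, \qquad \bar{u} = \frac{1}{n} \sum_{i=1}^{n} \lVert Y_i \rVert^2, \qquad v = \frac{\hat{A}\, \sum_{i=1}^{n} \lVert Y_i \rVert^2\, Y_i}{\operatorname{tr}(\hat{A} A)}

The vector v is the least-squares center for the given axis; the C function in the next record returns exactly this quantity.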
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
C
def C(w, Xs): '''Calculate the cylinder center given the cylinder direction and a list of data points. ''' n = len(Xs) P = projection_matrix(w) Ys = [np.dot(P, X) for X in Xs] A = calc_A(Ys) A_hat = calc_A_hat(A, skew_matrix(w)) return np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A))
python
def C(w, Xs): '''Calculate the cylinder center given the cylinder direction and a list of data points. ''' n = len(Xs) P = projection_matrix(w) Ys = [np.dot(P, X) for X in Xs] A = calc_A(Ys) A_hat = calc_A_hat(A, skew_matrix(w)) return np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A))
[ "def", "C", "(", "w", ",", "Xs", ")", ":", "n", "=", "len", "(", "Xs", ")", "P", "=", "projection_matrix", "(", "w", ")", "Ys", "=", "[", "np", ".", "dot", "(", "P", ",", "X", ")", "for", "X", "in", "Xs", "]", "A", "=", "calc_A", "(", "Ys", ")", "A_hat", "=", "calc_A_hat", "(", "A", ",", "skew_matrix", "(", "w", ")", ")", "return", "np", ".", "dot", "(", "A_hat", ",", "sum", "(", "np", ".", "dot", "(", "Y", ",", "Y", ")", "*", "Y", "for", "Y", "in", "Ys", ")", ")", "/", "np", ".", "trace", "(", "np", ".", "dot", "(", "A_hat", ",", "A", ")", ")" ]
Calculate the cylinder center given the cylinder direction and a list of data points.
[ "Calculate", "the", "cylinder", "center", "given", "the", "cylinder", "direction", "and", "a", "list", "of", "data", "points", "." ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L53-L63
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
r
def r(w, Xs): '''Calculate the radius given the cylinder direction and a list of data points. ''' n = len(Xs) P = projection_matrix(w) c = C(w, Xs) return np.sqrt(sum(np.dot(c - X, np.dot(P, c - X)) for X in Xs) / n)
python
def r(w, Xs): '''Calculate the radius given the cylinder direction and a list of data points. ''' n = len(Xs) P = projection_matrix(w) c = C(w, Xs) return np.sqrt(sum(np.dot(c - X, np.dot(P, c - X)) for X in Xs) / n)
[ "def", "r", "(", "w", ",", "Xs", ")", ":", "n", "=", "len", "(", "Xs", ")", "P", "=", "projection_matrix", "(", "w", ")", "c", "=", "C", "(", "w", ",", "Xs", ")", "return", "np", ".", "sqrt", "(", "sum", "(", "np", ".", "dot", "(", "c", "-", "X", ",", "np", ".", "dot", "(", "P", ",", "c", "-", "X", ")", ")", "for", "X", "in", "Xs", ")", "/", "n", ")" ]
Calculate the radius given the cylinder direction and a list of data points.
[ "Calculate", "the", "radius", "given", "the", "cylinder", "direction", "and", "a", "list", "of", "data", "points", "." ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L65-L73
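In the same notation, the radius estimate above is the root-mean-square orthogonal distance of the points from the axis through the center c:

r(w) = \sqrt{ \frac{1}{n} \sum_{i=1}^{n} (c - X_i)^{\top} P\, (c - X_i) }

Since P is an orthogonal projector, each summand equals \lVert P (c - X_i) \rVert^2, the squared distance from X_i to the line through c with direction w.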
xingjiepan/cylinder_fitting
cylinder_fitting/fitting.py
fit
def fit(data, guess_angles=None): '''Fit a list of data points to a cylinder surface. The algorithm implemented here is from David Eberly's paper "Fitting 3D Data with a Cylinder" from https://www.geometrictools.com/Documentation/CylinderFitting.pdf Arguments: data - A list of 3D data points to be fitted. guess_angles[0] - Guess of the theta angle of the axis direction guess_angles[1] - Guess of the phi angle of the axis direction Return: Direction of the cylinder axis A point on the cylinder axis Radius of the cylinder Fitting error (G function) ''' Xs, t = preprocess_data(data) # Set the start points start_points = [(0, 0), (np.pi / 2, 0), (np.pi / 2, np.pi / 2)] if guess_angles: start_points = guess_angles # Fit the cylinder from different start points best_fit = None best_score = float('inf') for sp in start_points: fitted = minimize(lambda x : G(direction(x[0], x[1]), Xs), sp, method='Powell', tol=1e-6) if fitted.fun < best_score: best_score = fitted.fun best_fit = fitted w = direction(best_fit.x[0], best_fit.x[1]) return w, C(w, Xs) + t, r(w, Xs), best_fit.fun
python
def fit(data, guess_angles=None): '''Fit a list of data points to a cylinder surface. The algorithm implemented here is from David Eberly's paper "Fitting 3D Data with a Cylinder" from https://www.geometrictools.com/Documentation/CylinderFitting.pdf Arguments: data - A list of 3D data points to be fitted. guess_angles[0] - Guess of the theta angle of the axis direction guess_angles[1] - Guess of the phi angle of the axis direction Return: Direction of the cylinder axis A point on the cylinder axis Radius of the cylinder Fitting error (G function) ''' Xs, t = preprocess_data(data) # Set the start points start_points = [(0, 0), (np.pi / 2, 0), (np.pi / 2, np.pi / 2)] if guess_angles: start_points = guess_angles # Fit the cylinder from different start points best_fit = None best_score = float('inf') for sp in start_points: fitted = minimize(lambda x : G(direction(x[0], x[1]), Xs), sp, method='Powell', tol=1e-6) if fitted.fun < best_score: best_score = fitted.fun best_fit = fitted w = direction(best_fit.x[0], best_fit.x[1]) return w, C(w, Xs) + t, r(w, Xs), best_fit.fun
[ "def", "fit", "(", "data", ",", "guess_angles", "=", "None", ")", ":", "Xs", ",", "t", "=", "preprocess_data", "(", "data", ")", "# Set the start points", "start_points", "=", "[", "(", "0", ",", "0", ")", ",", "(", "np", ".", "pi", "/", "2", ",", "0", ")", ",", "(", "np", ".", "pi", "/", "2", ",", "np", ".", "pi", "/", "2", ")", "]", "if", "guess_angles", ":", "start_points", "=", "guess_angles", "# Fit the cylinder from different start points ", "best_fit", "=", "None", "best_score", "=", "float", "(", "'inf'", ")", "for", "sp", "in", "start_points", ":", "fitted", "=", "minimize", "(", "lambda", "x", ":", "G", "(", "direction", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ")", ",", "Xs", ")", ",", "sp", ",", "method", "=", "'Powell'", ",", "tol", "=", "1e-6", ")", "if", "fitted", ".", "fun", "<", "best_score", ":", "best_score", "=", "fitted", ".", "fun", "best_fit", "=", "fitted", "w", "=", "direction", "(", "best_fit", ".", "x", "[", "0", "]", ",", "best_fit", ".", "x", "[", "1", "]", ")", "return", "w", ",", "C", "(", "w", ",", "Xs", ")", "+", "t", ",", "r", "(", "w", ",", "Xs", ")", ",", "best_fit", ".", "fun" ]
Fit a list of data points to a cylinder surface. The algorithm implemented here is from David Eberly's paper "Fitting 3D Data with a Cylinder" from https://www.geometrictools.com/Documentation/CylinderFitting.pdf Arguments: data - A list of 3D data points to be fitted. guess_angles[0] - Guess of the theta angle of the axis direction guess_angles[1] - Guess of the phi angle of the axis direction Return: Direction of the cylinder axis A point on the cylinder axis Radius of the cylinder Fitting error (G function)
[ "Fit", "a", "list", "of", "data", "points", "to", "a", "cylinder", "surface", ".", "The", "algorithm", "implemented", "here", "is", "from", "David", "Eberly", "s", "paper", "Fitting", "3D", "Data", "with", "a", "Cylinder", "from", "https", ":", "//", "www", ".", "geometrictools", ".", "com", "/", "Documentation", "/", "CylinderFitting", ".", "pdf" ]
train
https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/fitting.py#L75-L114
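End to end, fit() centers the data, minimizes G over the two spherical angles with Powell's method from three (or user-supplied) starting directions, and translates the recovered center back by the COM shift. A usage sketch on synthetic data (the top-level import path is an assumption):

import numpy as np
from cylinder_fitting import fit  # assumed public import for the fit() above

# Sample noisy points from a known cylinder: radius 2, axis along z.
rng = np.random.default_rng(0)
theta = rng.uniform(0, 2 * np.pi, 500)
z = rng.uniform(-5, 5, 500)
pts = np.stack([2 * np.cos(theta), 2 * np.sin(theta), z], axis=1)
pts += rng.normal(scale=0.02, size=pts.shape)

w_fit, c_fit, r_fit, err = fit(pts)
print(w_fit)   # expect roughly (0, 0, ±1); the axis direction has a sign ambiguity
print(r_fit)   # expect roughly 2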
s4int/robotframework-KafkaLibrary
KafkaLibrary/Producer.py
Producer.connect_producer
def connect_producer(self, bootstrap_servers='127.0.0.1:9092', client_id='Robot', **kwargs): """A Kafka client that publishes records to the Kafka cluster. Keyword Arguments: - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]' strings) that the producer should contact to bootstrap initial cluster metadata. This does not have to be the full node list. It just needs to have at least one broker that will respond to a Metadata API Request. Default: `127.0.0.1:9092`. - ``client_id`` (str): a name for this client. This string is passed in each request to servers and can be used to identify specific server-side log entries that correspond to this client. Default: `Robot`. Note: Configuration parameters are described in more detail at http://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html """ self.producer = KafkaProducer(bootstrap_servers=bootstrap_servers, client_id=client_id, **kwargs)
python
def connect_producer(self, bootstrap_servers='127.0.0.1:9092', client_id='Robot', **kwargs): """A Kafka client that publishes records to the Kafka cluster. Keyword Arguments: - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]' strings) that the producer should contact to bootstrap initial cluster metadata. This does not have to be the full node list. It just needs to have at least one broker that will respond to a Metadata API Request. Default: `127.0.0.1:9092`. - ``client_id`` (str): a name for this client. This string is passed in each request to servers and can be used to identify specific server-side log entries that correspond to this client. Default: `Robot`. Note: Configuration parameters are described in more detail at http://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html """ self.producer = KafkaProducer(bootstrap_servers=bootstrap_servers, client_id=client_id, **kwargs)
[ "def", "connect_producer", "(", "self", ",", "bootstrap_servers", "=", "'127.0.0.1:9092'", ",", "client_id", "=", "'Robot'", ",", "*", "*", "kwargs", ")", ":", "self", ".", "producer", "=", "KafkaProducer", "(", "bootstrap_servers", "=", "bootstrap_servers", ",", "client_id", "=", "client_id", ",", "*", "*", "kwargs", ")" ]
A Kafka client that publishes records to the Kafka cluster. Keyword Arguments: - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]' strings) that the producer should contact to bootstrap initial cluster metadata. This does not have to be the full node list. It just needs to have at least one broker that will respond to a Metadata API Request. Default: `127.0.0.1:9092`. - ``client_id`` (str): a name for this client. This string is passed in each request to servers and can be used to identify specific server-side log entries that correspond to this client. Default: `Robot`. Note: Configuration parameters are described in more detail at http://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html
[ "A", "Kafka", "client", "that", "publishes", "records", "to", "the", "Kafka", "cluster", ".", "Keyword", "Arguments", ":", "-", "bootstrap_servers", ":", "host", "[", ":", "port", "]", "string", "(", "or", "list", "of", "host", "[", ":", "port", "]", "strings", ")", "that", "the", "producer", "should", "contact", "to", "bootstrap", "initial", "cluster", "metadata", ".", "This", "does", "not", "have", "to", "be", "the", "full", "node", "list", ".", "It", "just", "needs", "to", "have", "at", "least", "one", "broker", "that", "will", "respond", "to", "a", "Metadata", "API", "Request", ".", "Default", "to", "localhost", ":", "9092", ".", "-", "client_id", "(", "str", ")", ":", "a", "name", "for", "this", "client", ".", "This", "string", "is", "passed", "in", "each", "request", "to", "servers", "and", "can", "be", "used", "to", "identify", "specific", "server", "-", "side", "log", "entries", "that", "correspond", "to", "this", "client", ".", "Default", ":", "Robot", "." ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Producer.py#L7-L25
s4int/robotframework-KafkaLibrary
KafkaLibrary/Producer.py
Producer.send
def send(self, topic, value=None, timeout=60, key=None, partition=None, timestamp_ms=None): """Publish a message to a topic. - ``topic`` (str): topic where the message will be published - ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer. If value is None, key is required and message acts as a `delete`. - ``timeout`` - ``key``: a key to associate with the message. Can be used to determine which partition to send the message to. If partition is None (and producer's partitioner config is left as default), then messages with the same key will be delivered to the same partition (but if key is None, partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer. - ``partition`` (int): optionally specify a partition. If not set, the partition will be selected using the configured `partitioner`. - ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp. Defaults to current time. """ future = self.producer.send(topic, value=value, key=key, partition=partition, timestamp_ms=timestamp_ms) future.get(timeout=timeout)
python
def send(self, topic, value=None, timeout=60, key=None, partition=None, timestamp_ms=None): """Publish a message to a topic. - ``topic`` (str): topic where the message will be published - ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer. If value is None, key is required and message acts as a `delete`. - ``timeout`` - ``key``: a key to associate with the message. Can be used to determine which partition to send the message to. If partition is None (and producer's partitioner config is left as default), then messages with the same key will be delivered to the same partition (but if key is None, partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer. - ``partition`` (int): optionally specify a partition. If not set, the partition will be selected using the configured `partitioner`. - ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp. Defaults to current time. """ future = self.producer.send(topic, value=value, key=key, partition=partition, timestamp_ms=timestamp_ms) future.get(timeout=timeout)
[ "def", "send", "(", "self", ",", "topic", ",", "value", "=", "None", ",", "timeout", "=", "60", ",", "key", "=", "None", ",", "partition", "=", "None", ",", "timestamp_ms", "=", "None", ")", ":", "future", "=", "self", ".", "producer", ".", "send", "(", "topic", ",", "value", "=", "value", ",", "key", "=", "key", ",", "partition", "=", "partition", ",", "timestamp_ms", "=", "timestamp_ms", ")", "future", ".", "get", "(", "timeout", "=", "timeout", ")" ]
Publish a message to a topic. - ``topic`` (str): topic where the message will be published - ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer. If value is None, key is required and message acts as a `delete`. - ``timeout`` - ``key``: a key to associate with the message. Can be used to determine which partition to send the message to. If partition is None (and producer's partitioner config is left as default), then messages with the same key will be delivered to the same partition (but if key is None, partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer. - ``partition`` (int): optionally specify a partition. If not set, the partition will be selected using the configured `partitioner`. - ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp. Defaults to current time.
[ "Publish", "a", "message", "to", "a", "topic", "." ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Producer.py#L27-L44
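Together, the two Producer keywords above are a thin veneer over kafka-python's KafkaProducer, with send() made synchronous via future.get(timeout). A sketch of calling them from plain Python rather than a Robot Framework suite (the import path and no-argument constructor are assumptions):

from KafkaLibrary.Producer import Producer  # assumed import path within this repository

producer = Producer()  # assumed: connect_producer() populates self.producer afterwards
producer.connect_producer(bootstrap_servers='127.0.0.1:9092', client_id='Robot')
# value/key must be bytes unless a serializer was configured via **kwargs above.
producer.send('test-topic', value=b'hello', key=b'greeting', timeout=60)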
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.connect_consumer
def connect_consumer( self, bootstrap_servers='127.0.0.1:9092', client_id='Robot', group_id=None, auto_offset_reset='latest', enable_auto_commit=True, **kwargs ): """Connect kafka consumer. Keyword Arguments: - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]' strings) that the consumer should contact to bootstrap initial cluster metadata. This does not have to be the full node list. It just needs to have at least one broker that will respond to a Metadata API Request. Default: `127.0.0.1:9092`. - ``client_id`` (str): a name for this client. This string is passed in each request to servers and can be used to identify specific server-side log entries that correspond to this client. Also submitted to GroupCoordinator for logging with respect to consumer group administration. Default: `Robot`. - ``group_id`` (str or None): name of the consumer group to join for dynamic partition assignment (if enabled), and to use for fetching and committing offsets. If None, auto-partition assignment (via group coordinator) and offset commits are disabled. Default: `None`. - ``auto_offset_reset`` (str): A policy for resetting offsets on OffsetOutOfRange errors: `earliest` will move to the oldest available message, `latest` will move to the most recent. Any other value will raise the exception. Default: `latest`. - ``enable_auto_commit`` (bool): If true the consumer's offset will be periodically committed in the background. Default: `True`. Note: Configuration parameters are described in more detail at http://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html """ self.consumer = KafkaConsumer( bootstrap_servers=bootstrap_servers, auto_offset_reset=auto_offset_reset, client_id=client_id, group_id=group_id, enable_auto_commit=enable_auto_commit, **kwargs )
python
def connect_consumer( self, bootstrap_servers='127.0.0.1:9092', client_id='Robot', group_id=None, auto_offset_reset='latest', enable_auto_commit=True, **kwargs ): """Connect kafka consumer. Keyword Arguments: - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]' strings) that the consumer should contact to bootstrap initial cluster metadata. This does not have to be the full node list. It just needs to have at least one broker that will respond to a Metadata API Request. Default: `127.0.0.1:9092`. - ``client_id`` (str): a name for this client. This string is passed in each request to servers and can be used to identify specific server-side log entries that correspond to this client. Also submitted to GroupCoordinator for logging with respect to consumer group administration. Default: `Robot`. - ``group_id`` (str or None): name of the consumer group to join for dynamic partition assignment (if enabled), and to use for fetching and committing offsets. If None, auto-partition assignment (via group coordinator) and offset commits are disabled. Default: `None`. - ``auto_offset_reset`` (str): A policy for resetting offsets on OffsetOutOfRange errors: `earliest` will move to the oldest available message, `latest` will move to the most recent. Any other value will raise the exception. Default: `latest`. - ``enable_auto_commit`` (bool): If true the consumer's offset will be periodically committed in the background. Default: `True`. Note: Configuration parameters are described in more detail at http://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html """ self.consumer = KafkaConsumer( bootstrap_servers=bootstrap_servers, auto_offset_reset=auto_offset_reset, client_id=client_id, group_id=group_id, enable_auto_commit=enable_auto_commit, **kwargs )
[ "def", "connect_consumer", "(", "self", ",", "bootstrap_servers", "=", "'127.0.0.1:9092'", ",", "client_id", "=", "'Robot'", ",", "group_id", "=", "None", ",", "auto_offset_reset", "=", "'latest'", ",", "enable_auto_commit", "=", "True", ",", "*", "*", "kwargs", ")", ":", "self", ".", "consumer", "=", "KafkaConsumer", "(", "bootstrap_servers", "=", "bootstrap_servers", ",", "auto_offset_reset", "=", "auto_offset_reset", ",", "client_id", "=", "client_id", ",", "group_id", "=", "group_id", ",", "enable_auto_commit", "=", "enable_auto_commit", ",", "*", "*", "kwargs", ")" ]
Connect kafka consumer. Keyword Arguments: - ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]' strings) that the consumer should contact to bootstrap initial cluster metadata. This does not have to be the full node list. It just needs to have at least one broker that will respond to a Metadata API Request. Default: `127.0.0.1:9092`. - ``client_id`` (str): a name for this client. This string is passed in each request to servers and can be used to identify specific server-side log entries that correspond to this client. Also submitted to GroupCoordinator for logging with respect to consumer group administration. Default: `Robot`. - ``group_id`` (str or None): name of the consumer group to join for dynamic partition assignment (if enabled), and to use for fetching and committing offsets. If None, auto-partition assignment (via group coordinator) and offset commits are disabled. Default: `None`. - ``auto_offset_reset`` (str): A policy for resetting offsets on OffsetOutOfRange errors: `earliest` will move to the oldest available message, `latest` will move to the most recent. Any other value will raise the exception. Default: `latest`. - ``enable_auto_commit`` (bool): If true the consumer's offset will be periodically committed in the background. Default: `True`. Note: Configuration parameters are described in more detail at http://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html
[ "Connect", "kafka", "consumer", ".", "Keyword", "Arguments", ":", "-", "bootstrap_servers", ":", "host", "[", ":", "port", "]", "string", "(", "or", "list", "of", "host", "[", ":", "port", "]", "strings", ")", "that", "the", "consumer", "should", "contact", "to", "bootstrap", "initial", "cluster", "metadata", ".", "This", "does", "not", "have", "to", "be", "the", "full", "node", "list", ".", "It", "just", "needs", "to", "have", "at", "least", "one", "broker", "that", "will", "respond", "to", "a", "Metadata", "API", "Request", ".", "Default", ":", "127", ".", "0", ".", "0", ".", "1", ":", "9092", ".", "-", "client_id", "(", "str", ")", ":", "a", "name", "for", "this", "client", ".", "This", "string", "is", "passed", "in", "each", "request", "to", "servers", "and", "can", "be", "used", "to", "identify", "specific", "server", "-", "side", "log", "entries", "that", "correspond", "to", "this", "client", ".", "Also", "submitted", "to", "GroupCoordinator", "for", "logging", "with", "respect", "to", "consumer", "group", "administration", ".", "Default", ":", "Robot", ".", "-", "group_id", "(", "str", "or", "None", ")", ":", "name", "of", "the", "consumer", "group", "to", "join", "for", "dynamic", "partition", "assignment", "(", "if", "enabled", ")", "and", "to", "use", "for", "fetching", "and", "committing", "offsets", ".", "If", "None", "auto", "-", "partition", "assignment", "(", "via", "group", "coordinator", ")", "and", "offset", "commits", "are", "disabled", ".", "Default", ":", "None", ".", "-", "auto_offset_reset", "(", "str", ")", ":", "A", "policy", "for", "resetting", "offsets", "on", "OffsetOutOfRange", "errors", ":", "earliest", "will", "move", "to", "the", "oldest", "available", "message", "latest", "will", "move", "to", "the", "most", "recent", ".", "Any", "other", "value", "will", "raise", "the", "exception", ".", "Default", ":", "latest", ".", "-", "enable_auto_commit", "(", "bool", ")", ":", "If", "true", "the", "consumer", "s", "offset", "will", "be", "periodically", "committed", "in", "the", "background", ".", "Default", ":", "True", ".", "Note", ":", "Configuration", "parameters", "are", "described", "in", "more", "detail", "at", "http", ":", "//", "kafka", "-", "python", ".", "readthedocs", ".", "io", "/", "en", "/", "master", "/", "apidoc", "/", "KafkaConsumer", ".", "html" ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L7-L53
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.assign_to_topic_partition
def assign_to_topic_partition(self, topic_partition=None): """Assign a list of TopicPartitions to this consumer. - ``partitions`` (list of `TopicPartition`): Assignment for this instance. """ if isinstance(topic_partition, TopicPartition): topic_partition = [topic_partition] if not self._is_assigned(topic_partition): self.consumer.assign(topic_partition)
python
def assign_to_topic_partition(self, topic_partition=None): """Assign a list of TopicPartitions to this consumer. - ``partitions`` (list of `TopicPartition`): Assignment for this instance. """ if isinstance(topic_partition, TopicPartition): topic_partition = [topic_partition] if not self._is_assigned(topic_partition): self.consumer.assign(topic_partition)
[ "def", "assign_to_topic_partition", "(", "self", ",", "topic_partition", "=", "None", ")", ":", "if", "isinstance", "(", "topic_partition", ",", "TopicPartition", ")", ":", "topic_partition", "=", "[", "topic_partition", "]", "if", "not", "self", ".", "_is_assigned", "(", "topic_partition", ")", ":", "self", ".", "consumer", ".", "assign", "(", "topic_partition", ")" ]
Assign a list of TopicPartitions to this consumer. - ``partitions`` (list of `TopicPartition`): Assignment for this instance.
[ "Assign", "a", "list", "of", "TopicPartitions", "to", "this", "consumer", ".", "-", "partitions", "(", "list", "of", "TopicPartition", ")", ":", "Assignment", "for", "this", "instance", "." ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L75-L84
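The error messages above point at a companion `Create TopicPartition` keyword; under the hood these are kafka-python TopicPartition named tuples, so from Python one can build them directly (a sketch, assuming `consumer` is a connected Consumer instance from the records above):

from kafka import TopicPartition

tp = TopicPartition(topic='test-topic', partition=0)
consumer.assign_to_topic_partition(tp)   # wraps KafkaConsumer.assign([tp])
print(consumer.get_position(tp))         # get_position is defined a few records below

In kafka-python, manual assignment and group subscription are mutually exclusive, which is presumably why the keyword checks the current assignment before re-assigning.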
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.subscribe_topic
def subscribe_topic(self, topics=[], pattern=None): """Subscribe to a list of topics, or a topic regex pattern. - ``topics`` (list): List of topics for subscription. - ``pattern`` (str): Pattern to match available topics. You must provide either topics or pattern, but not both. """ if not isinstance(topics, list): topics = [topics] self.consumer.subscribe(topics, pattern=pattern)
python
def subscribe_topic(self, topics=[], pattern=None): """Subscribe to a list of topics, or a topic regex pattern. - ``topics`` (list): List of topics for subscription. - ``pattern`` (str): Pattern to match available topics. You must provide either topics or pattern, but not both. """ if not isinstance(topics, list): topics = [topics] self.consumer.subscribe(topics, pattern=pattern)
[ "def", "subscribe_topic", "(", "self", ",", "topics", "=", "[", "]", ",", "pattern", "=", "None", ")", ":", "if", "not", "isinstance", "(", "topics", ",", "list", ")", ":", "topics", "=", "[", "topics", "]", "self", ".", "consumer", ".", "subscribe", "(", "topics", ",", "pattern", "=", "pattern", ")" ]
Subscribe to a list of topics, or a topic regex pattern. - ``topics`` (list): List of topics for subscription. - ``pattern`` (str): Pattern to match available topics. You must provide either topics or pattern, but not both.
[ "Subscribe", "to", "a", "list", "of", "topics", "or", "a", "topic", "regex", "pattern", ".", "-", "topics", "(", "list", ")", ":", "List", "of", "topics", "for", "subscription", ".", "-", "pattern", "(", "str", ")", ":", "Pattern", "to", "match", "available", "topics", ".", "You", "must", "provide", "either", "topics", "or", "pattern", "but", "not", "both", "." ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L86-L96
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.get_position
def get_position(self, topic_partition=None): """Return offset of the next record that will be fetched. - ``topic_partition`` (TopicPartition): Partition to check """ if isinstance(topic_partition, TopicPartition): return self.consumer.position(topic_partition) else: raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
python
def get_position(self, topic_partition=None): """Return offset of the next record that will be fetched. - ``topic_partition`` (TopicPartition): Partition to check """ if isinstance(topic_partition, TopicPartition): return self.consumer.position(topic_partition) else: raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
[ "def", "get_position", "(", "self", ",", "topic_partition", "=", "None", ")", ":", "if", "isinstance", "(", "topic_partition", ",", "TopicPartition", ")", ":", "return", "self", ".", "consumer", ".", "position", "(", "topic_partition", ")", "else", ":", "raise", "TypeError", "(", "\"topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.\"", ")" ]
Return offset of the next record that will be fetched. - ``topic_partition`` (TopicPartition): Partition to check
[ "Return", "offset", "of", "the", "next", "record", "that", "will", "be", "fetched", ".", "-", "topic_partition", "(", "TopicPartition", ")", ":", "Partition", "to", "check" ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L98-L107
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.seek
def seek(self, offset, topic_partition=None): """Manually specify the fetch offset for a TopicPartition. - ``offset``: Message offset in partition - ``topic_partition`` (`TopicPartition`): Partition for seek operation """ if isinstance(topic_partition, TopicPartition): self.consumer.seek(topic_partition, offset=offset) else: raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
python
def seek(self, offset, topic_partition=None): """Manually specify the fetch offset for a TopicPartition. - ``offset``: Message offset in partition - ``topic_partition`` (`TopicPartition`): Partition for seek operation """ if isinstance(topic_partition, TopicPartition): self.consumer.seek(topic_partition, offset=offset) else: raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
[ "def", "seek", "(", "self", ",", "offset", ",", "topic_partition", "=", "None", ")", ":", "if", "isinstance", "(", "topic_partition", ",", "TopicPartition", ")", ":", "self", ".", "consumer", ".", "seek", "(", "topic_partition", ",", "offset", "=", "offset", ")", "else", ":", "raise", "TypeError", "(", "\"topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.\"", ")" ]
Manually specify the fetch offset for a TopicPartition. - ``offset``: Message offset in partition - ``topic_partition`` (`TopicPartition`): Partition for seek operation
[ "Manually", "specify", "the", "fetch", "offset", "for", "a", "TopicPartition", ".", "-", "offset", ":", "Message", "offset", "in", "partition", "-", "topic_partition", "(", "TopicPartition", ")", ":", "Partition", "for", "seek", "operation" ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L109-L119
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.seek_to_beginning
def seek_to_beginning(self, topic_partition=None): """Seek to the oldest available offset for partitions. - ``topic_partition``: Optionally provide specific TopicPartitions, otherwise default to all assigned partitions. """ if isinstance(topic_partition, TopicPartition): self.consumer.seek_to_beginning(topic_partition) else: raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
python
def seek_to_beginning(self, topic_partition=None): """Seek to the oldest available offset for partitions. - ``topic_partition``: Optionally provide specific TopicPartitions, otherwise default to all assigned partitions. """ if isinstance(topic_partition, TopicPartition): self.consumer.seek_to_beginning(topic_partition) else: raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
[ "def", "seek_to_beginning", "(", "self", ",", "topic_partition", "=", "None", ")", ":", "if", "isinstance", "(", "topic_partition", ",", "TopicPartition", ")", ":", "self", ".", "consumer", ".", "seek_to_beginning", "(", "topic_partition", ")", "else", ":", "raise", "TypeError", "(", "\"topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.\"", ")" ]
Seek to the oldest available offset for partitions. - ``topic_partition``: Optionally provide specific TopicPartitions, otherwise default to all assigned partitions.
[ "Seek", "to", "the", "oldest", "available", "offset", "for", "partitions", ".", "-", "topic_partition", ":", "Optionally", "provide", "specific", "TopicPartitions", "otherwise", "default", "to", "all", "assigned", "partitions", "." ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L121-L131
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.seek_to_end
def seek_to_end(self, topic_partition=None): """Seek to the most recent available offset for partitions. - ``topic_partition``: Optionally provide specific `TopicPartitions`, otherwise default to all assigned partitions. """ if isinstance(topic_partition, TopicPartition): self.consumer.seek_to_end(topic_partition) else: raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
python
def seek_to_end(self, topic_partition=None): """Seek to the most recent available offset for partitions. - ``topic_partition``: Optionally provide specific `TopicPartitions`, otherwise default to all assigned partitions. """ if isinstance(topic_partition, TopicPartition): self.consumer.seek_to_end(topic_partition) else: raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
[ "def", "seek_to_end", "(", "self", ",", "topic_partition", "=", "None", ")", ":", "if", "isinstance", "(", "topic_partition", ",", "TopicPartition", ")", ":", "self", ".", "consumer", ".", "seek_to_end", "(", "topic_partition", ")", "else", ":", "raise", "TypeError", "(", "\"topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.\"", ")" ]
Seek to the most recent available offset for partitions. - ``topic_partition``: Optionally provide specific `TopicPartitions`, otherwise default to all assigned partitions.
[ "Seek", "to", "the", "most", "recent", "available", "offset", "for", "partitions", ".", "-", "topic_partition", ":", "Optionally", "provide", "specific", "TopicPartitions", "otherwise", "default", "to", "all", "assigned", "partitions", "." ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L133-L143
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.get_number_of_messages_in_topics
def get_number_of_messages_in_topics(self, topics): """Return number of messages in topics. - ``topics`` (list): list of topics. """ if not isinstance(topics, list): topics = [topics] number_of_messages = 0 for t in topics: part = self.get_kafka_partitions_for_topic(topic=t) Partitions = map(lambda p: TopicPartition(topic=t, partition=p), part) number_of_messages += self.get_number_of_messages_in_topicpartition(Partitions) return number_of_messages
python
def get_number_of_messages_in_topics(self, topics): """Return number of messages in topics. - ``topics`` (list): list of topics. """ if not isinstance(topics, list): topics = [topics] number_of_messages = 0 for t in topics: part = self.get_kafka_partitions_for_topic(topic=t) Partitions = map(lambda p: TopicPartition(topic=t, partition=p), part) number_of_messages += self.get_number_of_messages_in_topicpartition(Partitions) return number_of_messages
[ "def", "get_number_of_messages_in_topics", "(", "self", ",", "topics", ")", ":", "if", "not", "isinstance", "(", "topics", ",", "list", ")", ":", "topics", "=", "[", "topics", "]", "number_of_messages", "=", "0", "for", "t", "in", "topics", ":", "part", "=", "self", ".", "get_kafka_partitions_for_topic", "(", "topic", "=", "t", ")", "Partitions", "=", "map", "(", "lambda", "p", ":", "TopicPartition", "(", "topic", "=", "t", ",", "partition", "=", "p", ")", ",", "part", ")", "number_of_messages", "+=", "self", ".", "get_number_of_messages_in_topicpartition", "(", "Partitions", ")", "return", "number_of_messages" ]
Return number of messages in topics. - ``topics`` (list): list of topics.
[ "Retrun", "number", "of", "messages", "in", "topics", ".", "-", "topics", "(", "list", ")", ":", "list", "of", "topics", "." ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L150-L165
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.get_number_of_messages_in_topicpartition
def get_number_of_messages_in_topicpartition(self, topic_partition=None): """Return number of messages in TopicPartition. - ``topic_partition`` (list of TopicPartition) """ if isinstance(topic_partition, TopicPartition): topic_partition = [topic_partition] number_of_messages = 0 assignment = self.consumer.assignment() self.consumer.unsubscribe() for Partition in topic_partition: if not isinstance(Partition, TopicPartition): raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.") self.assign_to_topic_partition(Partition) self.consumer.seek_to_end(Partition) end = self.consumer.position(Partition) self.consumer.seek_to_beginning(Partition) start = self.consumer.position(Partition) number_of_messages += end-start self.consumer.unsubscribe() self.consumer.assign(assignment) return number_of_messages
python
def get_number_of_messages_in_topicpartition(self, topic_partition=None): """Return number of messages in TopicPartition. - ``topic_partition`` (list of TopicPartition) """ if isinstance(topic_partition, TopicPartition): topic_partition = [topic_partition] number_of_messages = 0 assignment = self.consumer.assignment() self.consumer.unsubscribe() for Partition in topic_partition: if not isinstance(Partition, TopicPartition): raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.") self.assign_to_topic_partition(Partition) self.consumer.seek_to_end(Partition) end = self.consumer.position(Partition) self.consumer.seek_to_beginning(Partition) start = self.consumer.position(Partition) number_of_messages += end-start self.consumer.unsubscribe() self.consumer.assign(assignment) return number_of_messages
[ "def", "get_number_of_messages_in_topicpartition", "(", "self", ",", "topic_partition", "=", "None", ")", ":", "if", "isinstance", "(", "topic_partition", ",", "TopicPartition", ")", ":", "topic_partition", "=", "[", "topic_partition", "]", "number_of_messages", "=", "0", "assignment", "=", "self", ".", "consumer", ".", "assignment", "(", ")", "self", ".", "consumer", ".", "unsubscribe", "(", ")", "for", "Partition", "in", "topic_partition", ":", "if", "not", "isinstance", "(", "Partition", ",", "TopicPartition", ")", ":", "raise", "TypeError", "(", "\"topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.\"", ")", "self", ".", "assign_to_topic_partition", "(", "Partition", ")", "self", ".", "consumer", ".", "seek_to_end", "(", "Partition", ")", "end", "=", "self", ".", "consumer", ".", "position", "(", "Partition", ")", "self", ".", "consumer", ".", "seek_to_beginning", "(", "Partition", ")", "start", "=", "self", ".", "consumer", ".", "position", "(", "Partition", ")", "number_of_messages", "+=", "end", "-", "start", "self", ".", "consumer", ".", "unsubscribe", "(", ")", "self", ".", "consumer", ".", "assign", "(", "assignment", ")", "return", "number_of_messages" ]
Return number of messages in TopicPartition. - ``topic_partition`` (list of TopicPartition)
[ "Return", "number", "of", "messages", "in", "TopicPartition", ".", "-", "topic_partition", "(", "list", "of", "TopicPartition", ")" ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L167-L194
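The keyword above counts messages by snapshotting the current assignment, seeking each partition to its end and then its beginning, and differencing the two positions. Recent kafka-python versions also expose the watermarks directly, so an equivalent probe that never moves a consumer position could look like this (an alternative sketch, not the library's method):

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='127.0.0.1:9092')
tp = TopicPartition('test-topic', 0)
start = consumer.beginning_offsets([tp])[tp]  # oldest retained offset
end = consumer.end_offsets([tp])[tp]          # offset one past the newest message
print(end - start)                            # message count for the partition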
s4int/robotframework-KafkaLibrary
KafkaLibrary/Consumer.py
Consumer.poll
def poll(self, timeout_ms=0, max_records=None): """Fetch data from assigned topics / partitions. - ``max_records`` (int): maximum number of records to poll. Default: Inherit value from max_poll_records. - ``timeout_ms`` (int): Milliseconds spent waiting in poll if data is not available in the buffer. If 0, returns immediately with any records that are available currently in the buffer, else returns empty. Must not be negative. Default: `0` """ messages = self.consumer.poll(timeout_ms=timeout_ms, max_records=max_records) result = [] for _, msg in messages.items(): for item in msg: result.append(item) return result
python
def poll(self, timeout_ms=0, max_records=None): """Fetch data from assigned topics / partitions. - ``max_records`` (int): maximum number of records to poll. Default: Inherit value from max_poll_records. - ``timeout_ms`` (int): Milliseconds spent waiting in poll if data is not available in the buffer. If 0, returns immediately with any records that are available currently in the buffer, else returns empty. Must not be negative. Default: `0` """ messages = self.consumer.poll(timeout_ms=timeout_ms, max_records=max_records) result = [] for _, msg in messages.items(): for item in msg: result.append(item) return result
[ "def", "poll", "(", "self", ",", "timeout_ms", "=", "0", ",", "max_records", "=", "None", ")", ":", "messages", "=", "self", ".", "consumer", ".", "poll", "(", "timeout_ms", "=", "timeout_ms", ",", "max_records", "=", "max_records", ")", "result", "=", "[", "]", "for", "_", ",", "msg", "in", "messages", ".", "items", "(", ")", ":", "for", "item", "in", "msg", ":", "result", ".", "append", "(", "item", ")", "return", "result" ]
Fetch data from assigned topics / partitions. - ``max_records`` (int): maximum number of records to poll. Default: Inherit value from max_poll_records. - ``timeout_ms`` (int): Milliseconds spent waiting in poll if data is not available in the buffer. If 0, returns immediately with any records that are available currently in the buffer, else returns empty. Must not be negative. Default: `0`
[ "Fetch", "data", "from", "assigned", "topics", "/", "partitions", ".", "-", "max_records", "(", "int", ")", ":", "maximum", "number", "of", "records", "to", "poll", ".", "Default", ":", "Inherit", "value", "from", "max_poll_records", ".", "-", "timeout_ms", "(", "int", ")", ":", "Milliseconds", "spent", "waiting", "in", "poll", "if", "data", "is", "not", "available", "in", "the", "buffer", ".", "If", "0", "returns", "immediately", "with", "any", "records", "that", "are", "available", "currently", "in", "the", "buffer", "else", "returns", "empty", ".", "Must", "not", "be", "negative", ".", "Default", ":", "0" ]
train
https://github.com/s4int/robotframework-KafkaLibrary/blob/1f05193958488a5d2e5de19a398ada9edab30d87/KafkaLibrary/Consumer.py#L196-L211
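Putting the Consumer keywords together — connect, subscribe, poll — a minimal end-to-end sketch from plain Python (import path and no-argument constructor are assumptions):

from KafkaLibrary.Consumer import Consumer  # assumed import path within this repository

consumer = Consumer()
consumer.connect_consumer(bootstrap_servers='127.0.0.1:9092',
                          group_id='robot-tests',
                          auto_offset_reset='earliest')
consumer.subscribe_topic(topics='test-topic')  # a bare string is wrapped into a list
for record in consumer.poll(timeout_ms=1000, max_records=10):
    # poll() above flattens kafka-python's per-partition dict into one list
    print(record.topic, record.partition, record.offset, record.value)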
colab/colab
colab/accounts/views.py
EmailView.get
def get(self, request, key): """Validate an email with the given key""" try: email_val = EmailAddressValidation.objects.get(validation_key=key) except EmailAddressValidation.DoesNotExist: messages.error(request, _('The email address you are trying to ' 'verify either has already been verified' ' or does not exist.')) return redirect('/') try: email = EmailAddress.objects.get(address=email_val.address) except EmailAddress.DoesNotExist: email = EmailAddress(address=email_val.address) if email.user and email.user.is_active: messages.error(request, _('The email address you are trying to ' 'verify is already an active email ' 'address.')) email_val.delete() return redirect('/') email.user = email_val.user email.save() email_val.delete() user = User.objects.get(username=email.user.username) user.is_active = True user.save() messages.success(request, _('Email address verified!')) return redirect('user_profile', username=email_val.user.username)
python
def get(self, request, key): """Validate an email with the given key""" try: email_val = EmailAddressValidation.objects.get(validation_key=key) except EmailAddressValidation.DoesNotExist: messages.error(request, _('The email address you are trying to ' 'verify either has already been verified' ' or does not exist.')) return redirect('/') try: email = EmailAddress.objects.get(address=email_val.address) except EmailAddress.DoesNotExist: email = EmailAddress(address=email_val.address) if email.user and email.user.is_active: messages.error(request, _('The email address you are trying to ' 'verify is already an active email ' 'address.')) email_val.delete() return redirect('/') email.user = email_val.user email.save() email_val.delete() user = User.objects.get(username=email.user.username) user.is_active = True user.save() messages.success(request, _('Email address verified!')) return redirect('user_profile', username=email_val.user.username)
[ "def", "get", "(", "self", ",", "request", ",", "key", ")", ":", "try", ":", "email_val", "=", "EmailAddressValidation", ".", "objects", ".", "get", "(", "validation_key", "=", "key", ")", "except", "EmailAddressValidation", ".", "DoesNotExist", ":", "messages", ".", "error", "(", "request", ",", "_", "(", "'The email address you are trying to '", "'verify either has already been verified'", "' or does not exist.'", ")", ")", "return", "redirect", "(", "'/'", ")", "try", ":", "email", "=", "EmailAddress", ".", "objects", ".", "get", "(", "address", "=", "email_val", ".", "address", ")", "except", "EmailAddress", ".", "DoesNotExist", ":", "email", "=", "EmailAddress", "(", "address", "=", "email_val", ".", "address", ")", "if", "email", ".", "user", "and", "email", ".", "user", ".", "is_active", ":", "messages", ".", "error", "(", "request", ",", "_", "(", "'The email address you are trying to '", "'verify is already an active email '", "'address.'", ")", ")", "email_val", ".", "delete", "(", ")", "return", "redirect", "(", "'/'", ")", "email", ".", "user", "=", "email_val", ".", "user", "email", ".", "save", "(", ")", "email_val", ".", "delete", "(", ")", "user", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "email", ".", "user", ".", "username", ")", "user", ".", "is_active", "=", "True", "user", ".", "save", "(", ")", "messages", ".", "success", "(", "request", ",", "_", "(", "'Email address verified!'", ")", ")", "return", "redirect", "(", "'user_profile'", ",", "username", "=", "email_val", ".", "user", ".", "username", ")" ]
Validate an email with the given key
[ "Validate", "an", "email", "with", "the", "given", "key" ]
train
https://github.com/colab/colab/blob/2ad099231e620bec647363b27d38006eca71e13b/colab/accounts/views.py#L91-L123
colab/colab
colab/accounts/views.py
EmailView.delete
def delete(self, request, key): """Remove an email address, validated or not.""" request.DELETE = http.QueryDict(request.body) email_addr = request.DELETE.get('email') user_id = request.DELETE.get('user') if not email_addr: return http.HttpResponseBadRequest() try: email = EmailAddressValidation.objects.get(address=email_addr, user_id=user_id) except EmailAddressValidation.DoesNotExist: pass else: email.delete() return http.HttpResponse(status=204) try: email = EmailAddress.objects.get(address=email_addr, user_id=user_id) except EmailAddress.DoesNotExist: raise http.Http404 email.user = None email.save() return http.HttpResponse(status=204)
python
def delete(self, request, key): """Remove an email address, validated or not.""" request.DELETE = http.QueryDict(request.body) email_addr = request.DELETE.get('email') user_id = request.DELETE.get('user') if not email_addr: return http.HttpResponseBadRequest() try: email = EmailAddressValidation.objects.get(address=email_addr, user_id=user_id) except EmailAddressValidation.DoesNotExist: pass else: email.delete() return http.HttpResponse(status=204) try: email = EmailAddress.objects.get(address=email_addr, user_id=user_id) except EmailAddress.DoesNotExist: raise http.Http404 email.user = None email.save() return http.HttpResponse(status=204)
[ "def", "delete", "(", "self", ",", "request", ",", "key", ")", ":", "request", ".", "DELETE", "=", "http", ".", "QueryDict", "(", "request", ".", "body", ")", "email_addr", "=", "request", ".", "DELETE", ".", "get", "(", "'email'", ")", "user_id", "=", "request", ".", "DELETE", ".", "get", "(", "'user'", ")", "if", "not", "email_addr", ":", "return", "http", ".", "HttpResponseBadRequest", "(", ")", "try", ":", "email", "=", "EmailAddressValidation", ".", "objects", ".", "get", "(", "address", "=", "email_addr", ",", "user_id", "=", "user_id", ")", "except", "EmailAddressValidation", ".", "DoesNotExist", ":", "pass", "else", ":", "email", ".", "delete", "(", ")", "return", "http", ".", "HttpResponse", "(", "status", "=", "204", ")", "try", ":", "email", "=", "EmailAddress", ".", "objects", ".", "get", "(", "address", "=", "email_addr", ",", "user_id", "=", "user_id", ")", "except", "EmailAddress", ".", "DoesNotExist", ":", "raise", "http", ".", "Http404", "email", ".", "user", "=", "None", "email", ".", "save", "(", ")", "return", "http", ".", "HttpResponse", "(", "status", "=", "204", ")" ]
Remove an email address, validated or not.
[ "Remove", "an", "email", "address", "validated", "or", "not", "." ]
train
https://github.com/colab/colab/blob/2ad099231e620bec647363b27d38006eca71e13b/colab/accounts/views.py#L151-L178
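Because the view reads its parameters out of request.body via QueryDict, callers must send a form-encoded DELETE body rather than query-string arguments. A hedged sketch with Django's test client (the URL is hypothetical — whatever route maps to EmailView):

from django.test import Client

client = Client()
response = client.delete(
    '/account/manage-email/some-key/',          # hypothetical URL for EmailView
    data='email=user%40example.com&user=1',     # parsed by http.QueryDict(request.body)
    content_type='application/x-www-form-urlencoded',
)
assert response.status_code == 204  # email detached, or pending validation removed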