Dataset schema (one entry per column; ranges are min-max over the split):

    repository_name              string   7 - 55 chars
    func_path_in_repository      string   4 - 223 chars
    func_name                    string   1 - 134 chars
    whole_func_string            string   75 - 104k chars
    language                     string   1 distinct value
    func_code_string             string   75 - 104k chars
    func_code_tokens             list     19 - 28.4k items
    func_documentation_string    string   1 - 46.9k chars
    func_documentation_tokens    list     1 - 1.97k items
    split_name                   string   1 distinct value
    func_code_url                string   87 - 315 chars
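The per-column statistics above can be recomputed from the records themselves. A minimal sketch, assuming each record is a plain Python dict keyed by the column names (the single record below is hypothetical and heavily truncated):

records = [
    {
        "repository_name": "rodluger/everest",
        "func_path_in_repository": "everest/detrender.py",
        "func_name": "Detrender.name",
        "func_code_tokens": ["def", "name", "(", "self", ")", ":"],
    },
]

# String-length stats, as reported in the schema
lengths = [len(r["repository_name"]) for r in records]
print(min(lengths), max(lengths))

# List-length stats for the token columns
tok_lengths = [len(r["func_code_tokens"]) for r in records]
print(min(tok_lengths), max(tok_lengths))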
rodluger/everest
everest/detrender.py
Detrender.name
def name(self):
    '''
    Returns the name of the current :py:class:`Detrender` subclass.

    '''
    if self.cadence == 'lc':
        return self.__class__.__name__
    else:
        return '%s.sc' % self.__class__.__name__
python
Returns the name of the current :py:class:`Detrender` subclass.
[ "Returns", "the", "name", "of", "the", "current", ":", "py", ":", "class", ":", "Detrender", "subclass", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L300-L309
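A minimal, self-contained sketch of the naming convention this accessor implements. The base class here is a stripped-down stand-in for the real Detrender, the `rPLD` subclass name is only illustrative, and the `@property` decorator is an assumption based on the attribute-style access:

class Detrender:
    cadence = 'lc'

    @property
    def name(self):
        # Long-cadence runs use the bare class name;
        # short-cadence runs get a ".sc" suffix.
        if self.cadence == 'lc':
            return self.__class__.__name__
        else:
            return '%s.sc' % self.__class__.__name__

class rPLD(Detrender):
    pass

d = rPLD()
print(d.name)        # "rPLD"    (long cadence)
d.cadence = 'sc'
print(d.name)        # "rPLD.sc" (short cadence)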
rodluger/everest
everest/detrender.py
Detrender.cv_precompute
def cv_precompute(self, mask, b):
    '''
    Pre-compute the matrices :py:obj:`A` and :py:obj:`B`
    (cross-validation step only) for chunk :py:obj:`b`.

    '''
    # Get current chunk and mask outliers
    m1 = self.get_masked_chunk(b)
    flux = self.fraw[m1]
    K = GetCovariance(self.kernel, self.kernel_params,
                      self.time[m1], self.fraw_err[m1])
    med = np.nanmedian(flux)

    # Now mask the validation set
    M = lambda x, axis=0: np.delete(x, mask, axis=axis)
    m2 = M(m1)
    mK = M(M(K, axis=0), axis=1)
    f = M(flux) - med

    # Pre-compute the matrices
    A = [None for i in range(self.pld_order)]
    B = [None for i in range(self.pld_order)]
    for n in range(self.pld_order):

        # Only compute up to the current PLD order
        if self.lam_idx >= n:
            X2 = self.X(n, m2)
            X1 = self.X(n, m1)
            A[n] = np.dot(X2, X2.T)
            B[n] = np.dot(X1, X2.T)
            del X1, X2
    if self.transit_model is None:
        C = 0
    else:
        C = np.zeros((len(m2), len(m2)))
        mean_transit_model = med * \
            np.sum([tm.depth * tm(self.time[m2])
                    for tm in self.transit_model], axis=0)
        f -= mean_transit_model
        for tm in self.transit_model:
            X2 = tm(self.time[m2]).reshape(-1, 1)
            C += tm.var_depth * np.dot(X2, X2.T)
            del X2
    return A, B, C, mK, f, m1, m2
python
Pre-compute the matrices :py:obj:`A` and :py:obj:`B` (cross-validation step only) for chunk :py:obj:`b`.
[ "Pre", "-", "compute", "the", "matrices", ":", "py", ":", "obj", ":", "A", "and", ":", "py", ":", "obj", ":", "B", "(", "cross", "-", "validation", "step", "only", ")", "for", "chunk", ":", "py", ":", "obj", ":", "b", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L327-L373
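The core of the pre-computation is deleting the validation indices from the design matrix and forming the two Gram matrices. A toy numpy sketch under assumed shapes (no Everest API involved):

import numpy as np

rng = np.random.default_rng(0)
X1 = rng.normal(size=(100, 10))     # regressors over the full chunk (like self.X(n, m1))
mask = np.arange(20, 40)            # validation cadences held out
X2 = np.delete(X1, mask, axis=0)    # regressors over the training set (like self.X(n, m2))

A = np.dot(X2, X2.T)                # (80, 80):  training-vs-training Gram matrix
B = np.dot(X1, X2.T)                # (100, 80): full-chunk-vs-training cross term
print(A.shape, B.shape)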
rodluger/everest
everest/detrender.py
Detrender.cv_compute
def cv_compute(self, b, A, B, C, mK, f, m1, m2):
    '''
    Compute the model (cross-validation step only) for chunk :py:obj:`b`.

    '''
    A = np.sum([l * a for l, a in zip(self.lam[b], A)
                if l is not None], axis=0)
    B = np.sum([l * b for l, b in zip(self.lam[b], B)
                if l is not None], axis=0)
    W = np.linalg.solve(mK + A + C, f)
    if self.transit_model is None:
        model = np.dot(B, W)
    else:
        w_pld = np.concatenate([l * np.dot(self.X(n, m2).T, W)
                                for n, l in enumerate(self.lam[b])
                                if l is not None])
        model = np.dot(np.hstack(
            [self.X(n, m1) for n, l in enumerate(self.lam[b])
             if l is not None]), w_pld)
    model -= np.nanmedian(model)
    return model
python
Compute the model (cross-validation step only) for chunk :py:obj:`b`.
[ "Compute", "the", "model", "(", "cross", "-", "validation", "step", "only", ")", "for", "chunk", ":", "py", ":", "obj", ":", "b", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L375-L397
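With the matrices above, the model is a regularized linear prediction: solve (K + lambda*A) W = f on the training set, then propagate with lambda*B. A toy sketch with synthetic, well-conditioned inputs:

import numpy as np

rng = np.random.default_rng(1)
n_train, n_full = 80, 100
K = np.eye(n_train)                       # stand-in for the masked GP covariance mK
X2 = rng.normal(size=(n_train, 10))
X1 = rng.normal(size=(n_full, 10))
A = X2 @ X2.T                             # Gram matrix on the training set
B = X1 @ X2.T                             # cross term, full chunk vs. training set
f = rng.normal(size=n_train)              # median-subtracted training flux
lam = 100.0                               # regularization coefficient

W = np.linalg.solve(K + lam * A, f)       # weights fit on the training set
model = (lam * B) @ W                     # prediction over the full chunk
model -= np.nanmedian(model)              # zero the median, as in cv_compute
print(model.shape)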
rodluger/everest
everest/detrender.py
Detrender.get_outliers
def get_outliers(self):
    '''
    Performs iterative sigma clipping to get outliers.

    '''
    log.info("Clipping outliers...")
    log.info('Iter %d/%d: %d outliers' %
             (0, self.oiter, len(self.outmask)))

    def M(x):
        return np.delete(x, np.concatenate(
            [self.nanmask, self.badmask, self.transitmask]), axis=0)

    t = M(self.time)
    outmask = [np.array([-1]), np.array(self.outmask)]

    # Loop as long as the last two outlier arrays aren't equal
    while not np.array_equal(outmask[-2], outmask[-1]):

        # Check if we've done this too many times
        if len(outmask) - 1 > self.oiter:
            log.error('Maximum number of iterations in ' +
                      '``get_outliers()`` exceeded. Skipping...')
            break

        # Check if we're going in circles
        if np.any([np.array_equal(outmask[-1], i) for i in outmask[:-1]]):
            log.error('Function ``get_outliers()`` ' +
                      'is going in circles. Skipping...')
            break

        # Compute the model to get the flux
        self.compute()

        # Get the outliers
        f = SavGol(M(self.flux))
        med = np.nanmedian(f)
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        inds = np.where((f > med + self.osigma * MAD) |
                        (f < med - self.osigma * MAD))[0]

        # Project onto unmasked time array
        inds = np.array([np.argmax(self.time == t[i]) for i in inds])
        self.outmask = np.array(inds, dtype=int)

        # Add them to the running list
        outmask.append(np.array(inds))

        # Log
        log.info('Iter %d/%d: %d outliers' %
                 (len(outmask) - 2, self.oiter, len(self.outmask)))
python
Performs iterative sigma clipping to get outliers.
[ "Performs", "iterative", "sigma", "clipping", "to", "get", "outliers", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L399-L448
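The clipping criterion is a robust z-score: flag points more than `osigma` MADs from the median, where the 1.4826 factor scales the MAD to a Gaussian sigma. A standalone sketch of the iteration loop, without the Savitzky-Golay filtering or the mask bookkeeping:

import numpy as np

def sigma_clip(f, nsig=5.0, maxiter=10):
    """Iterative median/MAD clipping; returns the outlier indices."""
    out = np.array([], dtype=int)
    for _ in range(maxiter):
        keep = np.delete(np.arange(len(f)), out)          # stats from non-outliers
        med = np.nanmedian(f[keep])
        mad = 1.4826 * np.nanmedian(np.abs(f[keep] - med))
        new = np.where(np.abs(f - med) > nsig * mad)[0]   # flag against all points
        if np.array_equal(new, out):                      # converged
            break
        out = new
    return out

rng = np.random.default_rng(2)
f = rng.normal(size=500)
f[[10, 200]] += 25.0
print(sigma_clip(f))    # -> [ 10 200]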
rodluger/everest
everest/detrender.py
Detrender.optimize_lambda
def optimize_lambda(self, validation):
    '''
    Returns the index of :py:attr:`self.lambda_arr` that minimizes the
    validation scatter in the segment with minimum at the lowest value
    of :py:obj:`lambda`, with fractional tolerance :py:attr:`self.leps`.

    :param numpy.ndarray validation: The scatter in the validation set \
           as a function of :py:obj:`lambda`

    '''
    maxm = 0
    minr = len(validation)
    for n in range(validation.shape[1]):

        # The index that minimizes the scatter for this segment
        m = np.nanargmin(validation[:, n])
        if m > maxm:

            # The largest of the `m`s.
            maxm = m

        # The largest index with validation scatter within
        # `self.leps` of the minimum for this segment
        r = np.where((validation[:, n] - validation[m, n]) /
                     validation[m, n] <= self.leps)[0][-1]
        if r < minr:

            # The smallest of the `r`s
            minr = r
    return min(maxm, minr)
python
Returns the index of :py:attr:`self.lambda_arr` that minimizes the validation scatter in the segment with minimum at the lowest value of :py:obj:`lambda`, with fractional tolerance :py:attr:`self.leps`.

:param numpy.ndarray validation: The scatter in the validation set as a function of :py:obj:`lambda`
[ "Returns", "the", "index", "of", ":", "py", ":", "attr", ":", "self", ".", "lambda_arr", "that", "minimizes", "the", "validation", "scatter", "in", "the", "segment", "with", "minimum", "at", "the", "lowest", "value", "of", ":", "py", ":", "obj", ":", "lambda", "with", "fractional", "tolerance", ":", "py", ":", "attr", ":", "self", ".", "leps", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L450-L477
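A worked example with a hypothetical 5x2 validation grid (rows are lambda values, columns are light curve segments) and leps = 0.05:

import numpy as np

leps = 0.05
validation = np.array([[5.0, 6.0],
                       [4.0, 5.0],
                       [3.5, 4.9],
                       [3.6, 4.8],
                       [4.2, 5.5]])

maxm, minr = 0, len(validation)
for n in range(validation.shape[1]):
    m = np.nanargmin(validation[:, n])                 # per-segment minimum index
    maxm = max(maxm, m)
    within = (validation[:, n] - validation[m, n]) / validation[m, n] <= leps
    r = np.where(within)[0][-1]                        # last index within tolerance
    minr = min(minr, r)

print(min(maxm, minr))    # -> 3: the chosen index is acceptable to every segment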
rodluger/everest
everest/detrender.py
Detrender.cross_validate
def cross_validate(self, ax, info=''):
    '''
    Cross-validate to find the optimal value of :py:obj:`lambda`.

    :param ax: The current :py:obj:`matplotlib.pyplot` axis instance to \
           plot the cross-validation results.
    :param str info: The label to show in the bottom right-hand corner \
           of the plot. Default `''`

    '''
    # Loop over all chunks
    ax = np.atleast_1d(ax)
    for b, brkpt in enumerate(self.breakpoints):
        log.info("Cross-validating chunk %d/%d..." %
                 (b + 1, len(self.breakpoints)))
        med_training = np.zeros_like(self.lambda_arr)
        med_validation = np.zeros_like(self.lambda_arr)

        # Mask for current chunk
        m = self.get_masked_chunk(b)

        # Check that we have enough data
        if len(m) < 3 * self.cdivs:
            self.cdppv_arr[b] = np.nan
            self.lam[b][self.lam_idx] = 0.
            log.info(
                "Insufficient data to run cross-validation on this chunk.")
            continue

        # Mask transits and outliers
        time = self.time[m]
        flux = self.fraw[m]
        ferr = self.fraw_err[m]
        med = np.nanmedian(flux)

        # The precision in the validation set
        validation = [[] for k, _ in enumerate(self.lambda_arr)]

        # The precision in the training set
        training = [[] for k, _ in enumerate(self.lambda_arr)]

        # Setup the GP
        gp = GP(self.kernel, self.kernel_params, white=False)
        gp.compute(time, ferr)

        # The masks
        masks = list(Chunks(np.arange(0, len(time)),
                            len(time) // self.cdivs))

        # Loop over the different masks
        for i, mask in enumerate(masks):
            log.info("Section %d/%d..." % (i + 1, len(masks)))

            # Pre-compute (training set)
            pre_t = self.cv_precompute([], b)

            # Pre-compute (validation set)
            pre_v = self.cv_precompute(mask, b)

            # Iterate over lambda
            for k, lam in enumerate(self.lambda_arr):

                # Update the lambda matrix
                self.lam[b][self.lam_idx] = lam

                # Training set
                model = self.cv_compute(b, *pre_t)
                training[k].append(
                    self.fobj(flux - model, med, time, gp, mask))

                # Validation set
                model = self.cv_compute(b, *pre_v)
                validation[k].append(
                    self.fobj(flux - model, med, time, gp, mask))

        # Finalize
        training = np.array(training)
        validation = np.array(validation)
        for k, _ in enumerate(self.lambda_arr):

            # Take the mean
            med_validation[k] = np.nanmean(validation[k])
            med_training[k] = np.nanmean(training[k])

        # Compute best model
        i = self.optimize_lambda(validation)
        v_best = med_validation[i]
        t_best = med_training[i]
        self.cdppv_arr[b] = v_best / t_best
        self.lam[b][self.lam_idx] = self.lambda_arr[i]
        log.info("Found optimum solution at log(lambda) = %.1f." %
                 np.log10(self.lam[b][self.lam_idx]))

        # Plotting: There's not enough space in the DVS to show the
        # cross-val results for more than three light curve segments.
        if len(self.breakpoints) <= 3:

            # Plotting hack: first x tick will be -infty
            lambda_arr = np.array(self.lambda_arr)
            lambda_arr[0] = 10 ** (np.log10(lambda_arr[1]) - 3)

            # Plot cross-val
            for n in range(len(masks)):
                ax[b].plot(np.log10(lambda_arr), validation[:, n],
                           'r-', alpha=0.3)
            ax[b].plot(np.log10(lambda_arr), med_training,
                       'b-', lw=1., alpha=1)
            ax[b].plot(np.log10(lambda_arr), med_validation,
                       'r-', lw=1., alpha=1)
            ax[b].axvline(np.log10(self.lam[b][self.lam_idx]),
                          color='k', ls='--', lw=0.75, alpha=0.75)
            ax[b].axhline(v_best, color='k', ls='--', lw=0.75, alpha=0.75)
            ax[b].set_ylabel(r'Scatter (ppm)', fontsize=5)
            hi = np.max(validation[0])
            lo = np.min(training)
            rng = (hi - lo)
            ax[b].set_ylim(lo - 0.15 * rng, hi + 0.15 * rng)
            if rng > 2:
                ax[b].get_yaxis().set_major_formatter(Formatter.CDPP)
                ax[b].get_yaxis().set_major_locator(
                    MaxNLocator(4, integer=True))
            elif rng > 0.2:
                ax[b].get_yaxis().set_major_formatter(Formatter.CDPP1F)
                ax[b].get_yaxis().set_major_locator(MaxNLocator(4))
            else:
                ax[b].get_yaxis().set_major_formatter(Formatter.CDPP2F)
                ax[b].get_yaxis().set_major_locator(MaxNLocator(4))

            # Fix the x ticks
            xticks = [np.log10(lambda_arr[0])] + list(np.linspace(
                np.log10(lambda_arr[1]), np.log10(lambda_arr[-1]), 6))
            ax[b].set_xticks(xticks)
            ax[b].set_xticklabels(['' for x in xticks])
            pad = 0.01 * \
                (np.log10(lambda_arr[-1]) - np.log10(lambda_arr[0]))
            ax[b].set_xlim(np.log10(lambda_arr[0]) - pad,
                           np.log10(lambda_arr[-1]) + pad)
            ax[b].annotate('%s.%d' % (info, b), xy=(0.02, 0.025),
                           xycoords='axes fraction',
                           ha='left', va='bottom', fontsize=7,
                           alpha=0.25, fontweight='bold')

    # Finally, compute the model
    self.compute()

    # Tidy up
    if len(ax) == 2:
        ax[0].xaxis.set_ticks_position('top')
    for axis in ax[1:]:
        axis.spines['top'].set_visible(False)
        axis.xaxis.set_ticks_position('bottom')
    if len(self.breakpoints) <= 3:
        # A hack to mark the first xtick as -infty
        labels = ['%.1f' % x for x in xticks]
        labels[0] = r'$-\infty$'
        ax[-1].set_xticklabels(labels)
        ax[-1].set_xlabel(r'Log $\Lambda$', fontsize=5)
    else:
        # We're just going to plot lambda as a function of chunk number
        bs = np.arange(len(self.breakpoints))
        ax[0].plot(bs + 1, [np.log10(self.lam[b][self.lam_idx])
                            for b in bs], 'r.')
        ax[0].plot(bs + 1, [np.log10(self.lam[b][self.lam_idx])
                            for b in bs], 'r-', alpha=0.25)
        ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5)
        ax[0].margins(0.1, 0.1)
        ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1))
        ax[0].set_xticklabels([])

        # Now plot the CDPP and approximate validation CDPP
        cdpp_arr = self.get_cdpp_arr()
        cdppv_arr = self.cdppv_arr * cdpp_arr
        ax[1].plot(bs + 1, cdpp_arr, 'b.')
        ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25)
        ax[1].plot(bs + 1, cdppv_arr, 'r.')
        ax[1].plot(bs + 1, cdppv_arr, 'r-', alpha=0.25)
        ax[1].margins(0.1, 0.1)
        ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5)
        ax[1].set_xlabel(r'Chunk', fontsize=5)
        if len(self.breakpoints) < 15:
            ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1))
        else:
            ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1, 2))
python
Cross-validate to find the optimal value of :py:obj:`lambda`.

:param ax: The current :py:obj:`matplotlib.pyplot` axis instance to plot the cross-validation results.
:param str info: The label to show in the bottom right-hand corner of the plot. Default `''`
[ "Cross", "-", "validate", "to", "find", "the", "optimal", "value", "of", ":", "py", ":", "obj", ":", "lambda", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L498-L688
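The overall pattern is a grid search: for each lambda, fit on the training points, score the scatter on the held-out points, and keep the lambda that minimizes the validation scatter. A generic ridge-regression sketch of that loop (not Everest's objective function or GP-based scatter metric):

import numpy as np

rng = np.random.default_rng(3)
X = rng.normal(size=(200, 10))
y = X @ rng.normal(size=10) + 0.1 * rng.normal(size=200)

mask = np.arange(0, 200, 5)                 # validation indices
train = np.delete(np.arange(200), mask)
lambda_arr = np.logspace(-2, 4, 20)

scatter = []
for lam in lambda_arr:
    # Ridge solution on the training set only
    w = np.linalg.solve(X[train].T @ X[train] + lam * np.eye(10),
                        X[train].T @ y[train])
    # Residual scatter on the held-out points
    scatter.append(np.std(y[mask] - X[mask] @ w))

best = lambda_arr[np.argmin(scatter)]
print(best)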
rodluger/everest
everest/detrender.py
Detrender.get_ylim
def get_ylim(self):
    '''
    Computes the ideal y-axis limits for the light curve plot. Attempts
    to set the limits equal to those of the raw light curve, but if
    more than 1% of the flux lies either above or below these limits,
    auto-expands to include those points. At the end, adds 5% padding
    to both the top and the bottom.

    '''
    bn = np.array(
        list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)
    fraw = np.delete(self.fraw, bn)
    lo, hi = fraw[np.argsort(fraw)][[3, -3]]
    flux = np.delete(self.flux, bn)
    fsort = flux[np.argsort(flux)]
    if fsort[int(0.01 * len(fsort))] < lo:
        lo = fsort[int(0.01 * len(fsort))]
    if fsort[int(0.99 * len(fsort))] > hi:
        hi = fsort[int(0.99 * len(fsort))]
    pad = (hi - lo) * 0.05
    ylim = (lo - pad, hi + pad)
    return ylim
python
Computes the ideal y-axis limits for the light curve plot. Attempts to set the limits equal to those of the raw light curve, but if more than 1% of the flux lies either above or below these limits, auto-expands to include those points. At the end, adds 5% padding to both the top and the bottom.
[ "Computes", "the", "ideal", "y", "-", "axis", "limits", "for", "the", "light", "curve", "plot", ".", "Attempts", "to", "set", "the", "limits", "equal", "to", "those", "of", "the", "raw", "light", "curve", "but", "if", "more", "than", "1%", "of", "the", "flux", "lies", "either", "above", "or", "below", "these", "limits", "auto", "-", "expands", "to", "include", "those", "points", ".", "At", "the", "end", "adds", "5%", "padding", "to", "both", "the", "top", "and", "the", "bottom", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L700-L722
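The same limit logic on synthetic data. The de-trended flux here is deliberately given a wider spread than the raw flux so the 1%/99% expansion actually triggers:

import numpy as np

rng = np.random.default_rng(4)
fraw = rng.normal(1.0, 0.01, size=1000)      # raw flux sets the initial limits
flux = rng.normal(1.0, 0.02, size=1000)      # de-trended flux (wider, for illustration)

lo, hi = np.sort(fraw)[[3, -3]]              # near-extreme raw values
fsort = np.sort(flux)
lo = min(lo, fsort[int(0.01 * len(fsort))])  # expand if >1% of flux lies below
hi = max(hi, fsort[int(0.99 * len(fsort))])  # expand if >1% of flux lies above
pad = 0.05 * (hi - lo)                       # 5% padding on both sides
ylim = (lo - pad, hi + pad)
print(ylim)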
rodluger/everest
everest/detrender.py
Detrender.plot_lc
def plot_lc(self, ax, info_left='', info_right='', color='b'):
    '''
    Plots the current light curve. This is called at several stages to
    plot the de-trending progress as a function of the different *PLD*
    orders.

    :param ax: The current :py:obj:`matplotlib.pyplot` axis instance
    :param str info_left: Information to display at the left of the \
           plot. Default `''`
    :param str info_right: Information to display at the right of the \
           plot. Default `''`
    :param str color: The color of the data points. Default `'b'`

    '''
    # Plot
    if (self.cadence == 'lc') or (len(self.time) < 4000):
        ax.plot(self.apply_mask(self.time), self.apply_mask(self.flux),
                ls='none', marker='.', color=color, markersize=2, alpha=0.5)
        ax.plot(self.time[self.transitmask], self.flux[self.transitmask],
                ls='none', marker='.', color=color, markersize=2, alpha=0.5)
    else:
        ax.plot(self.apply_mask(self.time), self.apply_mask(self.flux),
                ls='none', marker='.', color=color,
                markersize=2, alpha=0.03, zorder=-1)
        ax.plot(self.time[self.transitmask], self.flux[self.transitmask],
                ls='none', marker='.', color=color,
                markersize=2, alpha=0.03, zorder=-1)
        ax.set_rasterization_zorder(0)
    ylim = self.get_ylim()

    # Plot the outliers, but not the NaNs
    badmask = [i for i in self.badmask if i not in self.nanmask]

    def O1(x):
        return x[self.outmask]

    def O2(x):
        return x[badmask]

    if self.cadence == 'lc':
        ax.plot(O1(self.time), O1(self.flux), ls='none', color="#777777",
                marker='.', markersize=2, alpha=0.5)
        ax.plot(O2(self.time), O2(self.flux), 'r.',
                markersize=2, alpha=0.25)
    else:
        ax.plot(O1(self.time), O1(self.flux), ls='none', color="#777777",
                marker='.', markersize=2, alpha=0.25, zorder=-1)
        ax.plot(O2(self.time), O2(self.flux), 'r.',
                markersize=2, alpha=0.125, zorder=-1)
    for i in np.where(self.flux < ylim[0])[0]:
        if i in badmask:
            color = "#ffcccc"
        elif i in self.outmask:
            color = "#cccccc"
        elif i in self.nanmask:
            continue
        else:
            color = "#ccccff"
        ax.annotate('', xy=(self.time[i], ylim[0]), xycoords='data',
                    xytext=(0, 15), textcoords='offset points',
                    arrowprops=dict(arrowstyle="-|>", color=color))
    for i in np.where(self.flux > ylim[1])[0]:
        if i in badmask:
            color = "#ffcccc"
        elif i in self.outmask:
            color = "#cccccc"
        elif i in self.nanmask:
            continue
        else:
            color = "#ccccff"
        ax.annotate('', xy=(self.time[i], ylim[1]), xycoords='data',
                    xytext=(0, -15), textcoords='offset points',
                    arrowprops=dict(arrowstyle="-|>", color=color))

    # Plot the breakpoints
    for brkpt in self.breakpoints[:-1]:
        if len(self.breakpoints) <= 5:
            ax.axvline(self.time[brkpt], color='r', ls='--', alpha=0.5)
        else:
            ax.axvline(self.time[brkpt], color='r', ls='-', alpha=0.025)

    # Appearance
    if len(self.cdpp_arr) == 2:
        ax.annotate('%.2f ppm' % self.cdpp_arr[0], xy=(0.02, 0.975),
                    xycoords='axes fraction',
                    ha='left', va='top', fontsize=10)
        ax.annotate('%.2f ppm' % self.cdpp_arr[1], xy=(0.98, 0.975),
                    xycoords='axes fraction',
                    ha='right', va='top', fontsize=10)
    elif len(self.cdpp_arr) < 6:
        for n in range(len(self.cdpp_arr)):
            if n > 0:
                x = (self.time[self.breakpoints[n - 1]] - self.time[0]
                     ) / (self.time[-1] - self.time[0]) + 0.02
            else:
                x = 0.02
            ax.annotate('%.2f ppm' % self.cdpp_arr[n], xy=(x, 0.975),
                        xycoords='axes fraction',
                        ha='left', va='top', fontsize=8)
    else:
        ax.annotate('%.2f ppm' % self.cdpp, xy=(0.02, 0.975),
                    xycoords='axes fraction',
                    ha='left', va='top', fontsize=10)
    ax.annotate(info_right, xy=(0.98, 0.025), xycoords='axes fraction',
                ha='right', va='bottom', fontsize=10,
                alpha=0.5, fontweight='bold')
    ax.annotate(info_left, xy=(0.02, 0.025), xycoords='axes fraction',
                ha='left', va='bottom', fontsize=8)
    ax.set_xlabel(r'Time (%s)' % self._mission.TIMEUNITS, fontsize=5)
    ax.margins(0.01, 0.1)
    ax.set_ylim(*ylim)
    ax.get_yaxis().set_major_formatter(Formatter.Flux)
python
Plots the current light curve. This is called at several stages to
plot the de-trending progress as a function of the different
*PLD* orders.

:param ax: The current :py:obj:`matplotlib.pyplot` axis instance
:param str info_left: Information to display at the left of the \
    plot. Default `''`
:param str info_right: Information to display at the right of the \
    plot. Default `''`
:param str color: The color of the data points. Default `'b'`
[ "Plots", "the", "current", "light", "curve", ".", "This", "is", "called", "at", "several", "stages", "to", "plot", "the", "de", "-", "trending", "progress", "as", "a", "function", "of", "the", "different", "*", "PLD", "*", "orders", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L724-L835
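plot_lc marks points falling outside the y-limits with small colored arrows at the plot edge instead of letting them stretch the axis. A self-contained sketch of that annotation pattern (stand-in data, not part of the repository):

import numpy as np
import matplotlib.pyplot as pl

t = np.linspace(0, 10, 200)
f = np.random.randn(200)
f[50] = 8.                                  # an outlier beyond the limits
fig, ax = pl.subplots(1)
ax.plot(t, f, 'b.', markersize=2, alpha=0.5)
ylim = (-4, 4)
ax.set_ylim(*ylim)
for i in np.where(f > ylim[1])[0]:          # flag clipped points with arrows
    ax.annotate('', xy=(t[i], ylim[1]), xycoords='data',
                xytext=(0, -15), textcoords='offset points',
                arrowprops=dict(arrowstyle="-|>", color="#ccccff"))
pl.show()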
rodluger/everest
everest/detrender.py
Detrender.plot_final
def plot_final(self, ax):
        '''
        Plots the final de-trended light curve.

        '''

        # Plot the light curve
        bnmask = np.array(
            list(set(np.concatenate([self.badmask, self.nanmask]))),
            dtype=int)

        def M(x):
            return np.delete(x, bnmask)

        if (self.cadence == 'lc') or (len(self.time) < 4000):
            ax.plot(M(self.time), M(self.flux), ls='none', marker='.',
                    color='k', markersize=2, alpha=0.3)
        else:
            ax.plot(M(self.time), M(self.flux), ls='none', marker='.',
                    color='k', markersize=2, alpha=0.03, zorder=-1)
            ax.set_rasterization_zorder(0)

        # Hack: Plot invisible first and last points to ensure the
        # x axis limits are the same in the other plots, where we
        # also plot outliers!
        ax.plot(self.time[0], np.nanmedian(M(self.flux)),
                marker='.', alpha=0)
        ax.plot(self.time[-1], np.nanmedian(M(self.flux)),
                marker='.', alpha=0)

        # Plot the GP (long cadence only)
        if self.cadence == 'lc':
            gp = GP(self.kernel, self.kernel_params, white=False)
            gp.compute(self.apply_mask(self.time),
                       self.apply_mask(self.fraw_err))
            med = np.nanmedian(self.apply_mask(self.flux))
            y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time)
            y += med
            ax.plot(M(self.time), M(y), 'r-', lw=0.5, alpha=0.5)

            # Compute the CDPP of the GP-detrended flux
            self.cdppg = self._mission.CDPP(
                self.apply_mask(self.flux - y + med), cadence=self.cadence)
        else:
            # We're not going to calculate this
            self.cdppg = 0.

        # Appearance
        ax.annotate('Final', xy=(0.98, 0.025), xycoords='axes fraction',
                    ha='right', va='bottom', fontsize=10, alpha=0.5,
                    fontweight='bold')
        ax.margins(0.01, 0.1)

        # Get y lims that bound 99% of the flux
        flux = np.delete(self.flux, bnmask)
        N = int(0.995 * len(flux))
        hi, lo = flux[np.argsort(flux)][[N, -N]]
        fsort = flux[np.argsort(flux)]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)
        ax.get_yaxis().set_major_formatter(Formatter.Flux)
python
def plot_final(self, ax):
        '''
        Plots the final de-trended light curve.

        '''

        # Plot the light curve
        bnmask = np.array(
            list(set(np.concatenate([self.badmask, self.nanmask]))),
            dtype=int)

        def M(x):
            return np.delete(x, bnmask)

        if (self.cadence == 'lc') or (len(self.time) < 4000):
            ax.plot(M(self.time), M(self.flux), ls='none', marker='.',
                    color='k', markersize=2, alpha=0.3)
        else:
            ax.plot(M(self.time), M(self.flux), ls='none', marker='.',
                    color='k', markersize=2, alpha=0.03, zorder=-1)
            ax.set_rasterization_zorder(0)

        # Hack: Plot invisible first and last points to ensure the
        # x axis limits are the same in the other plots, where we
        # also plot outliers!
        ax.plot(self.time[0], np.nanmedian(M(self.flux)),
                marker='.', alpha=0)
        ax.plot(self.time[-1], np.nanmedian(M(self.flux)),
                marker='.', alpha=0)

        # Plot the GP (long cadence only)
        if self.cadence == 'lc':
            gp = GP(self.kernel, self.kernel_params, white=False)
            gp.compute(self.apply_mask(self.time),
                       self.apply_mask(self.fraw_err))
            med = np.nanmedian(self.apply_mask(self.flux))
            y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time)
            y += med
            ax.plot(M(self.time), M(y), 'r-', lw=0.5, alpha=0.5)

            # Compute the CDPP of the GP-detrended flux
            self.cdppg = self._mission.CDPP(
                self.apply_mask(self.flux - y + med), cadence=self.cadence)
        else:
            # We're not going to calculate this
            self.cdppg = 0.

        # Appearance
        ax.annotate('Final', xy=(0.98, 0.025), xycoords='axes fraction',
                    ha='right', va='bottom', fontsize=10, alpha=0.5,
                    fontweight='bold')
        ax.margins(0.01, 0.1)

        # Get y lims that bound 99% of the flux
        flux = np.delete(self.flux, bnmask)
        N = int(0.995 * len(flux))
        hi, lo = flux[np.argsort(flux)][[N, -N]]
        fsort = flux[np.argsort(flux)]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)
        ax.get_yaxis().set_major_formatter(Formatter.Flux)
[ "def", "plot_final", "(", "self", ",", "ax", ")", ":", "# Plot the light curve", "bnmask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "self", ".", "badmask", ",", "self", ".", "nanmask", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "def", "M", "(", "x", ")", ":", "return", "np", ".", "delete", "(", "x", ",", "bnmask", ")", "if", "(", "self", ".", "cadence", "==", "'lc'", ")", "or", "(", "len", "(", "self", ".", "time", ")", "<", "4000", ")", ":", "ax", ".", "plot", "(", "M", "(", "self", ".", "time", ")", ",", "M", "(", "self", ".", "flux", ")", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "2", ",", "alpha", "=", "0.3", ")", "else", ":", "ax", ".", "plot", "(", "M", "(", "self", ".", "time", ")", ",", "M", "(", "self", ".", "flux", ")", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "2", ",", "alpha", "=", "0.03", ",", "zorder", "=", "-", "1", ")", "ax", ".", "set_rasterization_zorder", "(", "0", ")", "# Hack: Plot invisible first and last points to ensure", "# the x axis limits are the", "# same in the other plots, where we also plot outliers!", "ax", ".", "plot", "(", "self", ".", "time", "[", "0", "]", ",", "np", ".", "nanmedian", "(", "M", "(", "self", ".", "flux", ")", ")", ",", "marker", "=", "'.'", ",", "alpha", "=", "0", ")", "ax", ".", "plot", "(", "self", ".", "time", "[", "-", "1", "]", ",", "np", ".", "nanmedian", "(", "M", "(", "self", ".", "flux", ")", ")", ",", "marker", "=", "'.'", ",", "alpha", "=", "0", ")", "# Plot the GP (long cadence only)", "if", "self", ".", "cadence", "==", "'lc'", ":", "gp", "=", "GP", "(", "self", ".", "kernel", ",", "self", ".", "kernel_params", ",", "white", "=", "False", ")", "gp", ".", "compute", "(", "self", ".", "apply_mask", "(", "self", ".", "time", ")", ",", "self", ".", "apply_mask", "(", "self", ".", "fraw_err", ")", ")", "med", "=", "np", ".", "nanmedian", "(", "self", ".", "apply_mask", "(", "self", ".", "flux", ")", ")", "y", ",", "_", "=", "gp", ".", "predict", "(", "self", ".", "apply_mask", "(", "self", ".", "flux", ")", "-", "med", ",", "self", ".", "time", ")", "y", "+=", "med", "ax", ".", "plot", "(", "M", "(", "self", ".", "time", ")", ",", "M", "(", "y", ")", ",", "'r-'", ",", "lw", "=", "0.5", ",", "alpha", "=", "0.5", ")", "# Compute the CDPP of the GP-detrended flux", "self", ".", "cdppg", "=", "self", ".", "_mission", ".", "CDPP", "(", "self", ".", "apply_mask", "(", "self", ".", "flux", "-", "y", "+", "med", ")", ",", "cadence", "=", "self", ".", "cadence", ")", "else", ":", "# We're not going to calculate this", "self", ".", "cdppg", "=", "0.", "# Appearance", "ax", ".", "annotate", "(", "'Final'", ",", "xy", "=", "(", "0.98", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'right'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ",", "alpha", "=", "0.5", ",", "fontweight", "=", "'bold'", ")", "ax", ".", "margins", "(", "0.01", ",", "0.1", ")", "# Get y lims that bound 99% of the flux", "flux", "=", "np", ".", "delete", "(", "self", ".", "flux", ",", "bnmask", ")", "N", "=", "int", "(", "0.995", "*", "len", "(", "flux", ")", ")", "hi", ",", "lo", "=", "flux", "[", "np", ".", "argsort", "(", "flux", ")", "]", "[", "[", "N", ",", "-", "N", "]", "]", "fsort", "=", "flux", "[", "np", ".", "argsort", "(", "flux", ")", "]", "pad", "=", "(", "hi", "-", "lo", ")", "*", "0.1", "ylim", "=", "(", "lo", "-", "pad", ",", "hi", 
"+", "pad", ")", "ax", ".", "set_ylim", "(", "ylim", ")", "ax", ".", "get_yaxis", "(", ")", ".", "set_major_formatter", "(", "Formatter", ".", "Flux", ")" ]
Plots the final de-trended light curve.
[ "Plots", "the", "final", "de", "-", "trended", "light", "curve", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L837-L894
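The GP overlay above follows a median-subtract / predict / add-back pattern. The code uses everest's own GP wrapper; the sketch below shows the same pattern with the george package instead, which is an assumption of this example rather than an import of the code above:

import numpy as np
import george

t = np.linspace(0, 10, 300)
y = np.sin(t) + 0.1 * np.random.randn(300)
med = np.median(y)
gp = george.GP(1.0 * george.kernels.Matern32Kernel(2.0))
gp.compute(t, yerr=0.1 * np.ones_like(t))
mu = gp.predict(y - med, t, return_cov=False) + med  # GP model of the signal
detrended = y - mu + med                             # flattened light curve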
rodluger/everest
everest/detrender.py
Detrender.plot_cbv
def plot_cbv(self, ax, flux, info, show_cbv=False):
        '''
        Plots the final CBV-corrected light curve.

        '''

        # Plot the light curve
        bnmask = np.array(
            list(set(np.concatenate([self.badmask, self.nanmask]))),
            dtype=int)

        def M(x):
            return np.delete(x, bnmask)

        if self.cadence == 'lc':
            ax.plot(M(self.time), M(flux), ls='none', marker='.',
                    color='k', markersize=2, alpha=0.45)
        else:
            ax.plot(M(self.time), M(flux), ls='none', marker='.',
                    color='k', markersize=2, alpha=0.03, zorder=-1)
            ax.set_rasterization_zorder(0)

        # Hack: Plot invisible first and last points to ensure the
        # x axis limits are the same in the other plots, where we
        # also plot outliers!
        ax.plot(self.time[0], np.nanmedian(M(flux)), marker='.', alpha=0)
        ax.plot(self.time[-1], np.nanmedian(M(flux)), marker='.', alpha=0)

        # Show CBV fit?
        if show_cbv:
            ax.plot(self.time, self._mission.FitCBVs(self) +
                    np.nanmedian(flux), 'r-', alpha=0.2)

        # Appearance
        ax.annotate(info, xy=(0.98, 0.025), xycoords='axes fraction',
                    ha='right', va='bottom', fontsize=10, alpha=0.5,
                    fontweight='bold')
        ax.margins(0.01, 0.1)

        # Get y lims that bound 99% of the flux
        flux = np.delete(flux, bnmask)
        N = int(0.995 * len(flux))
        hi, lo = flux[np.argsort(flux)][[N, -N]]
        fsort = flux[np.argsort(flux)]
        pad = (hi - lo) * 0.2
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)
        ax.get_yaxis().set_major_formatter(Formatter.Flux)
        ax.set_xlabel(r'Time (%s)' % self._mission.TIMEUNITS, fontsize=9)
        for tick in ax.get_xticklabels() + ax.get_yticklabels():
            tick.set_fontsize(7)
python
def plot_cbv(self, ax, flux, info, show_cbv=False):
        '''
        Plots the final CBV-corrected light curve.

        '''

        # Plot the light curve
        bnmask = np.array(
            list(set(np.concatenate([self.badmask, self.nanmask]))),
            dtype=int)

        def M(x):
            return np.delete(x, bnmask)

        if self.cadence == 'lc':
            ax.plot(M(self.time), M(flux), ls='none', marker='.',
                    color='k', markersize=2, alpha=0.45)
        else:
            ax.plot(M(self.time), M(flux), ls='none', marker='.',
                    color='k', markersize=2, alpha=0.03, zorder=-1)
            ax.set_rasterization_zorder(0)

        # Hack: Plot invisible first and last points to ensure the
        # x axis limits are the same in the other plots, where we
        # also plot outliers!
        ax.plot(self.time[0], np.nanmedian(M(flux)), marker='.', alpha=0)
        ax.plot(self.time[-1], np.nanmedian(M(flux)), marker='.', alpha=0)

        # Show CBV fit?
        if show_cbv:
            ax.plot(self.time, self._mission.FitCBVs(self) +
                    np.nanmedian(flux), 'r-', alpha=0.2)

        # Appearance
        ax.annotate(info, xy=(0.98, 0.025), xycoords='axes fraction',
                    ha='right', va='bottom', fontsize=10, alpha=0.5,
                    fontweight='bold')
        ax.margins(0.01, 0.1)

        # Get y lims that bound 99% of the flux
        flux = np.delete(flux, bnmask)
        N = int(0.995 * len(flux))
        hi, lo = flux[np.argsort(flux)][[N, -N]]
        fsort = flux[np.argsort(flux)]
        pad = (hi - lo) * 0.2
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)
        ax.get_yaxis().set_major_formatter(Formatter.Flux)
        ax.set_xlabel(r'Time (%s)' % self._mission.TIMEUNITS, fontsize=9)
        for tick in ax.get_xticklabels() + ax.get_yticklabels():
            tick.set_fontsize(7)
[ "def", "plot_cbv", "(", "self", ",", "ax", ",", "flux", ",", "info", ",", "show_cbv", "=", "False", ")", ":", "# Plot the light curve", "bnmask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "self", ".", "badmask", ",", "self", ".", "nanmask", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "def", "M", "(", "x", ")", ":", "return", "np", ".", "delete", "(", "x", ",", "bnmask", ")", "if", "self", ".", "cadence", "==", "'lc'", ":", "ax", ".", "plot", "(", "M", "(", "self", ".", "time", ")", ",", "M", "(", "flux", ")", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "2", ",", "alpha", "=", "0.45", ")", "else", ":", "ax", ".", "plot", "(", "M", "(", "self", ".", "time", ")", ",", "M", "(", "flux", ")", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "2", ",", "alpha", "=", "0.03", ",", "zorder", "=", "-", "1", ")", "ax", ".", "set_rasterization_zorder", "(", "0", ")", "# Hack: Plot invisible first and last points to ensure", "# the x axis limits are the", "# same in the other plots, where we also plot outliers!", "ax", ".", "plot", "(", "self", ".", "time", "[", "0", "]", ",", "np", ".", "nanmedian", "(", "M", "(", "flux", ")", ")", ",", "marker", "=", "'.'", ",", "alpha", "=", "0", ")", "ax", ".", "plot", "(", "self", ".", "time", "[", "-", "1", "]", ",", "np", ".", "nanmedian", "(", "M", "(", "flux", ")", ")", ",", "marker", "=", "'.'", ",", "alpha", "=", "0", ")", "# Show CBV fit?", "if", "show_cbv", ":", "ax", ".", "plot", "(", "self", ".", "time", ",", "self", ".", "_mission", ".", "FitCBVs", "(", "self", ")", "+", "np", ".", "nanmedian", "(", "flux", ")", ",", "'r-'", ",", "alpha", "=", "0.2", ")", "# Appearance", "ax", ".", "annotate", "(", "info", ",", "xy", "=", "(", "0.98", ",", "0.025", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'right'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ",", "alpha", "=", "0.5", ",", "fontweight", "=", "'bold'", ")", "ax", ".", "margins", "(", "0.01", ",", "0.1", ")", "# Get y lims that bound 99% of the flux", "flux", "=", "np", ".", "delete", "(", "flux", ",", "bnmask", ")", "N", "=", "int", "(", "0.995", "*", "len", "(", "flux", ")", ")", "hi", ",", "lo", "=", "flux", "[", "np", ".", "argsort", "(", "flux", ")", "]", "[", "[", "N", ",", "-", "N", "]", "]", "fsort", "=", "flux", "[", "np", ".", "argsort", "(", "flux", ")", "]", "pad", "=", "(", "hi", "-", "lo", ")", "*", "0.2", "ylim", "=", "(", "lo", "-", "pad", ",", "hi", "+", "pad", ")", "ax", ".", "set_ylim", "(", "ylim", ")", "ax", ".", "get_yaxis", "(", ")", ".", "set_major_formatter", "(", "Formatter", ".", "Flux", ")", "ax", ".", "set_xlabel", "(", "r'Time (%s)'", "%", "self", ".", "_mission", ".", "TIMEUNITS", ",", "fontsize", "=", "9", ")", "for", "tick", "in", "ax", ".", "get_xticklabels", "(", ")", "+", "ax", ".", "get_yticklabels", "(", ")", ":", "tick", ".", "set_fontsize", "(", "7", ")" ]
Plots the final CBV-corrected light curve.
[ "Plots", "the", "final", "CBV", "-", "corrected", "light", "curve", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L896-L942
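Both plotting routines set y-limits that bound the central 99% of the flux (0.5% clipped at each end) plus padding; plot_final pads by 10% and plot_cbv by 20%. The computation on its own, with stand-in data:

import numpy as np

flux = np.random.randn(1000)
N = int(0.995 * len(flux))
fsort = flux[np.argsort(flux)]
hi, lo = fsort[[N, -N]]       # 99.5th and 0.5th percentiles
pad = (hi - lo) * 0.2
ylim = (lo - pad, hi + pad)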
rodluger/everest
everest/detrender.py
Detrender.load_tpf
def load_tpf(self):
        '''
        Loads the target pixel file.

        '''

        if not self.loaded:
            if self._data is not None:
                data = self._data
            else:
                data = self._mission.GetData(
                    self.ID, season=self.season,
                    cadence=self.cadence,
                    clobber=self.clobber_tpf,
                    aperture_name=self.aperture_name,
                    saturated_aperture_name=self.saturated_aperture_name,
                    max_pixels=self.max_pixels,
                    saturation_tolerance=self.saturation_tolerance,
                    get_hires=self.get_hires,
                    get_nearby=self.get_nearby)
                if data is None:
                    raise Exception("Unable to retrieve target data.")
            self.cadn = data.cadn
            self.time = data.time
            self.model = np.zeros_like(self.time)
            self.fpix = data.fpix
            self.fraw = np.sum(self.fpix, axis=1)
            self.fpix_err = data.fpix_err
            self.fraw_err = np.sqrt(np.sum(self.fpix_err ** 2, axis=1))
            self.nanmask = data.nanmask
            self.badmask = data.badmask
            self.transitmask = np.array([], dtype=int)
            self.outmask = np.array([], dtype=int)
            self.aperture = data.aperture
            self.aperture_name = data.aperture_name
            self.apertures = data.apertures
            self.quality = data.quality
            self.Xpos = data.Xpos
            self.Ypos = data.Ypos
            self.mag = data.mag
            self.pixel_images = data.pixel_images
            self.nearby = data.nearby
            self.hires = data.hires
            self.saturated = data.saturated
            self.meta = data.meta
            self.bkg = data.bkg

            # Update the last breakpoint to the correct value
            self.breakpoints[-1] = len(self.time) - 1

            # Get PLD normalization
            self.get_norm()

            self.loaded = True
python
def load_tpf(self):
        '''
        Loads the target pixel file.

        '''

        if not self.loaded:
            if self._data is not None:
                data = self._data
            else:
                data = self._mission.GetData(
                    self.ID, season=self.season,
                    cadence=self.cadence,
                    clobber=self.clobber_tpf,
                    aperture_name=self.aperture_name,
                    saturated_aperture_name=self.saturated_aperture_name,
                    max_pixels=self.max_pixels,
                    saturation_tolerance=self.saturation_tolerance,
                    get_hires=self.get_hires,
                    get_nearby=self.get_nearby)
                if data is None:
                    raise Exception("Unable to retrieve target data.")
            self.cadn = data.cadn
            self.time = data.time
            self.model = np.zeros_like(self.time)
            self.fpix = data.fpix
            self.fraw = np.sum(self.fpix, axis=1)
            self.fpix_err = data.fpix_err
            self.fraw_err = np.sqrt(np.sum(self.fpix_err ** 2, axis=1))
            self.nanmask = data.nanmask
            self.badmask = data.badmask
            self.transitmask = np.array([], dtype=int)
            self.outmask = np.array([], dtype=int)
            self.aperture = data.aperture
            self.aperture_name = data.aperture_name
            self.apertures = data.apertures
            self.quality = data.quality
            self.Xpos = data.Xpos
            self.Ypos = data.Ypos
            self.mag = data.mag
            self.pixel_images = data.pixel_images
            self.nearby = data.nearby
            self.hires = data.hires
            self.saturated = data.saturated
            self.meta = data.meta
            self.bkg = data.bkg

            # Update the last breakpoint to the correct value
            self.breakpoints[-1] = len(self.time) - 1

            # Get PLD normalization
            self.get_norm()

            self.loaded = True
[ "def", "load_tpf", "(", "self", ")", ":", "if", "not", "self", ".", "loaded", ":", "if", "self", ".", "_data", "is", "not", "None", ":", "data", "=", "self", ".", "_data", "else", ":", "data", "=", "self", ".", "_mission", ".", "GetData", "(", "self", ".", "ID", ",", "season", "=", "self", ".", "season", ",", "cadence", "=", "self", ".", "cadence", ",", "clobber", "=", "self", ".", "clobber_tpf", ",", "aperture_name", "=", "self", ".", "aperture_name", ",", "saturated_aperture_name", "=", "self", ".", "saturated_aperture_name", ",", "max_pixels", "=", "self", ".", "max_pixels", ",", "saturation_tolerance", "=", "self", ".", "saturation_tolerance", ",", "get_hires", "=", "self", ".", "get_hires", ",", "get_nearby", "=", "self", ".", "get_nearby", ")", "if", "data", "is", "None", ":", "raise", "Exception", "(", "\"Unable to retrieve target data.\"", ")", "self", ".", "cadn", "=", "data", ".", "cadn", "self", ".", "time", "=", "data", ".", "time", "self", ".", "model", "=", "np", ".", "zeros_like", "(", "self", ".", "time", ")", "self", ".", "fpix", "=", "data", ".", "fpix", "self", ".", "fraw", "=", "np", ".", "sum", "(", "self", ".", "fpix", ",", "axis", "=", "1", ")", "self", ".", "fpix_err", "=", "data", ".", "fpix_err", "self", ".", "fraw_err", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "self", ".", "fpix_err", "**", "2", ",", "axis", "=", "1", ")", ")", "self", ".", "nanmask", "=", "data", ".", "nanmask", "self", ".", "badmask", "=", "data", ".", "badmask", "self", ".", "transitmask", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "int", ")", "self", ".", "outmask", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "int", ")", "self", ".", "aperture", "=", "data", ".", "aperture", "self", ".", "aperture_name", "=", "data", ".", "aperture_name", "self", ".", "apertures", "=", "data", ".", "apertures", "self", ".", "quality", "=", "data", ".", "quality", "self", ".", "Xpos", "=", "data", ".", "Xpos", "self", ".", "Ypos", "=", "data", ".", "Ypos", "self", ".", "mag", "=", "data", ".", "mag", "self", ".", "pixel_images", "=", "data", ".", "pixel_images", "self", ".", "nearby", "=", "data", ".", "nearby", "self", ".", "hires", "=", "data", ".", "hires", "self", ".", "saturated", "=", "data", ".", "saturated", "self", ".", "meta", "=", "data", ".", "meta", "self", ".", "bkg", "=", "data", ".", "bkg", "# Update the last breakpoint to the correct value", "self", ".", "breakpoints", "[", "-", "1", "]", "=", "len", "(", "self", ".", "time", ")", "-", "1", "# Get PLD normalization", "self", ".", "get_norm", "(", ")", "self", ".", "loaded", "=", "True" ]
Loads the target pixel file.
[ "Loads", "the", "target", "pixel", "file", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L944-L997
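The raw light curve and its uncertainty follow from the pixel-level arrays by summing over the aperture, with the errors added in quadrature. A sketch with stand-in arrays of shape (cadences, pixels):

import numpy as np

fpix = np.random.rand(100, 25)                     # stand-in pixel fluxes
fpix_err = 0.01 * np.sqrt(fpix)                    # stand-in per-pixel errors
fraw = np.sum(fpix, axis=1)                        # simple aperture photometry
fraw_err = np.sqrt(np.sum(fpix_err ** 2, axis=1))  # quadrature sum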
rodluger/everest
everest/detrender.py
Detrender.load_model
def load_model(self, name=None):
        '''
        Loads a saved version of the model.

        '''

        if self.clobber:
            return False
        if name is None:
            name = self.name
        file = os.path.join(self.dir, '%s.npz' % name)
        if os.path.exists(file):
            if not self.is_parent:
                log.info("Loading '%s.npz'..." % name)
            try:
                data = np.load(file)
                for key in data.keys():
                    try:
                        setattr(self, key, data[key][()])
                    except NotImplementedError:
                        pass

                # HACK: Backwards compatibility. Previous version stored
                # the CDPP in the `cdpp6` and `cdpp6_arr` attributes.
                # Let's move them over.
                if hasattr(self, 'cdpp6'):
                    self.cdpp = self.cdpp6
                    del self.cdpp6
                if hasattr(self, 'cdpp6_arr'):
                    self.cdpp_arr = np.array(self.cdpp6_arr)
                    del self.cdpp6_arr
                if hasattr(self, 'gppp'):
                    self.cdppg = self.gppp
                    del self.gppp

                # HACK: At one point we were saving the figure instances,
                # so loading the .npz opened a plotting window. I don't
                # think this is the case any more, so this next line
                # should be removed in the future...
                pl.close()

                return True
            except:
                log.warn("Error loading '%s.npz'." % name)
                exctype, value, tb = sys.exc_info()
                for line in traceback.format_exception_only(exctype, value):
                    ln = line.replace('\n', '')
                    log.warn(ln)
                os.rename(file, file + '.bad')

        if self.is_parent:
            raise Exception(
                'Unable to load `%s` model for target %d.'
                % (self.name, self.ID))

        return False
python
def load_model(self, name=None):
        '''
        Loads a saved version of the model.

        '''

        if self.clobber:
            return False
        if name is None:
            name = self.name
        file = os.path.join(self.dir, '%s.npz' % name)
        if os.path.exists(file):
            if not self.is_parent:
                log.info("Loading '%s.npz'..." % name)
            try:
                data = np.load(file)
                for key in data.keys():
                    try:
                        setattr(self, key, data[key][()])
                    except NotImplementedError:
                        pass

                # HACK: Backwards compatibility. Previous version stored
                # the CDPP in the `cdpp6` and `cdpp6_arr` attributes.
                # Let's move them over.
                if hasattr(self, 'cdpp6'):
                    self.cdpp = self.cdpp6
                    del self.cdpp6
                if hasattr(self, 'cdpp6_arr'):
                    self.cdpp_arr = np.array(self.cdpp6_arr)
                    del self.cdpp6_arr
                if hasattr(self, 'gppp'):
                    self.cdppg = self.gppp
                    del self.gppp

                # HACK: At one point we were saving the figure instances,
                # so loading the .npz opened a plotting window. I don't
                # think this is the case any more, so this next line
                # should be removed in the future...
                pl.close()

                return True
            except:
                log.warn("Error loading '%s.npz'." % name)
                exctype, value, tb = sys.exc_info()
                for line in traceback.format_exception_only(exctype, value):
                    ln = line.replace('\n', '')
                    log.warn(ln)
                os.rename(file, file + '.bad')

        if self.is_parent:
            raise Exception(
                'Unable to load `%s` model for target %d.'
                % (self.name, self.ID))

        return False
[ "def", "load_model", "(", "self", ",", "name", "=", "None", ")", ":", "if", "self", ".", "clobber", ":", "return", "False", "if", "name", "is", "None", ":", "name", "=", "self", ".", "name", "file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "'%s.npz'", "%", "name", ")", "if", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "if", "not", "self", ".", "is_parent", ":", "log", ".", "info", "(", "\"Loading '%s.npz'...\"", "%", "name", ")", "try", ":", "data", "=", "np", ".", "load", "(", "file", ")", "for", "key", "in", "data", ".", "keys", "(", ")", ":", "try", ":", "setattr", "(", "self", ",", "key", ",", "data", "[", "key", "]", "[", "(", ")", "]", ")", "except", "NotImplementedError", ":", "pass", "# HACK: Backwards compatibility. Previous version stored", "# the CDPP in the `cdpp6`", "# and `cdpp6_arr` attributes. Let's move them over.", "if", "hasattr", "(", "self", ",", "'cdpp6'", ")", ":", "self", ".", "cdpp", "=", "self", ".", "cdpp6", "del", "self", ".", "cdpp6", "if", "hasattr", "(", "self", ",", "'cdpp6_arr'", ")", ":", "self", ".", "cdpp_arr", "=", "np", ".", "array", "(", "self", ".", "cdpp6_arr", ")", "del", "self", ".", "cdpp6_arr", "if", "hasattr", "(", "self", ",", "'gppp'", ")", ":", "self", ".", "cdppg", "=", "self", ".", "gppp", "del", "self", ".", "gppp", "# HACK: At one point we were saving the figure instances,", "# so loading the .npz", "# opened a plotting window. I don't think this is the case", "# any more, so this", "# next line should be removed in the future...", "pl", ".", "close", "(", ")", "return", "True", "except", ":", "log", ".", "warn", "(", "\"Error loading '%s.npz'.\"", "%", "name", ")", "exctype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "for", "line", "in", "traceback", ".", "format_exception_only", "(", "exctype", ",", "value", ")", ":", "ln", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", "log", ".", "warn", "(", "ln", ")", "os", ".", "rename", "(", "file", ",", "file", "+", "'.bad'", ")", "if", "self", ".", "is_parent", ":", "raise", "Exception", "(", "'Unable to load `%s` model for target %d.'", "%", "(", "self", ".", "name", ",", "self", ".", "ID", ")", ")", "return", "False" ]
Loads a saved version of the model.
[ "Loads", "a", "saved", "version", "of", "the", "model", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L999-L1056
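The data[key][()] idiom above unwraps the 0-d object arrays that np.savez creates for scalars and dicts. A minimal round trip; note that recent numpy versions also require allow_pickle=True to load object arrays:

import numpy as np

np.savez('model.npz', cdpp=12.3, meta={'season': 6})
data = np.load('model.npz', allow_pickle=True)
cdpp = data['cdpp'][()]   # a plain Python float again
meta = data['meta'][()]   # the original dict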
rodluger/everest
everest/detrender.py
Detrender.save_model
def save_model(self):
        '''
        Saves all of the de-trending information to disk in an `npz` file
        and saves the DVS as a `pdf`.

        '''

        # Save the data
        log.info("Saving data to '%s.npz'..." % self.name)
        d = dict(self.__dict__)
        d.pop('_weights', None)
        d.pop('_A', None)
        d.pop('_B', None)
        d.pop('_f', None)
        d.pop('_mK', None)
        d.pop('K', None)
        d.pop('dvs', None)
        d.pop('clobber', None)
        d.pop('clobber_tpf', None)
        d.pop('_mission', None)
        d.pop('debug', None)
        d.pop('transit_model', None)
        d.pop('_transit_model', None)
        np.savez(os.path.join(self.dir, self.name + '.npz'), **d)

        # Save the DVS
        pdf = PdfPages(os.path.join(self.dir, self.name + '.pdf'))
        pdf.savefig(self.dvs.fig)
        pl.close(self.dvs.fig)
        d = pdf.infodict()
        d['Title'] = 'EVEREST: %s de-trending of %s %d' % (
            self.name, self._mission.IDSTRING, self.ID)
        d['Author'] = 'Rodrigo Luger'
        pdf.close()
python
def save_model(self):
        '''
        Saves all of the de-trending information to disk in an `npz` file
        and saves the DVS as a `pdf`.

        '''

        # Save the data
        log.info("Saving data to '%s.npz'..." % self.name)
        d = dict(self.__dict__)
        d.pop('_weights', None)
        d.pop('_A', None)
        d.pop('_B', None)
        d.pop('_f', None)
        d.pop('_mK', None)
        d.pop('K', None)
        d.pop('dvs', None)
        d.pop('clobber', None)
        d.pop('clobber_tpf', None)
        d.pop('_mission', None)
        d.pop('debug', None)
        d.pop('transit_model', None)
        d.pop('_transit_model', None)
        np.savez(os.path.join(self.dir, self.name + '.npz'), **d)

        # Save the DVS
        pdf = PdfPages(os.path.join(self.dir, self.name + '.pdf'))
        pdf.savefig(self.dvs.fig)
        pl.close(self.dvs.fig)
        d = pdf.infodict()
        d['Title'] = 'EVEREST: %s de-trending of %s %d' % (
            self.name, self._mission.IDSTRING, self.ID)
        d['Author'] = 'Rodrigo Luger'
        pdf.close()
[ "def", "save_model", "(", "self", ")", ":", "# Save the data", "log", ".", "info", "(", "\"Saving data to '%s.npz'...\"", "%", "self", ".", "name", ")", "d", "=", "dict", "(", "self", ".", "__dict__", ")", "d", ".", "pop", "(", "'_weights'", ",", "None", ")", "d", ".", "pop", "(", "'_A'", ",", "None", ")", "d", ".", "pop", "(", "'_B'", ",", "None", ")", "d", ".", "pop", "(", "'_f'", ",", "None", ")", "d", ".", "pop", "(", "'_mK'", ",", "None", ")", "d", ".", "pop", "(", "'K'", ",", "None", ")", "d", ".", "pop", "(", "'dvs'", ",", "None", ")", "d", ".", "pop", "(", "'clobber'", ",", "None", ")", "d", ".", "pop", "(", "'clobber_tpf'", ",", "None", ")", "d", ".", "pop", "(", "'_mission'", ",", "None", ")", "d", ".", "pop", "(", "'debug'", ",", "None", ")", "d", ".", "pop", "(", "'transit_model'", ",", "None", ")", "d", ".", "pop", "(", "'_transit_model'", ",", "None", ")", "np", ".", "savez", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "self", ".", "name", "+", "'.npz'", ")", ",", "*", "*", "d", ")", "# Save the DVS", "pdf", "=", "PdfPages", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "self", ".", "name", "+", "'.pdf'", ")", ")", "pdf", ".", "savefig", "(", "self", ".", "dvs", ".", "fig", ")", "pl", ".", "close", "(", "self", ".", "dvs", ".", "fig", ")", "d", "=", "pdf", ".", "infodict", "(", ")", "d", "[", "'Title'", "]", "=", "'EVEREST: %s de-trending of %s %d'", "%", "(", "self", ".", "name", ",", "self", ".", "_mission", ".", "IDSTRING", ",", "self", ".", "ID", ")", "d", "[", "'Author'", "]", "=", "'Rodrigo Luger'", "pdf", ".", "close", "(", ")" ]
Saves all of the de-trending information to disk in an `npz` file and saves the DVS as a `pdf`.
[ "Saves", "all", "of", "the", "de", "-", "trending", "information", "to", "disk", "in", "an", "npz", "file", "and", "saves", "the", "DVS", "as", "a", "pdf", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1058-L1091
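Writing a figure together with PDF metadata via matplotlib's PdfPages, as save_model does; a self-contained sketch:

import matplotlib.pyplot as pl
from matplotlib.backends.backend_pdf import PdfPages

fig, ax = pl.subplots(1)
ax.plot([0, 1], [0, 1])
with PdfPages('report.pdf') as pdf:
    pdf.savefig(fig)
    d = pdf.infodict()
    d['Title'] = 'Example report'
    d['Author'] = 'Example author'
pl.close(fig)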
rodluger/everest
everest/detrender.py
Detrender.exception_handler
def exception_handler(self, pdb):
        '''
        A custom exception handler.

        :param pdb: If :py:obj:`True`, enters PDB post-mortem \
               mode for debugging.

        '''

        # Grab the exception
        exctype, value, tb = sys.exc_info()

        # Log the error and create a .err file
        errfile = os.path.join(self.dir, self.name + '.err')
        with open(errfile, 'w') as f:
            for line in traceback.format_exception_only(exctype, value):
                ln = line.replace('\n', '')
                log.error(ln)
                print(ln, file=f)
            for line in traceback.format_tb(tb):
                ln = line.replace('\n', '')
                log.error(ln)
                print(ln, file=f)

        # Re-raise?
        if pdb:
            raise
python
def exception_handler(self, pdb):
        '''
        A custom exception handler.

        :param pdb: If :py:obj:`True`, enters PDB post-mortem \
               mode for debugging.

        '''

        # Grab the exception
        exctype, value, tb = sys.exc_info()

        # Log the error and create a .err file
        errfile = os.path.join(self.dir, self.name + '.err')
        with open(errfile, 'w') as f:
            for line in traceback.format_exception_only(exctype, value):
                ln = line.replace('\n', '')
                log.error(ln)
                print(ln, file=f)
            for line in traceback.format_tb(tb):
                ln = line.replace('\n', '')
                log.error(ln)
                print(ln, file=f)

        # Re-raise?
        if pdb:
            raise
[ "def", "exception_handler", "(", "self", ",", "pdb", ")", ":", "# Grab the exception", "exctype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "# Log the error and create a .err file", "errfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "self", ".", "name", "+", "'.err'", ")", "with", "open", "(", "errfile", ",", "'w'", ")", "as", "f", ":", "for", "line", "in", "traceback", ".", "format_exception_only", "(", "exctype", ",", "value", ")", ":", "ln", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", "log", ".", "error", "(", "ln", ")", "print", "(", "ln", ",", "file", "=", "f", ")", "for", "line", "in", "traceback", ".", "format_tb", "(", "tb", ")", ":", "ln", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", "log", ".", "error", "(", "ln", ")", "print", "(", "ln", ",", "file", "=", "f", ")", "# Re-raise?", "if", "pdb", ":", "raise" ]
A custom exception handler.

:param pdb: If :py:obj:`True`, enters PDB post-mortem \
    mode for debugging.
[ "A", "custom", "exception", "handler", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1093-L1119
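The core of the handler is standard-library traceback formatting written line by line to an .err file; reduced to a runnable sketch:

import sys
import traceback

try:
    1 / 0
except Exception:
    exctype, value, tb = sys.exc_info()
    with open('target.err', 'w') as f:
        for line in traceback.format_exception_only(exctype, value):
            print(line.replace('\n', ''), file=f)  # e.g. ZeroDivisionError: ...
        for line in traceback.format_tb(tb):
            print(line.replace('\n', ''), file=f)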
rodluger/everest
everest/detrender.py
Detrender.update_gp
def update_gp(self):
        '''
        Calls :py:func:`gp.GetKernelParams` to optimize the GP and
        obtain the covariance matrix for the regression.

        '''

        self.kernel_params = GetKernelParams(self.time, self.flux,
                                             self.fraw_err,
                                             mask=self.mask,
                                             guess=self.kernel_params,
                                             kernel=self.kernel,
                                             giter=self.giter,
                                             gmaxf=self.gmaxf)
python
def update_gp(self):
        '''
        Calls :py:func:`gp.GetKernelParams` to optimize the GP and
        obtain the covariance matrix for the regression.

        '''

        self.kernel_params = GetKernelParams(self.time, self.flux,
                                             self.fraw_err,
                                             mask=self.mask,
                                             guess=self.kernel_params,
                                             kernel=self.kernel,
                                             giter=self.giter,
                                             gmaxf=self.gmaxf)
[ "def", "update_gp", "(", "self", ")", ":", "self", ".", "kernel_params", "=", "GetKernelParams", "(", "self", ".", "time", ",", "self", ".", "flux", ",", "self", ".", "fraw_err", ",", "mask", "=", "self", ".", "mask", ",", "guess", "=", "self", ".", "kernel_params", ",", "kernel", "=", "self", ".", "kernel", ",", "giter", "=", "self", ".", "giter", ",", "gmaxf", "=", "self", ".", "gmaxf", ")" ]
Calls :py:func:`gp.GetKernelParams` to optimize the GP and obtain the covariance matrix for the regression.
[ "Calls", ":", "py", ":", "func", ":", "gp", ".", "GetKernelParams", "to", "optimize", "the", "GP", "and", "obtain", "the", "covariance", "matrix", "for", "the", "regression", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1121-L1134
rodluger/everest
everest/detrender.py
Detrender.init_kernel
def init_kernel(self):
        '''
        Initializes the covariance matrix with a guess at the GP kernel
        parameters.

        '''

        if self.kernel_params is None:
            X = self.apply_mask(self.fpix / self.flux.reshape(-1, 1))
            y = self.apply_mask(self.flux) - np.dot(X, np.linalg.solve(
                np.dot(X.T, X), np.dot(X.T, self.apply_mask(self.flux))))
            white = np.nanmedian([np.nanstd(c) for c in Chunks(y, 13)])
            amp = self.gp_factor * np.nanstd(y)
            tau = 30.0
            if self.kernel == 'Basic':
                self.kernel_params = [white, amp, tau]
            elif self.kernel == 'QuasiPeriodic':
                self.kernel_params = [white, amp, 1., 20.]
python
def init_kernel(self):
        '''
        Initializes the covariance matrix with a guess at the GP kernel
        parameters.

        '''

        if self.kernel_params is None:
            X = self.apply_mask(self.fpix / self.flux.reshape(-1, 1))
            y = self.apply_mask(self.flux) - np.dot(X, np.linalg.solve(
                np.dot(X.T, X), np.dot(X.T, self.apply_mask(self.flux))))
            white = np.nanmedian([np.nanstd(c) for c in Chunks(y, 13)])
            amp = self.gp_factor * np.nanstd(y)
            tau = 30.0
            if self.kernel == 'Basic':
                self.kernel_params = [white, amp, tau]
            elif self.kernel == 'QuasiPeriodic':
                self.kernel_params = [white, amp, 1., 20.]
[ "def", "init_kernel", "(", "self", ")", ":", "if", "self", ".", "kernel_params", "is", "None", ":", "X", "=", "self", ".", "apply_mask", "(", "self", ".", "fpix", "/", "self", ".", "flux", ".", "reshape", "(", "-", "1", ",", "1", ")", ")", "y", "=", "self", ".", "apply_mask", "(", "self", ".", "flux", ")", "-", "np", ".", "dot", "(", "X", ",", "np", ".", "linalg", ".", "solve", "(", "np", ".", "dot", "(", "X", ".", "T", ",", "X", ")", ",", "np", ".", "dot", "(", "X", ".", "T", ",", "self", ".", "apply_mask", "(", "self", ".", "flux", ")", ")", ")", ")", "white", "=", "np", ".", "nanmedian", "(", "[", "np", ".", "nanstd", "(", "c", ")", "for", "c", "in", "Chunks", "(", "y", ",", "13", ")", "]", ")", "amp", "=", "self", ".", "gp_factor", "*", "np", ".", "nanstd", "(", "y", ")", "tau", "=", "30.0", "if", "self", ".", "kernel", "==", "'Basic'", ":", "self", ".", "kernel_params", "=", "[", "white", ",", "amp", ",", "tau", "]", "elif", "self", ".", "kernel", "==", "'QuasiPeriodic'", ":", "self", ".", "kernel_params", "=", "[", "white", ",", "amp", ",", "1.", ",", "20.", "]" ]
Initializes the covariance matrix with a guess at the GP kernel parameters.
[ "Initializes", "the", "covariance", "matrix", "with", "a", "guess", "at", "the", "GP", "kernel", "parameters", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1136-L1153
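The guess above comes from a first-order PLD fit solved with the normal equations; the scatter of the residual sets the white-noise and amplitude estimates. Self-contained with fake data:

import numpy as np

fpix = np.random.rand(500, 25)                  # stand-in pixel fluxes
flux = np.sum(fpix, axis=1)
X = fpix / flux.reshape(-1, 1)                  # fractional pixel fluxes
w = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, flux))
y = flux - np.dot(X, w)                         # linear-PLD residual
white = np.nanstd(y)                            # white-noise guess
amp = np.nanstd(y)                              # amplitude guess (scaled by gp_factor)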
rodluger/everest
everest/detrender.py
Detrender.run
def run(self):
        '''
        Runs the de-trending step.

        '''

        try:

            # Load raw data
            log.info("Loading target data...")
            self.load_tpf()
            self.mask_planets()
            self.plot_aperture([self.dvs.top_right() for i in range(4)])
            self.init_kernel()
            M = self.apply_mask(np.arange(len(self.time)))
            self.cdppr_arr = self.get_cdpp_arr()
            self.cdpp_arr = np.array(self.cdppr_arr)
            self.cdppv_arr = np.array(self.cdppr_arr)
            self.cdppr = self.get_cdpp()
            self.cdpp = self.cdppr
            self.cdppv = self.cdppr
            log.info("%s (Raw): CDPP = %s" % (self.name, self.cdpps))
            self.plot_lc(self.dvs.left(), info_right='Raw', color='k')

            # Loop
            for n in range(self.pld_order):
                self.lam_idx += 1
                self.get_outliers()
                if n > 0 and self.optimize_gp:
                    self.update_gp()
                self.cross_validate(self.dvs.right(), info='CV%d' % n)
                self.cdpp_arr = self.get_cdpp_arr()
                self.cdppv_arr *= self.cdpp_arr
                self.cdpp = self.get_cdpp()
                self.cdppv = np.nanmean(self.cdppv_arr)
                log.info("%s (%d/%d): CDPP = %s" %
                         (self.name, n + 1, self.pld_order, self.cdpps))
                self.plot_lc(self.dvs.left(),
                             info_right='LC%d' % (n + 1),
                             info_left='%d outliers' % len(self.outmask))

            # Save
            self.finalize()
            self.plot_final(self.dvs.top_left())
            self.plot_info(self.dvs)
            self.save_model()

        except:

            self.exception_handler(self.debug)
python
def run(self):
        '''
        Runs the de-trending step.

        '''

        try:

            # Load raw data
            log.info("Loading target data...")
            self.load_tpf()
            self.mask_planets()
            self.plot_aperture([self.dvs.top_right() for i in range(4)])
            self.init_kernel()
            M = self.apply_mask(np.arange(len(self.time)))
            self.cdppr_arr = self.get_cdpp_arr()
            self.cdpp_arr = np.array(self.cdppr_arr)
            self.cdppv_arr = np.array(self.cdppr_arr)
            self.cdppr = self.get_cdpp()
            self.cdpp = self.cdppr
            self.cdppv = self.cdppr
            log.info("%s (Raw): CDPP = %s" % (self.name, self.cdpps))
            self.plot_lc(self.dvs.left(), info_right='Raw', color='k')

            # Loop
            for n in range(self.pld_order):
                self.lam_idx += 1
                self.get_outliers()
                if n > 0 and self.optimize_gp:
                    self.update_gp()
                self.cross_validate(self.dvs.right(), info='CV%d' % n)
                self.cdpp_arr = self.get_cdpp_arr()
                self.cdppv_arr *= self.cdpp_arr
                self.cdpp = self.get_cdpp()
                self.cdppv = np.nanmean(self.cdppv_arr)
                log.info("%s (%d/%d): CDPP = %s" %
                         (self.name, n + 1, self.pld_order, self.cdpps))
                self.plot_lc(self.dvs.left(),
                             info_right='LC%d' % (n + 1),
                             info_left='%d outliers' % len(self.outmask))

            # Save
            self.finalize()
            self.plot_final(self.dvs.top_left())
            self.plot_info(self.dvs)
            self.save_model()

        except:

            self.exception_handler(self.debug)
[ "def", "run", "(", "self", ")", ":", "try", ":", "# Load raw data", "log", ".", "info", "(", "\"Loading target data...\"", ")", "self", ".", "load_tpf", "(", ")", "self", ".", "mask_planets", "(", ")", "self", ".", "plot_aperture", "(", "[", "self", ".", "dvs", ".", "top_right", "(", ")", "for", "i", "in", "range", "(", "4", ")", "]", ")", "self", ".", "init_kernel", "(", ")", "M", "=", "self", ".", "apply_mask", "(", "np", ".", "arange", "(", "len", "(", "self", ".", "time", ")", ")", ")", "self", ".", "cdppr_arr", "=", "self", ".", "get_cdpp_arr", "(", ")", "self", ".", "cdpp_arr", "=", "np", ".", "array", "(", "self", ".", "cdppr_arr", ")", "self", ".", "cdppv_arr", "=", "np", ".", "array", "(", "self", ".", "cdppr_arr", ")", "self", ".", "cdppr", "=", "self", ".", "get_cdpp", "(", ")", "self", ".", "cdpp", "=", "self", ".", "cdppr", "self", ".", "cdppv", "=", "self", ".", "cdppr", "log", ".", "info", "(", "\"%s (Raw): CDPP = %s\"", "%", "(", "self", ".", "name", ",", "self", ".", "cdpps", ")", ")", "self", ".", "plot_lc", "(", "self", ".", "dvs", ".", "left", "(", ")", ",", "info_right", "=", "'Raw'", ",", "color", "=", "'k'", ")", "# Loop", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", ":", "self", ".", "lam_idx", "+=", "1", "self", ".", "get_outliers", "(", ")", "if", "n", ">", "0", "and", "self", ".", "optimize_gp", ":", "self", ".", "update_gp", "(", ")", "self", ".", "cross_validate", "(", "self", ".", "dvs", ".", "right", "(", ")", ",", "info", "=", "'CV%d'", "%", "n", ")", "self", ".", "cdpp_arr", "=", "self", ".", "get_cdpp_arr", "(", ")", "self", ".", "cdppv_arr", "*=", "self", ".", "cdpp_arr", "self", ".", "cdpp", "=", "self", ".", "get_cdpp", "(", ")", "self", ".", "cdppv", "=", "np", ".", "nanmean", "(", "self", ".", "cdppv_arr", ")", "log", ".", "info", "(", "\"%s (%d/%d): CDPP = %s\"", "%", "(", "self", ".", "name", ",", "n", "+", "1", ",", "self", ".", "pld_order", ",", "self", ".", "cdpps", ")", ")", "self", ".", "plot_lc", "(", "self", ".", "dvs", ".", "left", "(", ")", ",", "info_right", "=", "'LC%d'", "%", "(", "n", "+", "1", ")", ",", "info_left", "=", "'%d outliers'", "%", "len", "(", "self", ".", "outmask", ")", ")", "# Save", "self", ".", "finalize", "(", ")", "self", ".", "plot_final", "(", "self", ".", "dvs", ".", "top_left", "(", ")", ")", "self", ".", "plot_info", "(", "self", ".", "dvs", ")", "self", ".", "save_model", "(", ")", "except", ":", "self", ".", "exception_handler", "(", "self", ".", "debug", ")" ]
Runs the de-trending step.
[ "Runs", "the", "de", "-", "trending", "step", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1170-L1219
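In normal use none of these steps are called by hand: instantiating a model class is expected to run the whole pipeline. A sketch under that assumption, with a hypothetical K2 target ID:

import everest

model = everest.nPLD(201367065)  # loads the TPF, de-trends, saves the model
print(model.cdpp)                # de-trended CDPP in ppm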
rodluger/everest
everest/detrender.py
Detrender.publish
def publish(self, **kwargs):
        '''
        Correct the light curve with the CBVs, generate a cover page
        for the DVS figure, and produce a FITS file for publication.

        '''

        try:

            # HACK: Force these params for publication
            self.cbv_win = 999
            self.cbv_order = 3
            self.cbv_num = 1

            # Get the CBVs
            self._mission.GetTargetCBVs(self)

            # Plot the final corrected light curve
            cbv = CBV()
            self.plot_info(cbv)
            self.plot_cbv(cbv.body(), self.fcor, 'Corrected')
            self.plot_cbv(cbv.body(), self.flux, 'De-trended',
                          show_cbv=True)
            self.plot_cbv(cbv.body(), self.fraw, 'Raw')

            # Save the CBV pdf
            pdf = PdfPages(os.path.join(self.dir, 'cbv.pdf'))
            pdf.savefig(cbv.fig)
            pl.close(cbv.fig)
            d = pdf.infodict()
            d['Title'] = 'EVEREST: %s de-trending of %s %d' % (
                self.name, self._mission.IDSTRING, self.ID)
            d['Author'] = 'Rodrigo Luger'
            pdf.close()

            # Now merge the two PDFs
            assert os.path.exists(os.path.join(
                self.dir, self.name + '.pdf')), \
                "Unable to locate %s.pdf." % self.name
            output = PdfFileWriter()
            pdfOne = PdfFileReader(os.path.join(self.dir, 'cbv.pdf'))
            pdfTwo = PdfFileReader(os.path.join(self.dir,
                                                self.name + '.pdf'))
            # Add the CBV page
            output.addPage(pdfOne.getPage(0))
            # Add the original DVS page
            output.addPage(pdfTwo.getPage(pdfTwo.numPages - 1))
            # Write the final PDF
            outputStream = open(
                os.path.join(self.dir, self._mission.DVSFile(
                    self.ID, self.season, self.cadence)), "wb")
            output.write(outputStream)
            outputStream.close()
            os.remove(os.path.join(self.dir, 'cbv.pdf'))

            # Make the FITS file
            MakeFITS(self)

        except:

            self.exception_handler(self.debug)
python
def publish(self, **kwargs):
        '''
        Correct the light curve with the CBVs, generate a cover page
        for the DVS figure, and produce a FITS file for publication.

        '''

        try:

            # HACK: Force these params for publication
            self.cbv_win = 999
            self.cbv_order = 3
            self.cbv_num = 1

            # Get the CBVs
            self._mission.GetTargetCBVs(self)

            # Plot the final corrected light curve
            cbv = CBV()
            self.plot_info(cbv)
            self.plot_cbv(cbv.body(), self.fcor, 'Corrected')
            self.plot_cbv(cbv.body(), self.flux, 'De-trended',
                          show_cbv=True)
            self.plot_cbv(cbv.body(), self.fraw, 'Raw')

            # Save the CBV pdf
            pdf = PdfPages(os.path.join(self.dir, 'cbv.pdf'))
            pdf.savefig(cbv.fig)
            pl.close(cbv.fig)
            d = pdf.infodict()
            d['Title'] = 'EVEREST: %s de-trending of %s %d' % (
                self.name, self._mission.IDSTRING, self.ID)
            d['Author'] = 'Rodrigo Luger'
            pdf.close()

            # Now merge the two PDFs
            assert os.path.exists(os.path.join(
                self.dir, self.name + '.pdf')), \
                "Unable to locate %s.pdf." % self.name
            output = PdfFileWriter()
            pdfOne = PdfFileReader(os.path.join(self.dir, 'cbv.pdf'))
            pdfTwo = PdfFileReader(os.path.join(self.dir,
                                                self.name + '.pdf'))
            # Add the CBV page
            output.addPage(pdfOne.getPage(0))
            # Add the original DVS page
            output.addPage(pdfTwo.getPage(pdfTwo.numPages - 1))
            # Write the final PDF
            outputStream = open(
                os.path.join(self.dir, self._mission.DVSFile(
                    self.ID, self.season, self.cadence)), "wb")
            output.write(outputStream)
            outputStream.close()
            os.remove(os.path.join(self.dir, 'cbv.pdf'))

            # Make the FITS file
            MakeFITS(self)

        except:

            self.exception_handler(self.debug)
[ "def", "publish", "(", "self", ",", "*", "*", "kwargs", ")", ":", "try", ":", "# HACK: Force these params for publication", "self", ".", "cbv_win", "=", "999", "self", ".", "cbv_order", "=", "3", "self", ".", "cbv_num", "=", "1", "# Get the CBVs", "self", ".", "_mission", ".", "GetTargetCBVs", "(", "self", ")", "# Plot the final corrected light curve", "cbv", "=", "CBV", "(", ")", "self", ".", "plot_info", "(", "cbv", ")", "self", ".", "plot_cbv", "(", "cbv", ".", "body", "(", ")", ",", "self", ".", "fcor", ",", "'Corrected'", ")", "self", ".", "plot_cbv", "(", "cbv", ".", "body", "(", ")", ",", "self", ".", "flux", ",", "'De-trended'", ",", "show_cbv", "=", "True", ")", "self", ".", "plot_cbv", "(", "cbv", ".", "body", "(", ")", ",", "self", ".", "fraw", ",", "'Raw'", ")", "# Save the CBV pdf", "pdf", "=", "PdfPages", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "'cbv.pdf'", ")", ")", "pdf", ".", "savefig", "(", "cbv", ".", "fig", ")", "pl", ".", "close", "(", "cbv", ".", "fig", ")", "d", "=", "pdf", ".", "infodict", "(", ")", "d", "[", "'Title'", "]", "=", "'EVEREST: %s de-trending of %s %d'", "%", "(", "self", ".", "name", ",", "self", ".", "_mission", ".", "IDSTRING", ",", "self", ".", "ID", ")", "d", "[", "'Author'", "]", "=", "'Rodrigo Luger'", "pdf", ".", "close", "(", ")", "# Now merge the two PDFs", "assert", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "self", ".", "name", "+", "'.pdf'", ")", ")", ",", "\"Unable to locate %s.pdf.\"", "%", "self", ".", "name", "output", "=", "PdfFileWriter", "(", ")", "pdfOne", "=", "PdfFileReader", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "'cbv.pdf'", ")", ")", "pdfTwo", "=", "PdfFileReader", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "self", ".", "name", "+", "'.pdf'", ")", ")", "# Add the CBV page", "output", ".", "addPage", "(", "pdfOne", ".", "getPage", "(", "0", ")", ")", "# Add the original DVS page", "output", ".", "addPage", "(", "pdfTwo", ".", "getPage", "(", "pdfTwo", ".", "numPages", "-", "1", ")", ")", "# Write the final PDF", "outputStream", "=", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "self", ".", "_mission", ".", "DVSFile", "(", "self", ".", "ID", ",", "self", ".", "season", ",", "self", ".", "cadence", ")", ")", ",", "\"wb\"", ")", "output", ".", "write", "(", "outputStream", ")", "outputStream", ".", "close", "(", ")", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "'cbv.pdf'", ")", ")", "# Make the FITS file", "MakeFITS", "(", "self", ")", "except", ":", "self", ".", "exception_handler", "(", "self", ".", "debug", ")" ]
Correct the light curve with the CBVs, generate a cover page for the DVS figure, and produce a FITS file for publication.
[ "Correct", "the", "light", "curve", "with", "the", "CBVs", "generate", "a", "cover", "page", "for", "the", "DVS", "figure", "and", "produce", "a", "FITS", "file", "for", "publication", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1221-L1279
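The merge step relies on the legacy PyPDF2 (pre-3.0) reader/writer interface; the equivalent stand-alone operation on two hypothetical files:

from PyPDF2 import PdfFileReader, PdfFileWriter

output = PdfFileWriter()
output.addPage(PdfFileReader('cbv.pdf').getPage(0))  # cover page
dvs = PdfFileReader('dvs.pdf')
output.addPage(dvs.getPage(dvs.numPages - 1))        # last DVS page
with open('merged.pdf', 'wb') as f:
    output.write(f)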
rodluger/everest
everest/detrender.py
nPLD.setup
def setup(self, **kwargs):
        '''
        This is called during production de-trending, prior to calling
        the :py:obj:`Detrender.run()` method.

        :param tuple cdpp_range: If :py:obj:`parent_model` is set, \
               neighbors are selected only if \
               their de-trended CDPPs fall within this range. Default `None`
        :param tuple mag_range: Only select neighbors whose magnitudes are \
               within this range. Default (11., 13.)
        :param int neighbors: The number of neighboring stars to use in \
               the de-trending. The higher this number, the more signals \
               there are and hence the more de-trending information there \
               is. However, the neighboring star signals are regularized \
               together with the target's signals, so adding too many \
               neighbors will inevitably reduce the contribution of the \
               target's own signals, which may reduce performance. \
               Default `10`
        :param str parent_model: By default, :py:class:`nPLD` is run in \
               stand-alone mode. The neighbor signals are computed directly \
               from their TPFs, so there is no need to have run *PLD* on \
               them beforehand. However, if :py:obj:`parent_model` is set, \
               :py:class:`nPLD` will use information from the \
               :py:obj:`parent_model` model of each neighboring star when \
               de-trending. This is particularly useful for identifying \
               outliers in the neighbor signals and preventing them from \
               polluting the current target. Setting :py:obj:`parent_model` \
               to :py:class:`rPLD`, for instance, will use the outlier \
               information in the :py:class:`rPLD` model of the neighbors \
               (this must have been run ahead of time). Note, however, \
               that tests with *K2* data show that including outliers in \
               the neighbor signals actually *improves* the performance, \
               since many of these outliers are associated with events \
               such as thruster firings and are present in all light \
               curves, and therefore *help* in the de-trending. \
               Default `None`

        .. note:: Optionally, the :py:obj:`neighbors` may be specified \
                  directly as a list of target IDs to use. \
                  In this case, users may also provide a list of \
                  :py:class:`everest.utils.DataContainer` instances \
                  corresponding to each of the neighbors in the \
                  :py:obj:`neighbors_data` kwarg.

        '''

        # Get neighbors
        self.parent_model = kwargs.get('parent_model', None)
        neighbors = kwargs.get('neighbors', 10)
        neighbors_data = kwargs.get('neighbors_data', None)
        if hasattr(neighbors, '__len__'):
            self.neighbors = neighbors
        else:
            num_neighbors = neighbors
            self.neighbors = self._mission.GetNeighbors(
                self.ID, season=self.season, cadence=self.cadence,
                model=self.parent_model, neighbors=num_neighbors,
                mag_range=kwargs.get('mag_range', (11., 13.)),
                cdpp_range=kwargs.get('cdpp_range', None),
                aperture_name=self.aperture_name)
            if len(self.neighbors):
                if len(self.neighbors) < num_neighbors:
                    log.warn("%d neighbors requested, but only %d found." %
                             (num_neighbors, len(self.neighbors)))
            elif num_neighbors > 0:
                log.warn("No neighbors found! Running standard PLD...")

        for n, neighbor in enumerate(self.neighbors):
            log.info("Loading data for neighboring target %d..." % neighbor)
            if neighbors_data is not None:
                data = neighbors_data[n]
                data.mask = np.array(
                    list(set(np.concatenate([data.badmask, data.nanmask]))),
                    dtype=int)
                data.fraw = np.sum(data.fpix, axis=1)
            elif self.parent_model is not None and self.cadence == 'lc':
                # We load the `parent` model. The advantage here is that
                # outliers have properly been identified and masked.
                # I haven't tested this on short cadence data, so I'm
                # going to just forbid it...
                data = eval(self.parent_model)(
                    neighbor, mission=self.mission, is_parent=True)
            else:
                # We load the data straight from the TPF. Much quicker,
                # since no model must be run in advance. Downside is we
                # don't know where the outliers are. But based on tests
                # with K2 data, the de-trending is actually *better* if
                # the outliers are included! These are mostly thruster
                # fire events and other artifacts common to all the
                # stars, so it makes sense that we might want to keep
                # them in the design matrix.
                data = self._mission.GetData(
                    neighbor, season=self.season,
                    clobber=self.clobber_tpf, cadence=self.cadence,
                    aperture_name=self.aperture_name,
                    saturated_aperture_name=self.saturated_aperture_name,
                    max_pixels=self.max_pixels,
                    saturation_tolerance=self.saturation_tolerance,
                    get_hires=False, get_nearby=False)
                if data is None:
                    raise Exception(
                        "Unable to retrieve data for neighboring target.")
                data.mask = np.array(
                    list(set(np.concatenate([data.badmask, data.nanmask]))),
                    dtype=int)
                data.fraw = np.sum(data.fpix, axis=1)

            # Compute the linear PLD vectors and interpolate over
            # outliers, NaNs and bad timestamps
            X1 = data.fpix / data.fraw.reshape(-1, 1)
            X1 = Interpolate(data.time, data.mask, X1)
            if self.X1N is None:
                self.X1N = np.array(X1)
            else:
                self.X1N = np.hstack([self.X1N, X1])
            del X1
            del data
python
def setup(self, **kwargs):
        '''
        This is called during production de-trending, prior to calling
        the :py:obj:`Detrender.run()` method.

        :param tuple cdpp_range: If :py:obj:`parent_model` is set, \
               neighbors are selected only if \
               their de-trended CDPPs fall within this range. Default `None`
        :param tuple mag_range: Only select neighbors whose magnitudes are \
               within this range. Default (11., 13.)
        :param int neighbors: The number of neighboring stars to use in \
               the de-trending. The higher this number, the more signals \
               there are and hence the more de-trending information there \
               is. However, the neighboring star signals are regularized \
               together with the target's signals, so adding too many \
               neighbors will inevitably reduce the contribution of the \
               target's own signals, which may reduce performance. \
               Default `10`
        :param str parent_model: By default, :py:class:`nPLD` is run in \
               stand-alone mode. The neighbor signals are computed directly \
               from their TPFs, so there is no need to have run *PLD* on \
               them beforehand. However, if :py:obj:`parent_model` is set, \
               :py:class:`nPLD` will use information from the \
               :py:obj:`parent_model` model of each neighboring star when \
               de-trending. This is particularly useful for identifying \
               outliers in the neighbor signals and preventing them from \
               polluting the current target. Setting :py:obj:`parent_model` \
               to :py:class:`rPLD`, for instance, will use the outlier \
               information in the :py:class:`rPLD` model of the neighbors \
               (this must have been run ahead of time). Note, however, \
               that tests with *K2* data show that including outliers in \
               the neighbor signals actually *improves* the performance, \
               since many of these outliers are associated with events \
               such as thruster firings and are present in all light \
               curves, and therefore *help* in the de-trending. \
               Default `None`

        .. note:: Optionally, the :py:obj:`neighbors` may be specified \
                  directly as a list of target IDs to use. \
                  In this case, users may also provide a list of \
                  :py:class:`everest.utils.DataContainer` instances \
                  corresponding to each of the neighbors in the \
                  :py:obj:`neighbors_data` kwarg.

        '''

        # Get neighbors
        self.parent_model = kwargs.get('parent_model', None)
        neighbors = kwargs.get('neighbors', 10)
        neighbors_data = kwargs.get('neighbors_data', None)
        if hasattr(neighbors, '__len__'):
            self.neighbors = neighbors
        else:
            num_neighbors = neighbors
            self.neighbors = self._mission.GetNeighbors(
                self.ID, season=self.season, cadence=self.cadence,
                model=self.parent_model, neighbors=num_neighbors,
                mag_range=kwargs.get('mag_range', (11., 13.)),
                cdpp_range=kwargs.get('cdpp_range', None),
                aperture_name=self.aperture_name)
            if len(self.neighbors):
                if len(self.neighbors) < num_neighbors:
                    log.warn("%d neighbors requested, but only %d found." %
                             (num_neighbors, len(self.neighbors)))
            elif num_neighbors > 0:
                log.warn("No neighbors found! Running standard PLD...")

        for n, neighbor in enumerate(self.neighbors):
            log.info("Loading data for neighboring target %d..." % neighbor)
            if neighbors_data is not None:
                data = neighbors_data[n]
                data.mask = np.array(
                    list(set(np.concatenate([data.badmask, data.nanmask]))),
                    dtype=int)
                data.fraw = np.sum(data.fpix, axis=1)
            elif self.parent_model is not None and self.cadence == 'lc':
                # We load the `parent` model. The advantage here is that
                # outliers have properly been identified and masked.
                # I haven't tested this on short cadence data, so I'm
                # going to just forbid it...
                data = eval(self.parent_model)(
                    neighbor, mission=self.mission, is_parent=True)
            else:
                # We load the data straight from the TPF. Much quicker,
                # since no model must be run in advance. Downside is we
                # don't know where the outliers are. But based on tests
                # with K2 data, the de-trending is actually *better* if
                # the outliers are included! These are mostly thruster
                # fire events and other artifacts common to all the
                # stars, so it makes sense that we might want to keep
                # them in the design matrix.
                data = self._mission.GetData(
                    neighbor, season=self.season,
                    clobber=self.clobber_tpf, cadence=self.cadence,
                    aperture_name=self.aperture_name,
                    saturated_aperture_name=self.saturated_aperture_name,
                    max_pixels=self.max_pixels,
                    saturation_tolerance=self.saturation_tolerance,
                    get_hires=False, get_nearby=False)
                if data is None:
                    raise Exception(
                        "Unable to retrieve data for neighboring target.")
                data.mask = np.array(
                    list(set(np.concatenate([data.badmask, data.nanmask]))),
                    dtype=int)
                data.fraw = np.sum(data.fpix, axis=1)

            # Compute the linear PLD vectors and interpolate over
            # outliers, NaNs and bad timestamps
            X1 = data.fpix / data.fraw.reshape(-1, 1)
            X1 = Interpolate(data.time, data.mask, X1)
            if self.X1N is None:
                self.X1N = np.array(X1)
            else:
                self.X1N = np.hstack([self.X1N, X1])
            del X1
            del data
[ "def", "setup", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Get neighbors", "self", ".", "parent_model", "=", "kwargs", ".", "get", "(", "'parent_model'", ",", "None", ")", "neighbors", "=", "kwargs", ".", "get", "(", "'neighbors'", ",", "10", ")", "neighbors_data", "=", "kwargs", ".", "get", "(", "'neighbors_data'", ",", "None", ")", "if", "hasattr", "(", "neighbors", ",", "'__len__'", ")", ":", "self", ".", "neighbors", "=", "neighbors", "else", ":", "num_neighbors", "=", "neighbors", "self", ".", "neighbors", "=", "self", ".", "_mission", ".", "GetNeighbors", "(", "self", ".", "ID", ",", "season", "=", "self", ".", "season", ",", "cadence", "=", "self", ".", "cadence", ",", "model", "=", "self", ".", "parent_model", ",", "neighbors", "=", "num_neighbors", ",", "mag_range", "=", "kwargs", ".", "get", "(", "'mag_range'", ",", "(", "11.", ",", "13.", ")", ")", ",", "cdpp_range", "=", "kwargs", ".", "get", "(", "'cdpp_range'", ",", "None", ")", ",", "aperture_name", "=", "self", ".", "aperture_name", ")", "if", "len", "(", "self", ".", "neighbors", ")", ":", "if", "len", "(", "self", ".", "neighbors", ")", "<", "num_neighbors", ":", "log", ".", "warn", "(", "\"%d neighbors requested, but only %d found.\"", "%", "(", "num_neighbors", ",", "len", "(", "self", ".", "neighbors", ")", ")", ")", "elif", "num_neighbors", ">", "0", ":", "log", ".", "warn", "(", "\"No neighbors found! Running standard PLD...\"", ")", "for", "n", ",", "neighbor", "in", "enumerate", "(", "self", ".", "neighbors", ")", ":", "log", ".", "info", "(", "\"Loading data for neighboring target %d...\"", "%", "neighbor", ")", "if", "neighbors_data", "is", "not", "None", ":", "data", "=", "neighbors_data", "[", "n", "]", "data", ".", "mask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "data", ".", "badmask", ",", "data", ".", "nanmask", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "data", ".", "fraw", "=", "np", ".", "sum", "(", "data", ".", "fpix", ",", "axis", "=", "1", ")", "elif", "self", ".", "parent_model", "is", "not", "None", "and", "self", ".", "cadence", "==", "'lc'", ":", "# We load the `parent` model. The advantage here is", "# that outliers have properly been identified and masked.", "# I haven't tested this on short", "# cadence data, so I'm going to just forbid it...", "data", "=", "eval", "(", "self", ".", "parent_model", ")", "(", "neighbor", ",", "mission", "=", "self", ".", "mission", ",", "is_parent", "=", "True", ")", "else", ":", "# We load the data straight from the TPF. Much quicker,", "# since no model must be run in advance. Downside is we", "# don't know where the outliers are. But based", "# on tests with K2 data, the de-trending is actually", "# *better* if the outliers are", "# included! 
These are mostly thruster fire events and other", "# artifacts common to", "# all the stars, so it makes sense that we might want", "# to keep them in the design matrix.", "data", "=", "self", ".", "_mission", ".", "GetData", "(", "neighbor", ",", "season", "=", "self", ".", "season", ",", "clobber", "=", "self", ".", "clobber_tpf", ",", "cadence", "=", "self", ".", "cadence", ",", "aperture_name", "=", "self", ".", "aperture_name", ",", "saturated_aperture_name", "=", "self", ".", "saturated_aperture_name", ",", "max_pixels", "=", "self", ".", "max_pixels", ",", "saturation_tolerance", "=", "self", ".", "saturation_tolerance", ",", "get_hires", "=", "False", ",", "get_nearby", "=", "False", ")", "if", "data", "is", "None", ":", "raise", "Exception", "(", "\"Unable to retrieve data for neighboring target.\"", ")", "data", ".", "mask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "data", ".", "badmask", ",", "data", ".", "nanmask", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "data", ".", "fraw", "=", "np", ".", "sum", "(", "data", ".", "fpix", ",", "axis", "=", "1", ")", "# Compute the linear PLD vectors and interpolate over", "# outliers, NaNs and bad timestamps", "X1", "=", "data", ".", "fpix", "/", "data", ".", "fraw", ".", "reshape", "(", "-", "1", ",", "1", ")", "X1", "=", "Interpolate", "(", "data", ".", "time", ",", "data", ".", "mask", ",", "X1", ")", "if", "self", ".", "X1N", "is", "None", ":", "self", ".", "X1N", "=", "np", ".", "array", "(", "X1", ")", "else", ":", "self", ".", "X1N", "=", "np", ".", "hstack", "(", "[", "self", ".", "X1N", ",", "X1", "]", ")", "del", "X1", "del", "data" ]
This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param tuple cdpp_range: If :py:obj:`parent_model` is set, \ neighbors are selected only if \ their de-trended CDPPs fall within this range. Default `None` :param tuple mag_range: Only select neighbors whose magnitudes are \ within this range. Default (11., 13.) :param int neighbors: The number of neighboring stars to use in \ the de-trending. The higher this number, the more signals \ there are and hence the more de-trending information there is. \ However, the neighboring star signals are regularized together \ with the target's signals, so adding too many neighbors will \ inevitably reduce the contribution of the target's own \ signals, which may reduce performance. Default `10` :param str parent_model: By default, :py:class:`nPLD` is run in \ stand-alone mode. The neighbor signals are computed directly \ from their TPFs, so there is no need to have run *PLD* on them \ beforehand. However, if :py:obj:`parent_model` \ is set, :py:class:`nPLD` will use information from the \ :py:obj:`parent_model` model of each neighboring star when \ de-trending. This is particularly useful for identifying \ outliers in the neighbor signals and preventing them from \ polluting the current target. Setting :py:obj:`parent_model` \ to :py:class:`rPLD`, for instance, will use the \ outlier information in the :py:class:`rPLD` model of the \ neighbors (this must have been run ahead of time). \ Note, however, that tests with *K2* data show that including \ outliers in the neighbor signals actually \ *improves* the performance, since many of these outliers \ are associated with events such as thruster firings and are \ present in all light curves, and therefore *help* in the \ de-trending. Default `None` .. note:: Optionally, the :py:obj:`neighbors` may be specified \ directly as a list of target IDs to use. \ In this case, users may also provide a list of \ :py:class:`everest.utils.DataContainer` instances \ corresponding to each of the neighbors in the \ :py:obj:`neighbors_data` kwarg.
[ "This", "is", "called", "during", "production", "de", "-", "trending", "prior", "to", "calling", "the", ":", "py", ":", "obj", ":", "Detrender", ".", "run", "()", "method", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1336-L1458
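The keyword arguments documented above are exactly what ``setup`` reads out of ``**kwargs``. A minimal usage sketch, assuming the public ``everest.nPLD`` entry point forwards these keywords to ``setup``; the EPIC target and neighbor IDs are purely illustrative::

    import everest

    # Stand-alone mode: select up to 10 neighbors in the 11-13 magnitude
    # range, computing their signals directly from the TPFs.
    star = everest.nPLD(201367065, neighbors=10, mag_range=(11., 13.))

    # Explicit neighbor IDs, reusing outlier masks from a prior rPLD run
    # on each neighbor.
    star = everest.nPLD(201367065, neighbors=[201367066, 201367067],
                        parent_model='rPLD')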
rodluger/everest
everest/detrender.py
iPLD.setup
def setup(self, **kwargs): ''' This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param str parent_model: The name of the model to operate on. \ Default `nPLD` ''' # Load the parent model self.parent_model = kwargs.get('parent_model', 'nPLD') if not self.load_model(self.parent_model): raise Exception('Unable to load parent model.') # Save static copies of the de-trended flux, # the outlier mask and the lambda array self._norm = np.array(self.flux) self.recmask = np.array(self.mask) self.reclam = np.array(self.lam) # Now reset the model params self.optimize_gp = False nseg = len(self.breakpoints) self.lam_idx = -1 self.lam = [ [1e5] + [None for i in range(self.pld_order - 1)] for b in range(nseg)] self.cdpp_arr = np.array([np.nan for b in range(nseg)]) self.cdppr_arr = np.array([np.nan for b in range(nseg)]) self.cdppv_arr = np.array([np.nan for b in range(nseg)]) self.cdpp = np.nan self.cdppr = np.nan self.cdppv = np.nan self.cdppg = np.nan self.model = np.zeros_like(self.time) self.loaded = True
python
def setup(self, **kwargs): ''' This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param str parent_model: The name of the model to operate on. \ Default `nPLD` ''' # Load the parent model self.parent_model = kwargs.get('parent_model', 'nPLD') if not self.load_model(self.parent_model): raise Exception('Unable to load parent model.') # Save static copies of the de-trended flux, # the outlier mask and the lambda array self._norm = np.array(self.flux) self.recmask = np.array(self.mask) self.reclam = np.array(self.lam) # Now reset the model params self.optimize_gp = False nseg = len(self.breakpoints) self.lam_idx = -1 self.lam = [ [1e5] + [None for i in range(self.pld_order - 1)] for b in range(nseg)] self.cdpp_arr = np.array([np.nan for b in range(nseg)]) self.cdppr_arr = np.array([np.nan for b in range(nseg)]) self.cdppv_arr = np.array([np.nan for b in range(nseg)]) self.cdpp = np.nan self.cdppr = np.nan self.cdppv = np.nan self.cdppg = np.nan self.model = np.zeros_like(self.time) self.loaded = True
[ "def", "setup", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Load the parent model", "self", ".", "parent_model", "=", "kwargs", ".", "get", "(", "'parent_model'", ",", "'nPLD'", ")", "if", "not", "self", ".", "load_model", "(", "self", ".", "parent_model", ")", ":", "raise", "Exception", "(", "'Unable to load parent model.'", ")", "# Save static copies of the de-trended flux,", "# the outlier mask and the lambda array", "self", ".", "_norm", "=", "np", ".", "array", "(", "self", ".", "flux", ")", "self", ".", "recmask", "=", "np", ".", "array", "(", "self", ".", "mask", ")", "self", ".", "reclam", "=", "np", ".", "array", "(", "self", ".", "lam", ")", "# Now reset the model params", "self", ".", "optimize_gp", "=", "False", "nseg", "=", "len", "(", "self", ".", "breakpoints", ")", "self", ".", "lam_idx", "=", "-", "1", "self", ".", "lam", "=", "[", "[", "1e5", "]", "+", "[", "None", "for", "i", "in", "range", "(", "self", ".", "pld_order", "-", "1", ")", "]", "for", "b", "in", "range", "(", "nseg", ")", "]", "self", ".", "cdpp_arr", "=", "np", ".", "array", "(", "[", "np", ".", "nan", "for", "b", "in", "range", "(", "nseg", ")", "]", ")", "self", ".", "cdppr_arr", "=", "np", ".", "array", "(", "[", "np", ".", "nan", "for", "b", "in", "range", "(", "nseg", ")", "]", ")", "self", ".", "cdppv_arr", "=", "np", ".", "array", "(", "[", "np", ".", "nan", "for", "b", "in", "range", "(", "nseg", ")", "]", ")", "self", ".", "cdpp", "=", "np", ".", "nan", "self", ".", "cdppr", "=", "np", ".", "nan", "self", ".", "cdppv", "=", "np", ".", "nan", "self", ".", "cdppg", "=", "np", ".", "nan", "self", ".", "model", "=", "np", ".", "zeros_like", "(", "self", ".", "time", ")", "self", ".", "loaded", "=", "True" ]
This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param str parent_model: The name of the model to operate on. \ Default `nPLD`
[ "This", "is", "called", "during", "production", "de", "-", "trending", "prior", "to", "calling", "the", ":", "py", ":", "obj", ":", "Detrender", ".", "run", "()", "method", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1469-L1505
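The reset block above discards the parent model's regularization and rebuilds it segment by segment: a first-order value of ``1e5`` plus ``None`` placeholders for the remaining PLD orders. A tiny self-contained illustration of the resulting shape (``pld_order`` and the breakpoints are arbitrary here)::

    pld_order = 3
    breakpoints = [2000, 4000]  # two light-curve segments

    lam = [[1e5] + [None for _ in range(pld_order - 1)]
           for _ in range(len(breakpoints))]
    print(lam)  # [[100000.0, None, None], [100000.0, None, None]]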
rodluger/everest
everest/detrender.py
pPLD.setup
def setup(self, **kwargs): ''' This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param int piter: The number of iterations in the minimizer. \ Default 3 :param int pmaxf: The maximum number of function evaluations per \ iteration. Default 300 :param float ppert: The fractional amplitude of the perturbation on \ the initial guess. Default 0.1 ''' # Check for saved model clobber = self.clobber self.clobber = False if not self.load_model('nPLD'): raise Exception("Can't find `nPLD` model for target.") self.clobber = clobber # Powell iterations self.piter = kwargs.get('piter', 3) self.pmaxf = kwargs.get('pmaxf', 300) self.ppert = kwargs.get('ppert', 0.1)
python
def setup(self, **kwargs): ''' This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param int piter: The number of iterations in the minimizer. \ Default 3 :param int pmaxf: The maximum number of function evaluations per \ iteration. Default 300 :param float ppert: The fractional amplitude of the perturbation on \ the initial guess. Default 0.1 ''' # Check for saved model clobber = self.clobber self.clobber = False if not self.load_model('nPLD'): raise Exception("Can't find `nPLD` model for target.") self.clobber = clobber # Powell iterations self.piter = kwargs.get('piter', 3) self.pmaxf = kwargs.get('pmaxf', 300) self.ppert = kwargs.get('ppert', 0.1)
[ "def", "setup", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Check for saved model", "clobber", "=", "self", ".", "clobber", "self", ".", "clobber", "=", "False", "if", "not", "self", ".", "load_model", "(", "'nPLD'", ")", ":", "raise", "Exception", "(", "\"Can't find `nPLD` model for target.\"", ")", "self", ".", "clobber", "=", "clobber", "# Powell iterations", "self", ".", "piter", "=", "kwargs", ".", "get", "(", "'piter'", ",", "3", ")", "self", ".", "pmaxf", "=", "kwargs", ".", "get", "(", "'pmaxf'", ",", "300", ")", "self", ".", "ppert", "=", "kwargs", ".", "get", "(", "'ppert'", ",", "0.1", ")" ]
This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param int piter: The number of iterations in the minimizer. \ Default 3 :param int pmaxf: The maximum number of function evaluations per \ iteration. Default 300 :param float ppert: The fractional amplitude of the perturbation on \ the initial guess. Default 0.1
[ "This", "is", "called", "during", "production", "de", "-", "trending", "prior", "to", "calling", "the", ":", "py", ":", "obj", ":", "Detrender", ".", "run", "()", "method", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1515-L1539
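The three knobs above configure a restarted Powell search: ``piter`` restarts, each capped at ``pmaxf`` objective evaluations, starting from an initial guess perturbed by the fractional amplitude ``ppert``. A hedged usage sketch, assuming the public ``everest.pPLD`` entry point forwards these keywords (the target ID is illustrative)::

    import everest

    # Five Powell restarts of 500 evaluations each, with a 20% fractional
    # perturbation on the initial log-lambda guess.
    star = everest.pPLD(201367065, piter=5, pmaxf=500, ppert=0.2)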
rodluger/everest
everest/detrender.py
pPLD.run
def run(self): ''' Runs the de-trending. ''' try: # Plot original self.plot_aperture([self.dvs.top_right() for i in range(4)]) self.plot_lc(self.dvs.left(), info_right='nPLD', color='k') # Cross-validate self.cross_validate(self.dvs.right()) self.compute() self.cdpp_arr = self.get_cdpp_arr() self.cdpp = self.get_cdpp() # Plot new self.plot_lc(self.dvs.left(), info_right='Powell', color='k') # Save self.plot_final(self.dvs.top_left()) self.plot_info(self.dvs) self.save_model() except: self.exception_handler(self.debug)
python
def run(self): ''' Runs the de-trending. ''' try: # Plot original self.plot_aperture([self.dvs.top_right() for i in range(4)]) self.plot_lc(self.dvs.left(), info_right='nPLD', color='k') # Cross-validate self.cross_validate(self.dvs.right()) self.compute() self.cdpp_arr = self.get_cdpp_arr() self.cdpp = self.get_cdpp() # Plot new self.plot_lc(self.dvs.left(), info_right='Powell', color='k') # Save self.plot_final(self.dvs.top_left()) self.plot_info(self.dvs) self.save_model() except: self.exception_handler(self.debug)
[ "def", "run", "(", "self", ")", ":", "try", ":", "# Plot original", "self", ".", "plot_aperture", "(", "[", "self", ".", "dvs", ".", "top_right", "(", ")", "for", "i", "in", "range", "(", "4", ")", "]", ")", "self", ".", "plot_lc", "(", "self", ".", "dvs", ".", "left", "(", ")", ",", "info_right", "=", "'nPLD'", ",", "color", "=", "'k'", ")", "# Cross-validate", "self", ".", "cross_validate", "(", "self", ".", "dvs", ".", "right", "(", ")", ")", "self", ".", "compute", "(", ")", "self", ".", "cdpp_arr", "=", "self", ".", "get_cdpp_arr", "(", ")", "self", ".", "cdpp", "=", "self", ".", "get_cdpp", "(", ")", "# Plot new", "self", ".", "plot_lc", "(", "self", ".", "dvs", ".", "left", "(", ")", ",", "info_right", "=", "'Powell'", ",", "color", "=", "'k'", ")", "# Save", "self", ".", "plot_final", "(", "self", ".", "dvs", ".", "top_left", "(", ")", ")", "self", ".", "plot_info", "(", "self", ".", "dvs", ")", "self", ".", "save_model", "(", ")", "except", ":", "self", ".", "exception_handler", "(", "self", ".", "debug", ")" ]
Runs the de-trending.
[ "Runs", "the", "de", "-", "trending", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1541-L1569
rodluger/everest
everest/detrender.py
pPLD.cross_validate
def cross_validate(self, ax): ''' Performs the cross-validation step. ''' # The CDPP to beat cdpp_opt = self.get_cdpp_arr() # Loop over all chunks for b, brkpt in enumerate(self.breakpoints): log.info("Cross-validating chunk %d/%d..." % (b + 1, len(self.breakpoints))) # Mask for current chunk m = self.get_masked_chunk(b) # Mask transits and outliers time = self.time[m] flux = self.fraw[m] ferr = self.fraw_err[m] med = np.nanmedian(self.fraw) # Setup the GP gp = GP(self.kernel, self.kernel_params, white=False) gp.compute(time, ferr) # The masks masks = list(Chunks(np.arange(0, len(time)), len(time) // self.cdivs)) # The pre-computed matrices pre_v = [self.cv_precompute(mask, b) for mask in masks] # Initialize with the nPLD solution log_lam_opt = np.log10(self.lam[b]) scatter_opt = self.validation_scatter( log_lam_opt, b, masks, pre_v, gp, flux, time, med) log.info("Iter 0/%d: " % (self.piter) + "logL = (%s), s = %.3f" % (", ".join(["%.3f" % l for l in log_lam_opt]), scatter_opt)) # Do `piter` iterations for p in range(self.piter): # Perturb the initial condition a bit log_lam = np.array( np.log10(self.lam[b])) * \ (1 + self.ppert * np.random.randn(len(self.lam[b]))) scatter = self.validation_scatter( log_lam, b, masks, pre_v, gp, flux, time, med) log.info("Initializing at: " + "logL = (%s), s = %.3f" % (", ".join(["%.3f" % l for l in log_lam]), scatter)) # Call the minimizer log_lam, scatter, _, _, _, _ = \ fmin_powell(self.validation_scatter, log_lam, args=(b, masks, pre_v, gp, flux, time, med), maxfun=self.pmaxf, disp=False, full_output=True) # Did it improve the CDPP? tmp = np.array(self.lam[b]) self.lam[b] = 10 ** log_lam self.compute() cdpp = self.get_cdpp_arr()[b] self.lam[b] = tmp if cdpp < cdpp_opt[b]: cdpp_opt[b] = cdpp log_lam_opt = log_lam # Log it log.info("Iter %d/%d: " % (p + 1, self.piter) + "logL = (%s), s = %.3f" % (", ".join(["%.3f" % l for l in log_lam]), scatter)) # The best solution log.info("Found minimum: logL = (%s), s = %.3f" % (", ".join(["%.3f" % l for l in log_lam_opt]), scatter_opt)) self.lam[b] = 10 ** log_lam_opt # We're just going to plot lambda as a function of chunk number bs = np.arange(len(self.breakpoints)) color = ['k', 'b', 'r', 'g', 'y'] for n in range(self.pld_order): ax[0].plot(bs + 1, [np.log10(self.lam[b][n]) for b in bs], '.', color=color[n]) ax[0].plot(bs + 1, [np.log10(self.lam[b][n]) for b in bs], '-', color=color[n], alpha=0.25) ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5) ax[0].margins(0.1, 0.1) ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1)) ax[0].set_xticklabels([]) # Now plot the CDPP cdpp_arr = self.get_cdpp_arr() ax[1].plot(bs + 1, cdpp_arr, 'b.') ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25) ax[1].margins(0.1, 0.1) ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5) ax[1].set_xlabel(r'Chunk', fontsize=5) ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1))
python
def cross_validate(self, ax): ''' Performs the cross-validation step. ''' # The CDPP to beat cdpp_opt = self.get_cdpp_arr() # Loop over all chunks for b, brkpt in enumerate(self.breakpoints): log.info("Cross-validating chunk %d/%d..." % (b + 1, len(self.breakpoints))) # Mask for current chunk m = self.get_masked_chunk(b) # Mask transits and outliers time = self.time[m] flux = self.fraw[m] ferr = self.fraw_err[m] med = np.nanmedian(self.fraw) # Setup the GP gp = GP(self.kernel, self.kernel_params, white=False) gp.compute(time, ferr) # The masks masks = list(Chunks(np.arange(0, len(time)), len(time) // self.cdivs)) # The pre-computed matrices pre_v = [self.cv_precompute(mask, b) for mask in masks] # Initialize with the nPLD solution log_lam_opt = np.log10(self.lam[b]) scatter_opt = self.validation_scatter( log_lam_opt, b, masks, pre_v, gp, flux, time, med) log.info("Iter 0/%d: " % (self.piter) + "logL = (%s), s = %.3f" % (", ".join(["%.3f" % l for l in log_lam_opt]), scatter_opt)) # Do `piter` iterations for p in range(self.piter): # Perturb the initial condition a bit log_lam = np.array( np.log10(self.lam[b])) * \ (1 + self.ppert * np.random.randn(len(self.lam[b]))) scatter = self.validation_scatter( log_lam, b, masks, pre_v, gp, flux, time, med) log.info("Initializing at: " + "logL = (%s), s = %.3f" % (", ".join(["%.3f" % l for l in log_lam]), scatter)) # Call the minimizer log_lam, scatter, _, _, _, _ = \ fmin_powell(self.validation_scatter, log_lam, args=(b, masks, pre_v, gp, flux, time, med), maxfun=self.pmaxf, disp=False, full_output=True) # Did it improve the CDPP? tmp = np.array(self.lam[b]) self.lam[b] = 10 ** log_lam self.compute() cdpp = self.get_cdpp_arr()[b] self.lam[b] = tmp if cdpp < cdpp_opt[b]: cdpp_opt[b] = cdpp log_lam_opt = log_lam # Log it log.info("Iter %d/%d: " % (p + 1, self.piter) + "logL = (%s), s = %.3f" % (", ".join(["%.3f" % l for l in log_lam]), scatter)) # The best solution log.info("Found minimum: logL = (%s), s = %.3f" % (", ".join(["%.3f" % l for l in log_lam_opt]), scatter_opt)) self.lam[b] = 10 ** log_lam_opt # We're just going to plot lambda as a function of chunk number bs = np.arange(len(self.breakpoints)) color = ['k', 'b', 'r', 'g', 'y'] for n in range(self.pld_order): ax[0].plot(bs + 1, [np.log10(self.lam[b][n]) for b in bs], '.', color=color[n]) ax[0].plot(bs + 1, [np.log10(self.lam[b][n]) for b in bs], '-', color=color[n], alpha=0.25) ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5) ax[0].margins(0.1, 0.1) ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1)) ax[0].set_xticklabels([]) # Now plot the CDPP cdpp_arr = self.get_cdpp_arr() ax[1].plot(bs + 1, cdpp_arr, 'b.') ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25) ax[1].margins(0.1, 0.1) ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5) ax[1].set_xlabel(r'Chunk', fontsize=5) ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1))
[ "def", "cross_validate", "(", "self", ",", "ax", ")", ":", "# The CDPP to beat", "cdpp_opt", "=", "self", ".", "get_cdpp_arr", "(", ")", "# Loop over all chunks", "for", "b", ",", "brkpt", "in", "enumerate", "(", "self", ".", "breakpoints", ")", ":", "log", ".", "info", "(", "\"Cross-validating chunk %d/%d...\"", "%", "(", "b", "+", "1", ",", "len", "(", "self", ".", "breakpoints", ")", ")", ")", "# Mask for current chunk", "m", "=", "self", ".", "get_masked_chunk", "(", "b", ")", "# Mask transits and outliers", "time", "=", "self", ".", "time", "[", "m", "]", "flux", "=", "self", ".", "fraw", "[", "m", "]", "ferr", "=", "self", ".", "fraw_err", "[", "m", "]", "med", "=", "np", ".", "nanmedian", "(", "self", ".", "fraw", ")", "# Setup the GP", "gp", "=", "GP", "(", "self", ".", "kernel", ",", "self", ".", "kernel_params", ",", "white", "=", "False", ")", "gp", ".", "compute", "(", "time", ",", "ferr", ")", "# The masks", "masks", "=", "list", "(", "Chunks", "(", "np", ".", "arange", "(", "0", ",", "len", "(", "time", ")", ")", ",", "len", "(", "time", ")", "//", "self", ".", "cdivs", ")", ")", "# The pre-computed matrices", "pre_v", "=", "[", "self", ".", "cv_precompute", "(", "mask", ",", "b", ")", "for", "mask", "in", "masks", "]", "# Initialize with the nPLD solution", "log_lam_opt", "=", "np", ".", "log10", "(", "self", ".", "lam", "[", "b", "]", ")", "scatter_opt", "=", "self", ".", "validation_scatter", "(", "log_lam_opt", ",", "b", ",", "masks", ",", "pre_v", ",", "gp", ",", "flux", ",", "time", ",", "med", ")", "log", ".", "info", "(", "\"Iter 0/%d: \"", "%", "(", "self", ".", "piter", ")", "+", "\"logL = (%s), s = %.3f\"", "%", "(", "\", \"", ".", "join", "(", "[", "\"%.3f\"", "%", "l", "for", "l", "in", "log_lam_opt", "]", ")", ",", "scatter_opt", ")", ")", "# Do `piter` iterations", "for", "p", "in", "range", "(", "self", ".", "piter", ")", ":", "# Perturb the initial condition a bit", "log_lam", "=", "np", ".", "array", "(", "np", ".", "log10", "(", "self", ".", "lam", "[", "b", "]", ")", ")", "*", "(", "1", "+", "self", ".", "ppert", "*", "np", ".", "random", ".", "randn", "(", "len", "(", "self", ".", "lam", "[", "b", "]", ")", ")", ")", "scatter", "=", "self", ".", "validation_scatter", "(", "log_lam", ",", "b", ",", "masks", ",", "pre_v", ",", "gp", ",", "flux", ",", "time", ",", "med", ")", "log", ".", "info", "(", "\"Initializing at: \"", "+", "\"logL = (%s), s = %.3f\"", "%", "(", "\", \"", ".", "join", "(", "[", "\"%.3f\"", "%", "l", "for", "l", "in", "log_lam", "]", ")", ",", "scatter", ")", ")", "# Call the minimizer", "log_lam", ",", "scatter", ",", "_", ",", "_", ",", "_", ",", "_", "=", "fmin_powell", "(", "self", ".", "validation_scatter", ",", "log_lam", ",", "args", "=", "(", "b", ",", "masks", ",", "pre_v", ",", "gp", ",", "flux", ",", "time", ",", "med", ")", ",", "maxfun", "=", "self", ".", "pmaxf", ",", "disp", "=", "False", ",", "full_output", "=", "True", ")", "# Did it improve the CDPP?", "tmp", "=", "np", ".", "array", "(", "self", ".", "lam", "[", "b", "]", ")", "self", ".", "lam", "[", "b", "]", "=", "10", "**", "log_lam", "self", ".", "compute", "(", ")", "cdpp", "=", "self", ".", "get_cdpp_arr", "(", ")", "[", "b", "]", "self", ".", "lam", "[", "b", "]", "=", "tmp", "if", "cdpp", "<", "cdpp_opt", "[", "b", "]", ":", "cdpp_opt", "[", "b", "]", "=", "cdpp", "log_lam_opt", "=", "log_lam", "# Log it", "log", ".", "info", "(", "\"Iter %d/%d: \"", "%", "(", "p", "+", "1", ",", "self", ".", "piter", ")", "+", "\"logL = (%s), s = %.3f\"", "%", "(", "\", 
\"", ".", "join", "(", "[", "\"%.3f\"", "%", "l", "for", "l", "in", "log_lam", "]", ")", ",", "scatter", ")", ")", "# The best solution", "log", ".", "info", "(", "\"Found minimum: logL = (%s), s = %.3f\"", "%", "(", "\", \"", ".", "join", "(", "[", "\"%.3f\"", "%", "l", "for", "l", "in", "log_lam_opt", "]", ")", ",", "scatter_opt", ")", ")", "self", ".", "lam", "[", "b", "]", "=", "10", "**", "log_lam_opt", "# We're just going to plot lambda as a function of chunk number", "bs", "=", "np", ".", "arange", "(", "len", "(", "self", ".", "breakpoints", ")", ")", "color", "=", "[", "'k'", ",", "'b'", ",", "'r'", ",", "'g'", ",", "'y'", "]", "for", "n", "in", "range", "(", "self", ".", "pld_order", ")", ":", "ax", "[", "0", "]", ".", "plot", "(", "bs", "+", "1", ",", "[", "np", ".", "log10", "(", "self", ".", "lam", "[", "b", "]", "[", "n", "]", ")", "for", "b", "in", "bs", "]", ",", "'.'", ",", "color", "=", "color", "[", "n", "]", ")", "ax", "[", "0", "]", ".", "plot", "(", "bs", "+", "1", ",", "[", "np", ".", "log10", "(", "self", ".", "lam", "[", "b", "]", "[", "n", "]", ")", "for", "b", "in", "bs", "]", ",", "'-'", ",", "color", "=", "color", "[", "n", "]", ",", "alpha", "=", "0.25", ")", "ax", "[", "0", "]", ".", "set_ylabel", "(", "r'$\\log\\Lambda$'", ",", "fontsize", "=", "5", ")", "ax", "[", "0", "]", ".", "margins", "(", "0.1", ",", "0.1", ")", "ax", "[", "0", "]", ".", "set_xticks", "(", "np", ".", "arange", "(", "1", ",", "len", "(", "self", ".", "breakpoints", ")", "+", "1", ")", ")", "ax", "[", "0", "]", ".", "set_xticklabels", "(", "[", "]", ")", "# Now plot the CDPP", "cdpp_arr", "=", "self", ".", "get_cdpp_arr", "(", ")", "ax", "[", "1", "]", ".", "plot", "(", "bs", "+", "1", ",", "cdpp_arr", ",", "'b.'", ")", "ax", "[", "1", "]", ".", "plot", "(", "bs", "+", "1", ",", "cdpp_arr", ",", "'b-'", ",", "alpha", "=", "0.25", ")", "ax", "[", "1", "]", ".", "margins", "(", "0.1", ",", "0.1", ")", "ax", "[", "1", "]", ".", "set_ylabel", "(", "r'Scatter (ppm)'", ",", "fontsize", "=", "5", ")", "ax", "[", "1", "]", ".", "set_xlabel", "(", "r'Chunk'", ",", "fontsize", "=", "5", ")", "ax", "[", "1", "]", ".", "set_xticks", "(", "np", ".", "arange", "(", "1", ",", "len", "(", "self", ".", "breakpoints", ")", "+", "1", ")", ")" ]
Performs the cross-validation step.
[ "Performs", "the", "cross", "-", "validation", "step", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1571-L1676
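Stripped of the PLD bookkeeping, the loop above is a multi-start Powell minimization: perturb the incumbent solution, polish it with ``scipy.optimize.fmin_powell`` under a capped evaluation budget, and keep the result only if it beats the best objective seen so far. A self-contained sketch of that pattern on a toy objective (all names are local to the example)::

    import numpy as np
    from scipy.optimize import fmin_powell

    def objective(x):
        # Toy stand-in for the validation scatter.
        return np.sum((x - 1.0) ** 2) + 0.1 * np.sum(np.sin(5 * x) ** 2)

    x_opt = np.full(3, 0.5)   # incumbent (plays the role of the nPLD solution)
    f_opt = objective(x_opt)
    piter, pmaxf, ppert = 3, 300, 0.1

    for _ in range(piter):
        # Fractional perturbation of the incumbent, as in the source.
        x0 = x_opt * (1 + ppert * np.random.randn(x_opt.size))
        # fmin_powell with full_output=True returns six values, matching
        # the unpacking in cross_validate.
        x, f, _, _, _, _ = fmin_powell(objective, x0, maxfun=pmaxf,
                                       disp=False, full_output=True)
        if f < f_opt:
            x_opt, f_opt = x, f

    print(x_opt, f_opt)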
rodluger/everest
everest/detrender.py
pPLD.validation_scatter
def validation_scatter(self, log_lam, b, masks, pre_v, gp, flux, time, med): ''' Computes the scatter in the validation set. ''' # Update the lambda matrix self.lam[b] = 10 ** log_lam # Validation set scatter scatter = [None for i in range(len(masks))] for i in range(len(masks)): model = self.cv_compute(b, *pre_v[i]) try: gpm, _ = gp.predict(flux - model - med, time[masks[i]]) except ValueError: # Sometimes the model can have NaNs if # `lambda` is a crazy value return 1.e30 fdet = (flux - model)[masks[i]] - gpm scatter[i] = 1.e6 * (1.4826 * np.nanmedian(np.abs(fdet / med - np.nanmedian(fdet / med))) / np.sqrt(len(masks[i]))) return np.max(scatter)
python
def validation_scatter(self, log_lam, b, masks, pre_v, gp, flux, time, med): ''' Computes the scatter in the validation set. ''' # Update the lambda matrix self.lam[b] = 10 ** log_lam # Validation set scatter scatter = [None for i in range(len(masks))] for i in range(len(masks)): model = self.cv_compute(b, *pre_v[i]) try: gpm, _ = gp.predict(flux - model - med, time[masks[i]]) except ValueError: # Sometimes the model can have NaNs if # `lambda` is a crazy value return 1.e30 fdet = (flux - model)[masks[i]] - gpm scatter[i] = 1.e6 * (1.4826 * np.nanmedian(np.abs(fdet / med - np.nanmedian(fdet / med))) / np.sqrt(len(masks[i]))) return np.max(scatter)
[ "def", "validation_scatter", "(", "self", ",", "log_lam", ",", "b", ",", "masks", ",", "pre_v", ",", "gp", ",", "flux", ",", "time", ",", "med", ")", ":", "# Update the lambda matrix", "self", ".", "lam", "[", "b", "]", "=", "10", "**", "log_lam", "# Validation set scatter", "scatter", "=", "[", "None", "for", "i", "in", "range", "(", "len", "(", "masks", ")", ")", "]", "for", "i", "in", "range", "(", "len", "(", "masks", ")", ")", ":", "model", "=", "self", ".", "cv_compute", "(", "b", ",", "*", "pre_v", "[", "i", "]", ")", "try", ":", "gpm", ",", "_", "=", "gp", ".", "predict", "(", "flux", "-", "model", "-", "med", ",", "time", "[", "masks", "[", "i", "]", "]", ")", "except", "ValueError", ":", "# Sometimes the model can have NaNs if", "# `lambda` is a crazy value", "return", "1.e30", "fdet", "=", "(", "flux", "-", "model", ")", "[", "masks", "[", "i", "]", "]", "-", "gpm", "scatter", "[", "i", "]", "=", "1.e6", "*", "(", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "fdet", "/", "med", "-", "np", ".", "nanmedian", "(", "fdet", "/", "med", ")", ")", ")", "/", "np", ".", "sqrt", "(", "len", "(", "masks", "[", "i", "]", ")", ")", ")", "return", "np", ".", "max", "(", "scatter", ")" ]
Computes the scatter in the validation set.
[ "Computes", "the", "scatter", "in", "the", "validation", "set", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L1678-L1703
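The statistic returned above is a robust standard error in parts per million: 1.4826 times the median absolute deviation of the median-normalized residuals, divided by the square root of the number of points. A self-contained numpy check of that formula on synthetic residuals::

    import numpy as np

    rng = np.random.default_rng(0)
    med = 10000.0                      # median raw flux
    fdet = rng.normal(0.0, 5.0, 500)   # de-trended residuals, flux units

    x = fdet / med
    mad = np.nanmedian(np.abs(x - np.nanmedian(x)))
    scatter_ppm = 1.e6 * 1.4826 * mad / np.sqrt(len(fdet))
    print(scatter_ppm)  # robust standard error of the mean, in ppm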
lsbardel/python-stdnet
stdnet/utils/populate.py
populate
def populate(datatype='string', size=10, start=None, end=None, converter=None, choice_from=None, **kwargs): '''Utility function for populating lists with random data. Useful for populating database with data for fuzzy testing. Supported data-types * *string* For example:: populate('string',100, min_len=3, max_len=10) create a 100 elements list with random strings with random length between 3 and 10 * *date* For example:: from datetime import date populate('date',200, start = date(1997,1,1), end = date.today()) create a 200 elements list with random datetime.date objects between *start* and *end* * *integer* For example:: populate('integer',200, start = 0, end = 1000) create a 200 elements list with random int between *start* and *end* * *float* For example:: populate('float', 200, start = 0, end = 10) create a 200 elements list with random floats between *start* and *end* * *choice* (elements of an iterable) For example:: populate('choice', 200, choice_from = ['pippo','pluto','blob']) create a 200 elements list with random elements from *choice_from*. ''' data = [] converter = converter or def_converter if datatype == 'date': date_end = end or date.today() date_start = start or date(1990, 1, 1) delta = date_end - date_start for s in range(size): data.append(converter(random_date(date_start, delta.days))) elif datatype == 'integer': start = start or 0 end = end or 1000000 for s in range(size): data.append(converter(randint(start, end))) elif datatype == 'float': start = start or 0 end = end or 10 for s in range(size): data.append(converter(uniform(start, end))) elif datatype == 'choice' and choice_from: for s in range(size): data.append(choice(list(choice_from))) else: for s in range(size): data.append(converter(random_string(**kwargs))) return data
python
def populate(datatype='string', size=10, start=None, end=None, converter=None, choice_from=None, **kwargs): '''Utility function for populating lists with random data. Useful for populating database with data for fuzzy testing. Supported data-types * *string* For example:: populate('string',100, min_len=3, max_len=10) create a 100 elements list with random strings with random length between 3 and 10 * *date* For example:: from datetime import date populate('date',200, start = date(1997,1,1), end = date.today()) create a 200 elements list with random datetime.date objects between *start* and *end* * *integer* For example:: populate('integer',200, start = 0, end = 1000) create a 200 elements list with random int between *start* and *end* * *float* For example:: populate('float', 200, start = 0, end = 10) create a 200 elements list with random floats between *start* and *end* * *choice* (elements of an iterable) For example:: populate('choice', 200, choice_from = ['pippo','pluto','blob']) create a 200 elements list with random elements from *choice_from*. ''' data = [] converter = converter or def_converter if datatype == 'date': date_end = end or date.today() date_start = start or date(1990, 1, 1) delta = date_end - date_start for s in range(size): data.append(converter(random_date(date_start, delta.days))) elif datatype == 'integer': start = start or 0 end = end or 1000000 for s in range(size): data.append(converter(randint(start, end))) elif datatype == 'float': start = start or 0 end = end or 10 for s in range(size): data.append(converter(uniform(start, end))) elif datatype == 'choice' and choice_from: for s in range(size): data.append(choice(list(choice_from))) else: for s in range(size): data.append(converter(random_string(**kwargs))) return data
[ "def", "populate", "(", "datatype", "=", "'string'", ",", "size", "=", "10", ",", "start", "=", "None", ",", "end", "=", "None", ",", "converter", "=", "None", ",", "choice_from", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "[", "]", "converter", "=", "converter", "or", "def_converter", "if", "datatype", "==", "'date'", ":", "date_end", "=", "end", "or", "date", ".", "today", "(", ")", "date_start", "=", "start", "or", "date", "(", "1990", ",", "1", ",", "1", ")", "delta", "=", "date_end", "-", "date_start", "for", "s", "in", "range", "(", "size", ")", ":", "data", ".", "append", "(", "converter", "(", "random_date", "(", "date_start", ",", "delta", ".", "days", ")", ")", ")", "elif", "datatype", "==", "'integer'", ":", "start", "=", "start", "or", "0", "end", "=", "end", "or", "1000000", "for", "s", "in", "range", "(", "size", ")", ":", "data", ".", "append", "(", "converter", "(", "randint", "(", "start", ",", "end", ")", ")", ")", "elif", "datatype", "==", "'float'", ":", "start", "=", "start", "or", "0", "end", "=", "end", "or", "10", "for", "s", "in", "range", "(", "size", ")", ":", "data", ".", "append", "(", "converter", "(", "uniform", "(", "start", ",", "end", ")", ")", ")", "elif", "datatype", "==", "'choice'", "and", "choice_from", ":", "for", "s", "in", "range", "(", "size", ")", ":", "data", ".", "append", "(", "choice", "(", "list", "(", "choice_from", ")", ")", ")", "else", ":", "for", "s", "in", "range", "(", "size", ")", ":", "data", ".", "append", "(", "converter", "(", "random_string", "(", "*", "*", "kwargs", ")", ")", ")", "return", "data" ]
Utility function for populating lists with random data. Useful for populating database with data for fuzzy testing. Supported data-types * *string* For example:: populate('string',100, min_len=3, max_len=10) create a 100 elements list with random strings with random length between 3 and 10 * *date* For example:: from datetime import date populate('date',200, start = date(1997,1,1), end = date.today()) create a 200 elements list with random datetime.date objects between *start* and *end* * *integer* For example:: populate('integer',200, start = 0, end = 1000) create a 200 elements list with random int between *start* and *end* * *float* For example:: populate('float', 200, start = 0, end = 10) create a 200 elements list with random floats between *start* and *end* * *choice* (elements of an iterable) For example:: populate('choice', 200, choice_from = ['pippo','pluto','blob']) create a 200 elements list with random elements from *choice_from*.
[ "Utility", "function", "for", "populating", "lists", "with", "random", "data", ".", "Useful", "for", "populating", "database", "with", "data", "for", "fuzzy", "testing", ".", "Supported", "data", "-", "types", "*", "*", "string", "*", "For", "example", "::", "populate", "(", "string", "100", "min_len", "=", "3", "max_len", "=", "10", ")", "create", "a", "100", "elements", "list", "with", "random", "strings", "with", "random", "length", "between", "3", "and", "10", "*", "*", "date", "*", "For", "example", "::", "from", "datetime", "import", "date", "populate", "(", "date", "200", "start", "=", "date", "(", "1997", "1", "1", ")", "end", "=", "date", ".", "today", "()", ")", "create", "a", "200", "elements", "list", "with", "random", "datetime", ".", "date", "objects", "between", "*", "start", "*", "and", "*", "end", "*", "*", "*", "integer", "*", "For", "example", "::", "populate", "(", "integer", "200", "start", "=", "0", "end", "=", "1000", ")", "create", "a", "200", "elements", "list", "with", "random", "int", "between", "*", "start", "*", "and", "*", "end", "*", "*", "*", "float", "*", "For", "example", "::", "populate", "(", "float", "200", "start", "=", "0", "end", "=", "10", ")", "create", "a", "200", "elements", "list", "with", "random", "floats", "between", "*", "start", "*", "and", "*", "end", "*", "*", "*", "choice", "*", "(", "elements", "of", "an", "iterable", ")", "For", "example", "::", "populate", "(", "choice", "200", "choice_from", "=", "[", "pippo", "pluto", "blob", "]", ")", "create", "a", "200", "elements", "list", "with", "random", "elements", "from", "*", "choice_from", "*", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/populate.py#L16-L84
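Because each call returns a plain list, the supported data types compose naturally into fake records for fuzz testing. A short combined example, assuming ``populate`` is imported from the module path shown above::

    from datetime import date
    from stdnet.utils.populate import populate

    size = 50
    names = populate('string', size, min_len=3, max_len=10)
    bdays = populate('date', size, start=date(1980, 1, 1), end=date.today())
    score = populate('float', size, start=0, end=10)
    group = populate('choice', size, choice_from=['red', 'green', 'blue'])

    records = list(zip(names, bdays, score, group))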
rodluger/everest
everest/search.py
Search
def Search(star, pos_tol=2.5, neg_tol=50., **ps_kwargs): ''' NOTE: `pos_tol` is the positive (i.e., above the median) outlier tolerance in standard deviations. NOTE: `neg_tol` is the negative (i.e., below the median) outlier tolerance in standard deviations. ''' # Smooth the light curve t = np.delete(star.time, np.concatenate([star.nanmask, star.badmask])) f = np.delete(star.flux, np.concatenate([star.nanmask, star.badmask])) f = SavGol(f) med = np.nanmedian(f) # Kill positive outliers MAD = 1.4826 * np.nanmedian(np.abs(f - med)) pos_inds = np.where((f > med + pos_tol * MAD))[0] pos_inds = np.array([np.argmax(star.time == t[i]) for i in pos_inds]) # Kill negative outliers MAD = 1.4826 * np.nanmedian(np.abs(f - med)) neg_inds = np.where((f < med - neg_tol * MAD))[0] neg_inds = np.array([np.argmax(star.time == t[i]) for i in neg_inds]) # Replace the star.outmask array star.outmask = np.concatenate([neg_inds, pos_inds]) star.transitmask = np.array([], dtype=int) # Delta chi squared TIME = np.array([]) DEPTH = np.array([]) VARDEPTH = np.array([]) DELCHISQ = np.array([]) for b, brkpt in enumerate(star.breakpoints): # Log log.info('Running chunk %d/%d...' % (b + 1, len(star.breakpoints))) # Masks for current chunk m = star.get_masked_chunk(b, pad=False) # This block of the masked covariance matrix K = GetCovariance(star.kernel, star.kernel_params, star.time[m], star.fraw_err[m]) # The masked X.L.X^T term A = np.zeros((len(m), len(m))) for n in range(star.pld_order): XM = star.X(n, m) A += star.lam[b][n] * np.dot(XM, XM.T) K += A CDK = cho_factor(K) # Baseline med = np.nanmedian(star.fraw[m]) lnL0 = -0.5 * np.dot(star.fraw[m], cho_solve(CDK, star.fraw[m])) dt = np.median(np.diff(star.time[m])) # Create a uniform time array and get indices of missing cadences tol = np.nanmedian(np.diff(star.time[m])) / 5. tunif = np.arange(star.time[m][0], star.time[m][-1] + tol, dt) tnogaps = np.array(tunif) gaps = [] j = 0 for i, t in enumerate(tunif): if np.abs(star.time[m][j] - t) < tol: tnogaps[i] = star.time[m][j] j += 1 if j == len(star.time[m]): break else: gaps.append(i) gaps = np.array(gaps, dtype=int) # Compute the normalized transit model for a single transit transit_model = TransitShape(**ps_kwargs) # Now roll the transit model across each cadence dchisq = np.zeros(len(tnogaps)) d = np.zeros(len(tnogaps)) vard = np.zeros(len(tnogaps)) for i in prange(len(tnogaps)): trn = transit_model(tnogaps, tnogaps[i]) trn = np.delete(trn, gaps) trn *= med vard[i] = 1. / np.dot(trn, cho_solve(CDK, trn)) if not np.isfinite(vard[i]): vard[i] = np.nan d[i] = np.nan dchisq[i] = np.nan continue d[i] = vard[i] * np.dot(trn, cho_solve(CDK, star.fraw[m])) r = star.fraw[m] - trn * d[i] lnL = -0.5 * np.dot(r, cho_solve(CDK, r)) dchisq[i] = -2 * (lnL0 - lnL) TIME = np.append(TIME, tnogaps) DEPTH = np.append(DEPTH, d) VARDEPTH = np.append(VARDEPTH, vard) DELCHISQ = np.append(DELCHISQ, dchisq) return TIME, DEPTH, VARDEPTH, DELCHISQ
python
def Search(star, pos_tol=2.5, neg_tol=50., **ps_kwargs): ''' NOTE: `pos_tol` is the positive (i.e., above the median) outlier tolerance in standard deviations. NOTE: `neg_tol` is the negative (i.e., below the median) outlier tolerance in standard deviations. ''' # Smooth the light curve t = np.delete(star.time, np.concatenate([star.nanmask, star.badmask])) f = np.delete(star.flux, np.concatenate([star.nanmask, star.badmask])) f = SavGol(f) med = np.nanmedian(f) # Kill positive outliers MAD = 1.4826 * np.nanmedian(np.abs(f - med)) pos_inds = np.where((f > med + pos_tol * MAD))[0] pos_inds = np.array([np.argmax(star.time == t[i]) for i in pos_inds]) # Kill negative outliers MAD = 1.4826 * np.nanmedian(np.abs(f - med)) neg_inds = np.where((f < med - neg_tol * MAD))[0] neg_inds = np.array([np.argmax(star.time == t[i]) for i in neg_inds]) # Replace the star.outmask array star.outmask = np.concatenate([neg_inds, pos_inds]) star.transitmask = np.array([], dtype=int) # Delta chi squared TIME = np.array([]) DEPTH = np.array([]) VARDEPTH = np.array([]) DELCHISQ = np.array([]) for b, brkpt in enumerate(star.breakpoints): # Log log.info('Running chunk %d/%d...' % (b + 1, len(star.breakpoints))) # Masks for current chunk m = star.get_masked_chunk(b, pad=False) # This block of the masked covariance matrix K = GetCovariance(star.kernel, star.kernel_params, star.time[m], star.fraw_err[m]) # The masked X.L.X^T term A = np.zeros((len(m), len(m))) for n in range(star.pld_order): XM = star.X(n, m) A += star.lam[b][n] * np.dot(XM, XM.T) K += A CDK = cho_factor(K) # Baseline med = np.nanmedian(star.fraw[m]) lnL0 = -0.5 * np.dot(star.fraw[m], cho_solve(CDK, star.fraw[m])) dt = np.median(np.diff(star.time[m])) # Create a uniform time array and get indices of missing cadences tol = np.nanmedian(np.diff(star.time[m])) / 5. tunif = np.arange(star.time[m][0], star.time[m][-1] + tol, dt) tnogaps = np.array(tunif) gaps = [] j = 0 for i, t in enumerate(tunif): if np.abs(star.time[m][j] - t) < tol: tnogaps[i] = star.time[m][j] j += 1 if j == len(star.time[m]): break else: gaps.append(i) gaps = np.array(gaps, dtype=int) # Compute the normalized transit model for a single transit transit_model = TransitShape(**ps_kwargs) # Now roll the transit model across each cadence dchisq = np.zeros(len(tnogaps)) d = np.zeros(len(tnogaps)) vard = np.zeros(len(tnogaps)) for i in prange(len(tnogaps)): trn = transit_model(tnogaps, tnogaps[i]) trn = np.delete(trn, gaps) trn *= med vard[i] = 1. / np.dot(trn, cho_solve(CDK, trn)) if not np.isfinite(vard[i]): vard[i] = np.nan d[i] = np.nan dchisq[i] = np.nan continue d[i] = vard[i] * np.dot(trn, cho_solve(CDK, star.fraw[m])) r = star.fraw[m] - trn * d[i] lnL = -0.5 * np.dot(r, cho_solve(CDK, r)) dchisq[i] = -2 * (lnL0 - lnL) TIME = np.append(TIME, tnogaps) DEPTH = np.append(DEPTH, d) VARDEPTH = np.append(VARDEPTH, vard) DELCHISQ = np.append(DELCHISQ, dchisq) return TIME, DEPTH, VARDEPTH, DELCHISQ
[ "def", "Search", "(", "star", ",", "pos_tol", "=", "2.5", ",", "neg_tol", "=", "50.", ",", "*", "*", "ps_kwargs", ")", ":", "# Smooth the light curve", "t", "=", "np", ".", "delete", "(", "star", ".", "time", ",", "np", ".", "concatenate", "(", "[", "star", ".", "nanmask", ",", "star", ".", "badmask", "]", ")", ")", "f", "=", "np", ".", "delete", "(", "star", ".", "flux", ",", "np", ".", "concatenate", "(", "[", "star", ".", "nanmask", ",", "star", ".", "badmask", "]", ")", ")", "f", "=", "SavGol", "(", "f", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "# Kill positive outliers", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "pos_inds", "=", "np", ".", "where", "(", "(", "f", ">", "med", "+", "pos_tol", "*", "MAD", ")", ")", "[", "0", "]", "pos_inds", "=", "np", ".", "array", "(", "[", "np", ".", "argmax", "(", "star", ".", "time", "==", "t", "[", "i", "]", ")", "for", "i", "in", "pos_inds", "]", ")", "# Kill negative outliers", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "neg_inds", "=", "np", ".", "where", "(", "(", "f", "<", "med", "-", "neg_tol", "*", "MAD", ")", ")", "[", "0", "]", "neg_inds", "=", "np", ".", "array", "(", "[", "np", ".", "argmax", "(", "star", ".", "time", "==", "t", "[", "i", "]", ")", "for", "i", "in", "neg_inds", "]", ")", "# Replace the star.outmask array", "star", ".", "outmask", "=", "np", ".", "concatenate", "(", "[", "neg_inds", ",", "pos_inds", "]", ")", "star", ".", "transitmask", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "int", ")", "# Delta chi squared", "TIME", "=", "np", ".", "array", "(", "[", "]", ")", "DEPTH", "=", "np", ".", "array", "(", "[", "]", ")", "VARDEPTH", "=", "np", ".", "array", "(", "[", "]", ")", "DELCHISQ", "=", "np", ".", "array", "(", "[", "]", ")", "for", "b", ",", "brkpt", "in", "enumerate", "(", "star", ".", "breakpoints", ")", ":", "# Log", "log", ".", "info", "(", "'Running chunk %d/%d...'", "%", "(", "b", "+", "1", ",", "len", "(", "star", ".", "breakpoints", ")", ")", ")", "# Masks for current chunk", "m", "=", "star", ".", "get_masked_chunk", "(", "b", ",", "pad", "=", "False", ")", "# This block of the masked covariance matrix", "K", "=", "GetCovariance", "(", "star", ".", "kernel", ",", "star", ".", "kernel_params", ",", "star", ".", "time", "[", "m", "]", ",", "star", ".", "fraw_err", "[", "m", "]", ")", "# The masked X.L.X^T term", "A", "=", "np", ".", "zeros", "(", "(", "len", "(", "m", ")", ",", "len", "(", "m", ")", ")", ")", "for", "n", "in", "range", "(", "star", ".", "pld_order", ")", ":", "XM", "=", "star", ".", "X", "(", "n", ",", "m", ")", "A", "+=", "star", ".", "lam", "[", "b", "]", "[", "n", "]", "*", "np", ".", "dot", "(", "XM", ",", "XM", ".", "T", ")", "K", "+=", "A", "CDK", "=", "cho_factor", "(", "K", ")", "# Baseline", "med", "=", "np", ".", "nanmedian", "(", "star", ".", "fraw", "[", "m", "]", ")", "lnL0", "=", "-", "0.5", "*", "np", ".", "dot", "(", "star", ".", "fraw", "[", "m", "]", ",", "cho_solve", "(", "CDK", ",", "star", ".", "fraw", "[", "m", "]", ")", ")", "dt", "=", "np", ".", "median", "(", "np", ".", "diff", "(", "star", ".", "time", "[", "m", "]", ")", ")", "# Create a uniform time array and get indices of missing cadences", "tol", "=", "np", ".", "nanmedian", "(", "np", ".", "diff", "(", "star", ".", "time", "[", "m", "]", ")", ")", "/", "5.", "tunif", "=", "np", ".", "arange", "(", "star", ".", "time", "[", "m", "]", "[", "0", "]", ",", 
"star", ".", "time", "[", "m", "]", "[", "-", "1", "]", "+", "tol", ",", "dt", ")", "tnogaps", "=", "np", ".", "array", "(", "tunif", ")", "gaps", "=", "[", "]", "j", "=", "0", "for", "i", ",", "t", "in", "enumerate", "(", "tunif", ")", ":", "if", "np", ".", "abs", "(", "star", ".", "time", "[", "m", "]", "[", "j", "]", "-", "t", ")", "<", "tol", ":", "tnogaps", "[", "i", "]", "=", "star", ".", "time", "[", "m", "]", "[", "j", "]", "j", "+=", "1", "if", "j", "==", "len", "(", "star", ".", "time", "[", "m", "]", ")", ":", "break", "else", ":", "gaps", ".", "append", "(", "i", ")", "gaps", "=", "np", ".", "array", "(", "gaps", ",", "dtype", "=", "int", ")", "# Compute the normalized transit model for a single transit", "transit_model", "=", "TransitShape", "(", "*", "*", "ps_kwargs", ")", "# Now roll the transit model across each cadence", "dchisq", "=", "np", ".", "zeros", "(", "len", "(", "tnogaps", ")", ")", "d", "=", "np", ".", "zeros", "(", "len", "(", "tnogaps", ")", ")", "vard", "=", "np", ".", "zeros", "(", "len", "(", "tnogaps", ")", ")", "for", "i", "in", "prange", "(", "len", "(", "tnogaps", ")", ")", ":", "trn", "=", "transit_model", "(", "tnogaps", ",", "tnogaps", "[", "i", "]", ")", "trn", "=", "np", ".", "delete", "(", "trn", ",", "gaps", ")", "trn", "*=", "med", "vard", "[", "i", "]", "=", "1.", "/", "np", ".", "dot", "(", "trn", ",", "cho_solve", "(", "CDK", ",", "trn", ")", ")", "if", "not", "np", ".", "isfinite", "(", "vard", "[", "i", "]", ")", ":", "vard", "[", "i", "]", "=", "np", ".", "nan", "d", "[", "i", "]", "=", "np", ".", "nan", "dchisq", "[", "i", "]", "=", "np", ".", "nan", "continue", "d", "[", "i", "]", "=", "vard", "[", "i", "]", "*", "np", ".", "dot", "(", "trn", ",", "cho_solve", "(", "CDK", ",", "star", ".", "fraw", "[", "m", "]", ")", ")", "r", "=", "star", ".", "fraw", "[", "m", "]", "-", "trn", "*", "d", "[", "i", "]", "lnL", "=", "-", "0.5", "*", "np", ".", "dot", "(", "r", ",", "cho_solve", "(", "CDK", ",", "r", ")", ")", "dchisq", "[", "i", "]", "=", "-", "2", "*", "(", "lnL0", "-", "lnL", ")", "TIME", "=", "np", ".", "append", "(", "TIME", ",", "tnogaps", ")", "DEPTH", "=", "np", ".", "append", "(", "DEPTH", ",", "d", ")", "VARDEPTH", "=", "np", ".", "append", "(", "VARDEPTH", ",", "vard", ")", "DELCHISQ", "=", "np", ".", "append", "(", "DELCHISQ", ",", "dchisq", ")", "return", "TIME", ",", "DEPTH", ",", "VARDEPTH", ",", "DELCHISQ" ]
NOTE: `pos_tol` is the positive (i.e., above the median) outlier tolerance in standard deviations. NOTE: `neg_tol` is the negative (i.e., below the median) outlier tolerance in standard deviations.
[ "NOTE", ":", "pos_tol", "is", "the", "positive", "(", "i", ".", "e", ".", "above", "the", "median", ")", "outlier", "tolerance", "in", "standard", "deviations", ".", "NOTE", ":", "neg_tol", "is", "the", "negative", "(", "i", ".", "e", ".", "below", "the", "median", ")", "outlier", "tolerance", "in", "standard", "deviations", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/search.py#L29-L131
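For a single trial cadence, the inner loop above is generalized least squares with one free parameter: given a covariance matrix K and a transit template t, the maximum-likelihood depth is d = (t' K^-1 y) / (t' K^-1 t), its variance is 1 / (t' K^-1 t), and the detection strength is Delta chi^2 = -2 (lnL0 - lnL). A self-contained numerical sketch with a toy diagonal covariance, using the same ``cho_factor``/``cho_solve`` calls as the source (the injected depth of 0.8 is arbitrary)::

    import numpy as np
    from scipy.linalg import cho_factor, cho_solve

    rng = np.random.default_rng(1)
    n = 200
    K = np.diag(np.full(n, 25.0))            # toy covariance: white noise, sigma = 5
    trn = np.zeros(n)
    trn[95:105] = -50.0                      # box-shaped transit template, flux units
    y = rng.normal(0.0, 5.0, n) + 0.8 * trn  # mean-subtracted flux with injected dip

    CDK = cho_factor(K)
    vard = 1.0 / np.dot(trn, cho_solve(CDK, trn))   # depth variance
    d = vard * np.dot(trn, cho_solve(CDK, y))       # ML depth estimate, ~0.8

    lnL0 = -0.5 * np.dot(y, cho_solve(CDK, y))      # no-transit baseline
    r = y - trn * d
    lnL = -0.5 * np.dot(r, cho_solve(CDK, r))
    dchisq = -2 * (lnL0 - lnL)                      # large for a real dip
    print(d, np.sqrt(vard), dchisq)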
lsbardel/python-stdnet
stdnet/odm/session.py
SessionModel.iterdirty
def iterdirty(self): '''Ordered iterator over dirty elements.''' return iter(chain(itervalues(self._new), itervalues(self._modified)))
python
def iterdirty(self): '''Ordered iterator over dirty elements.''' return iter(chain(itervalues(self._new), itervalues(self._modified)))
[ "def", "iterdirty", "(", "self", ")", ":", "return", "iter", "(", "chain", "(", "itervalues", "(", "self", ".", "_new", ")", ",", "itervalues", "(", "self", ".", "_modified", ")", ")", ")" ]
Ordered iterator over dirty elements.
[ "Ordered", "iterator", "over", "dirty", "elements", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L108-L110
lsbardel/python-stdnet
stdnet/odm/session.py
SessionModel.add
def add(self, instance, modified=True, persistent=None, force_update=False): '''Add a new instance to this :class:`SessionModel`. :param modified: Optional flag indicating if the ``instance`` has been modified. By default its value is ``True``. :param force_update: if ``instance`` is persistent, it forces an update of the data rather than a full replacement. This is used by the :meth:`insert_update_replace` method. :rtype: The instance added to the session''' if instance._meta.type == 'structure': return self._add_structure(instance) state = instance.get_state() if state.deleted: raise ValueError('State is deleted. Cannot add.') self.pop(state.iid) pers = persistent if persistent is not None else state.persistent pkname = instance._meta.pkname() if not pers: instance._dbdata.pop(pkname, None) # to make sure it is add action state = instance.get_state(iid=None) elif persistent: instance._dbdata[pkname] = instance.pkvalue() state = instance.get_state(iid=instance.pkvalue()) else: action = 'update' if force_update else None state = instance.get_state(action=action, iid=state.iid) iid = state.iid if state.persistent: if modified: self._modified[iid] = instance else: self._new[iid] = instance return instance
python
def add(self, instance, modified=True, persistent=None, force_update=False): '''Add a new instance to this :class:`SessionModel`. :param modified: Optional flag indicating if the ``instance`` has been modified. By default its value is ``True``. :param force_update: if ``instance`` is persistent, it forces an update of the data rather than a full replacement. This is used by the :meth:`insert_update_replace` method. :rtype: The instance added to the session''' if instance._meta.type == 'structure': return self._add_structure(instance) state = instance.get_state() if state.deleted: raise ValueError('State is deleted. Cannot add.') self.pop(state.iid) pers = persistent if persistent is not None else state.persistent pkname = instance._meta.pkname() if not pers: instance._dbdata.pop(pkname, None) # to make sure it is add action state = instance.get_state(iid=None) elif persistent: instance._dbdata[pkname] = instance.pkvalue() state = instance.get_state(iid=instance.pkvalue()) else: action = 'update' if force_update else None state = instance.get_state(action=action, iid=state.iid) iid = state.iid if state.persistent: if modified: self._modified[iid] = instance else: self._new[iid] = instance return instance
[ "def", "add", "(", "self", ",", "instance", ",", "modified", "=", "True", ",", "persistent", "=", "None", ",", "force_update", "=", "False", ")", ":", "if", "instance", ".", "_meta", ".", "type", "==", "'structure'", ":", "return", "self", ".", "_add_structure", "(", "instance", ")", "state", "=", "instance", ".", "get_state", "(", ")", "if", "state", ".", "deleted", ":", "raise", "ValueError", "(", "'State is deleted. Cannot add.'", ")", "self", ".", "pop", "(", "state", ".", "iid", ")", "pers", "=", "persistent", "if", "persistent", "is", "not", "None", "else", "state", ".", "persistent", "pkname", "=", "instance", ".", "_meta", ".", "pkname", "(", ")", "if", "not", "pers", ":", "instance", ".", "_dbdata", ".", "pop", "(", "pkname", ",", "None", ")", "# to make sure it is add action\r", "state", "=", "instance", ".", "get_state", "(", "iid", "=", "None", ")", "elif", "persistent", ":", "instance", ".", "_dbdata", "[", "pkname", "]", "=", "instance", ".", "pkvalue", "(", ")", "state", "=", "instance", ".", "get_state", "(", "iid", "=", "instance", ".", "pkvalue", "(", ")", ")", "else", ":", "action", "=", "'update'", "if", "force_update", "else", "None", "state", "=", "instance", ".", "get_state", "(", "action", "=", "action", ",", "iid", "=", "state", ".", "iid", ")", "iid", "=", "state", ".", "iid", "if", "state", ".", "persistent", ":", "if", "modified", ":", "self", ".", "_modified", "[", "iid", "]", "=", "instance", "else", ":", "self", ".", "_new", "[", "iid", "]", "=", "instance", "return", "instance" ]
Add a new instance to this :class:`SessionModel`. :param modified: Optional flag indicating if the ``instance`` has been modified. By default its value is ``True``. :param force_update: if ``instance`` is persistent, it forces an update of the data rather than a full replacement. This is used by the :meth:`insert_update_replace` method. :rtype: The instance added to the session
[ "Add", "a", "new", "instance", "to", "this", ":", "class", ":", "SessionModel", ".", ":", "param", "modified", ":", "Optional", "flag", "indicating", "if", "the", "instance", "has", "been", "modified", ".", "By", "default", "its", "value", "is", "True", ".", ":", "param", "force_update", ":", "if", "instance", "is", "persistent", "it", "forces", "an", "update", "of", "the", "data", "rather", "than", "a", "full", "replacement", ".", "This", "is", "used", "by", "the", ":", "meth", ":", "insert_update_replace", "method", ".", ":", "rtype", ":", "The", "instance", "added", "to", "the", "session" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L119-L152
lsbardel/python-stdnet
stdnet/odm/session.py
SessionModel.delete
def delete(self, instance, session):
        '''delete an *instance*'''
        if instance._meta.type == 'structure':
            return self._delete_structure(instance)
        inst = self.pop(instance)
        instance = inst if inst is not None else instance
        if instance is not None:
            state = instance.get_state()
            if state.persistent:
                state.deleted = True
                self._deleted[state.iid] = instance
                instance.session = session
            else:
                instance.session = None
        return instance
python
def delete(self, instance, session):
        '''delete an *instance*'''
        if instance._meta.type == 'structure':
            return self._delete_structure(instance)
        inst = self.pop(instance)
        instance = inst if inst is not None else instance
        if instance is not None:
            state = instance.get_state()
            if state.persistent:
                state.deleted = True
                self._deleted[state.iid] = instance
                instance.session = session
            else:
                instance.session = None
        return instance
[ "def", "delete", "(", "self", ",", "instance", ",", "session", ")", ":", "if", "instance", ".", "_meta", ".", "type", "==", "'structure'", ":", "return", "self", ".", "_delete_structure", "(", "instance", ")", "inst", "=", "self", ".", "pop", "(", "instance", ")", "instance", "=", "inst", "if", "inst", "is", "not", "None", "else", "instance", "if", "instance", "is", "not", "None", ":", "state", "=", "instance", ".", "get_state", "(", ")", "if", "state", ".", "persistent", ":", "state", ".", "deleted", "=", "True", "self", ".", "_deleted", "[", "state", ".", "iid", "]", "=", "instance", "instance", ".", "session", "=", "session", "else", ":", "instance", ".", "session", "=", "None", "return", "instance" ]
delete an *instance*
[ "delete", "an", "*", "instance", "*" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L154-L168
lsbardel/python-stdnet
stdnet/odm/session.py
SessionModel.pop
def pop(self, instance):
        '''Remove ``instance`` from the :class:`SessionModel`. Instance
        could be a :class:`Model` or an id.

        :parameter instance: a :class:`Model` or an ``id``.
        :rtype: the :class:`Model` removed from session or ``None`` if
            it was not in the session.
        '''
        if isinstance(instance, self.model):
            iid = instance.get_state().iid
        else:
            iid = instance
        instance = None
        for d in (self._new, self._modified, self._deleted):
            if iid in d:
                inst = d.pop(iid)
                if instance is None:
                    instance = inst
                elif inst is not instance:
                    raise ValueError('Critical error: %s is duplicated' % iid)
        return instance
python
def pop(self, instance):
        '''Remove ``instance`` from the :class:`SessionModel`. Instance
        could be a :class:`Model` or an id.

        :parameter instance: a :class:`Model` or an ``id``.
        :rtype: the :class:`Model` removed from session or ``None`` if
            it was not in the session.
        '''
        if isinstance(instance, self.model):
            iid = instance.get_state().iid
        else:
            iid = instance
        instance = None
        for d in (self._new, self._modified, self._deleted):
            if iid in d:
                inst = d.pop(iid)
                if instance is None:
                    instance = inst
                elif inst is not instance:
                    raise ValueError('Critical error: %s is duplicated' % iid)
        return instance
[ "def", "pop", "(", "self", ",", "instance", ")", ":", "if", "isinstance", "(", "instance", ",", "self", ".", "model", ")", ":", "iid", "=", "instance", ".", "get_state", "(", ")", ".", "iid", "else", ":", "iid", "=", "instance", "instance", "=", "None", "for", "d", "in", "(", "self", ".", "_new", ",", "self", ".", "_modified", ",", "self", ".", "_deleted", ")", ":", "if", "iid", "in", "d", ":", "inst", "=", "d", ".", "pop", "(", "iid", ")", "if", "instance", "is", "None", ":", "instance", "=", "inst", "elif", "inst", "is", "not", "instance", ":", "raise", "ValueError", "(", "'Critical error: %s is duplicated'", "%", "iid", ")", "return", "instance" ]
Remove ``instance`` from the :class:`SessionModel`. Instance could be a
        :class:`Model` or an id.

        :parameter instance: a :class:`Model` or an ``id``.
        :rtype: the :class:`Model` removed from session or ``None`` if it
            was not in the session.
[ "Remove", "instance", "from", "the", ":", "class", ":", "SessionModel", ".", "Instance", "could", "be", "a", ":", "class", ":", "Model", "or", "an", "id", ".", ":", "parameter", "instance", ":", "a", ":", "class", ":", "Model", "or", "an", "id", ".", ":", "rtype", ":", "the", ":", "class", ":", "Model", "removed", "from", "session", "or", "None", "if", "it", "was", "not", "in", "the", "session", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L170-L190
lsbardel/python-stdnet
stdnet/odm/session.py
SessionModel.expunge
def expunge(self, instance):
        '''Remove *instance* from the :class:`Session`. Instance could be a
        :class:`Model` or an id.

        :parameter instance: a :class:`Model` or an *id*
        :rtype: the :class:`Model` removed from session or ``None`` if it
            was not in the session.
        '''
        instance = self.pop(instance)
        instance.session = None
        return instance
python
def expunge(self, instance):
        '''Remove *instance* from the :class:`Session`. Instance could be a
        :class:`Model` or an id.

        :parameter instance: a :class:`Model` or an *id*
        :rtype: the :class:`Model` removed from session or ``None`` if it
            was not in the session.
        '''
        instance = self.pop(instance)
        instance.session = None
        return instance
[ "def", "expunge", "(", "self", ",", "instance", ")", ":", "instance", "=", "self", ".", "pop", "(", "instance", ")", "instance", ".", "session", "=", "None", "return", "instance" ]
Remove *instance* from the :class:`Session`. Instance could be a
        :class:`Model` or an id.

        :parameter instance: a :class:`Model` or an *id*
        :rtype: the :class:`Model` removed from session or ``None`` if it
            was not in the session.
[ "Remove", "*", "instance", "*", "from", "the", ":", "class", ":", "Session", ".", "Instance", "could", "be", "a", ":", "class", ":", "Model", "or", "an", "id", ".", ":", "parameter", "instance", ":", "a", ":", "class", ":", "Model", "or", "an", "*", "id", "*", ":", "rtype", ":", "the", ":", "class", ":", "Model", "removed", "from", "session", "or", "None", "if", "it", "was", "not", "in", "the", "session", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L192-L202
lsbardel/python-stdnet
stdnet/odm/session.py
SessionModel.post_commit
def post_commit(self, results):
        '''\
        Process results after a commit.

        :parameter results: iterator over
            :class:`stdnet.instance_session_result` items.
        :rtype: a two-element tuple containing a list of instances saved
            and a list of ids of instances deleted.'''
        tpy = self._meta.pk_to_python
        instances = []
        deleted = []
        errors = []
        # The length of results must be the same as the length of
        # all committed instances
        for result in results:
            if isinstance(result, Exception):
                errors.append(result.__class__(
                    'Exception while committing %s. %s' % (self._meta, result)))
                continue
            instance = self.pop(result.iid)
            id = tpy(result.id, self.backend)
            if result.deleted:
                deleted.append(id)
            else:
                if instance is None:
                    raise InvalidTransaction(
                        '{0} session received id "{1}" which is not in '
                        'the session.'.format(self, result.iid))
                setattr(instance, instance._meta.pkname(), id)
                instance = self.add(instance, modified=False,
                                    persistent=result.persistent)
                instance.get_state().score = result.score
                if instance.get_state().persistent:
                    instances.append(instance)
        return instances, deleted, errors
python
def post_commit(self, results):
        '''\
        Process results after a commit.

        :parameter results: iterator over
            :class:`stdnet.instance_session_result` items.
        :rtype: a two-element tuple containing a list of instances saved
            and a list of ids of instances deleted.'''
        tpy = self._meta.pk_to_python
        instances = []
        deleted = []
        errors = []
        # The length of results must be the same as the length of
        # all committed instances
        for result in results:
            if isinstance(result, Exception):
                errors.append(result.__class__(
                    'Exception while committing %s. %s' % (self._meta, result)))
                continue
            instance = self.pop(result.iid)
            id = tpy(result.id, self.backend)
            if result.deleted:
                deleted.append(id)
            else:
                if instance is None:
                    raise InvalidTransaction(
                        '{0} session received id "{1}" which is not in '
                        'the session.'.format(self, result.iid))
                setattr(instance, instance._meta.pkname(), id)
                instance = self.add(instance, modified=False,
                                    persistent=result.persistent)
                instance.get_state().score = result.score
                if instance.get_state().persistent:
                    instances.append(instance)
        return instances, deleted, errors
[ "def", "post_commit", "(", "self", ",", "results", ")", ":", "tpy", "=", "self", ".", "_meta", ".", "pk_to_python", "instances", "=", "[", "]", "deleted", "=", "[", "]", "errors", "=", "[", "]", "# The length of results must be the same as the length of\r", "# all committed instances\r", "for", "result", "in", "results", ":", "if", "isinstance", "(", "result", ",", "Exception", ")", ":", "errors", ".", "append", "(", "result", ".", "__class__", "(", "'Exception while committing %s.'", "' %s'", "%", "(", "self", ".", "_meta", ",", "result", ")", ")", ")", "continue", "instance", "=", "self", ".", "pop", "(", "result", ".", "iid", ")", "id", "=", "tpy", "(", "result", ".", "id", ",", "self", ".", "backend", ")", "if", "result", ".", "deleted", ":", "deleted", ".", "append", "(", "id", ")", "else", ":", "if", "instance", "is", "None", ":", "raise", "InvalidTransaction", "(", "'{0} session received id \"{1}\"\\\r\n which is not in the session.'", ".", "format", "(", "self", ",", "result", ".", "iid", ")", ")", "setattr", "(", "instance", ",", "instance", ".", "_meta", ".", "pkname", "(", ")", ",", "id", ")", "instance", "=", "self", ".", "add", "(", "instance", ",", "modified", "=", "False", ",", "persistent", "=", "result", ".", "persistent", ")", "instance", ".", "get_state", "(", ")", ".", "score", "=", "result", ".", "score", "if", "instance", ".", "get_state", "(", ")", ".", "persistent", ":", "instances", ".", "append", "(", "instance", ")", "return", "instances", ",", "deleted", ",", "errors" ]
Process results after a commit.

        :parameter results: iterator over
            :class:`stdnet.instance_session_result` items.
        :rtype: a two-element tuple containing a list of instances saved
            and a list of ids of instances deleted.
[ "\\", "Process", "results", "after", "a", "commit", ".", ":", "parameter", "results", ":", "iterator", "over", ":", "class", ":", "stdnet", ".", "instance_session_result", "items", ".", ":", "rtype", ":", "a", "two", "elements", "tuple", "containing", "a", "list", "of", "instances", "saved", "and", "a", "list", "of", "ids", "of", "instances", "deleted", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L204-L238
lsbardel/python-stdnet
stdnet/odm/session.py
Transaction.commit
def commit(self, callback=None):
        '''Close the transaction and commit session to the backend.'''
        if self.executed:
            raise InvalidTransaction('Invalid operation. '
                                     'Transaction already executed.')
        session = self.session
        self.session = None
        self.on_result = self._commit(session, callback)
        return self.on_result
python
def commit(self, callback=None):
        '''Close the transaction and commit session to the backend.'''
        if self.executed:
            raise InvalidTransaction('Invalid operation. '
                                     'Transaction already executed.')
        session = self.session
        self.session = None
        self.on_result = self._commit(session, callback)
        return self.on_result
[ "def", "commit", "(", "self", ",", "callback", "=", "None", ")", ":", "if", "self", ".", "executed", ":", "raise", "InvalidTransaction", "(", "'Invalid operation. '", "'Transaction already executed.'", ")", "session", "=", "self", ".", "session", "self", ".", "session", "=", "None", "self", ".", "on_result", "=", "self", ".", "_commit", "(", "session", ",", "callback", ")", "return", "self", ".", "on_result" ]
Close the transaction and commit session to the backend.
[ "Close", "the", "transaction", "and", "commit", "session", "to", "the", "backend", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L427-L435
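For context, a hedged usage sketch of the commit workflow described above; ``session`` and ``instance`` are placeholders, and the transaction is the one returned by :meth:`Session.begin`::

    t = session.begin()
    t.add(instance)
    result = t.commit()   # executes the session against the backend
    # calling t.commit() a second time raises InvalidTransaction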
lsbardel/python-stdnet
stdnet/odm/session.py
Session.dirty
def dirty(self):
        '''The set of instances in this :class:`Session` which have
        been modified.'''
        return frozenset(chain(*tuple((sm.dirty for sm in
                                       itervalues(self._models)))))
python
def dirty(self):
        '''The set of instances in this :class:`Session` which have
        been modified.'''
        return frozenset(chain(*tuple((sm.dirty for sm in
                                       itervalues(self._models)))))
[ "def", "dirty", "(", "self", ")", ":", "return", "frozenset", "(", "chain", "(", "*", "tuple", "(", "(", "sm", ".", "dirty", "for", "sm", "in", "itervalues", "(", "self", ".", "_models", ")", ")", ")", ")", ")" ]
The set of instances in this :class:`Session` which have been modified.
[ "The", "set", "of", "instances", "in", "this", ":", "class", ":", "Session", "which", "have", "been", "modified", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L547-L551
lsbardel/python-stdnet
stdnet/odm/session.py
Session.begin
def begin(self, **options):
        '''Begin a new :class:`Transaction`. If this :class:`Session`
        is already in a :ref:`transactional state <transactional-state>`,
        an error will occur. It returns the :attr:`transaction` attribute.

        This method is mostly used within a ``with`` statement block::

            with session.begin() as t:
                t.add(...)
                ...

        which is equivalent to::

            t = session.begin()
            t.add(...)
            ...
            session.commit()

        ``options`` parameters are passed to the :class:`Transaction`
        constructor.
        '''
        if self.transaction is not None:
            raise InvalidTransaction("A transaction is already begun.")
        else:
            self.transaction = Transaction(self, **options)
        return self.transaction
python
def begin(self, **options):
        '''Begin a new :class:`Transaction`. If this :class:`Session`
        is already in a :ref:`transactional state <transactional-state>`,
        an error will occur. It returns the :attr:`transaction` attribute.

        This method is mostly used within a ``with`` statement block::

            with session.begin() as t:
                t.add(...)
                ...

        which is equivalent to::

            t = session.begin()
            t.add(...)
            ...
            session.commit()

        ``options`` parameters are passed to the :class:`Transaction`
        constructor.
        '''
        if self.transaction is not None:
            raise InvalidTransaction("A transaction is already begun.")
        else:
            self.transaction = Transaction(self, **options)
        return self.transaction
[ "def", "begin", "(", "self", ",", "*", "*", "options", ")", ":", "if", "self", ".", "transaction", "is", "not", "None", ":", "raise", "InvalidTransaction", "(", "\"A transaction is already begun.\"", ")", "else", ":", "self", ".", "transaction", "=", "Transaction", "(", "self", ",", "*", "*", "options", ")", "return", "self", ".", "transaction" ]
Begin a new :class:`Transaction`. If this :class:`Session`
        is already in a :ref:`transactional state <transactional-state>`,
        an error will occur. It returns the :attr:`transaction` attribute.

        This method is mostly used within a ``with`` statement block::

            with session.begin() as t:
                t.add(...)
                ...

        which is equivalent to::

            t = session.begin()
            t.add(...)
            ...
            session.commit()

        ``options`` parameters are passed to the :class:`Transaction`
        constructor.
[ "Begin", "a", "new", ":", "class", ":", "Transaction", ".", "If", "this", ":", "class", ":", "Session", "is", "already", "in", "a", ":", "ref", ":", "transactional", "state", "<transactional", "-", "state", ">", "an", "error", "will", "occur", ".", "It", "returns", "the", ":", "attr", ":", "transaction", "attribute", ".", "This", "method", "is", "mostly", "used", "within", "a", "with", "statement", "block", "::", "with", "session", ".", "begin", "()", "as", "t", ":", "t", ".", "add", "(", "...", ")", "...", "which", "is", "equivalent", "to", "::", "t", "=", "session", ".", "begin", "()", "t", ".", "add", "(", "...", ")", "...", "session", ".", "commit", "()", "options", "parameters", "are", "passed", "to", "the", ":", "class", ":", "Transaction", "constructor", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L553-L577
lsbardel/python-stdnet
stdnet/odm/session.py
Session.query
def query(self, model, **kwargs):
        '''Create a new :class:`Query` for *model*.'''
        sm = self.model(model)
        query_class = sm.manager.query_class or Query
        return query_class(sm._meta, self, **kwargs)
python
def query(self, model, **kwargs):
        '''Create a new :class:`Query` for *model*.'''
        sm = self.model(model)
        query_class = sm.manager.query_class or Query
        return query_class(sm._meta, self, **kwargs)
[ "def", "query", "(", "self", ",", "model", ",", "*", "*", "kwargs", ")", ":", "sm", "=", "self", ".", "model", "(", "model", ")", "query_class", "=", "sm", ".", "manager", ".", "query_class", "or", "Query", "return", "query_class", "(", "sm", ".", "_meta", ",", "self", ",", "*", "*", "kwargs", ")" ]
Create a new :class:`Query` for *model*.
[ "Create", "a", "new", ":", "class", ":", "Query", "for", "*", "model", "*", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L590-L594
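For illustration, a minimal sketch of obtaining a query from a session; the connection string and ``MyModel`` are hypothetical placeholders::

    from stdnet import odm

    models = odm.Router('redis://127.0.0.1:6379?db=7')
    models.register(MyModel)
    session = models.session()
    qs = session.query(MyModel)   # a new Query bound to this session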
lsbardel/python-stdnet
stdnet/odm/session.py
Session.update_or_create
def update_or_create(self, model, **kwargs):
        '''Update or create a new instance of ``model``. This method can
        raise an exception if the ``kwargs`` dictionary contains field data
        that does not validate.

        :param model: a :class:`StdModel`
        :param kwargs: dictionary of parameters.
        :returns: A two-element tuple containing the instance and a boolean
            indicating if the instance was created or not.
        '''
        backend = self.model(model).backend
        return backend.execute(self._update_or_create(model, **kwargs))
python
def update_or_create(self, model, **kwargs):
        '''Update or create a new instance of ``model``. This method can
        raise an exception if the ``kwargs`` dictionary contains field data
        that does not validate.

        :param model: a :class:`StdModel`
        :param kwargs: dictionary of parameters.
        :returns: A two-element tuple containing the instance and a boolean
            indicating if the instance was created or not.
        '''
        backend = self.model(model).backend
        return backend.execute(self._update_or_create(model, **kwargs))
[ "def", "update_or_create", "(", "self", ",", "model", ",", "*", "*", "kwargs", ")", ":", "backend", "=", "self", ".", "model", "(", "model", ")", ".", "backend", "return", "backend", ".", "execute", "(", "self", ".", "_update_or_create", "(", "model", ",", "*", "*", "kwargs", ")", ")" ]
Update or create a new instance of ``model``. This method can raise
        an exception if the ``kwargs`` dictionary contains field data that
        does not validate.

        :param model: a :class:`StdModel`
        :param kwargs: dictionary of parameters.
        :returns: A two-element tuple containing the instance and a boolean
            indicating if the instance was created or not.
[ "Update", "or", "create", "a", "new", "instance", "of", "model", ".", "This", "method", "can", "raise", "an", "exception", "if", "the", "kwargs", "dictionary", "contains", "field", "data", "that", "does", "not", "validate", ".", ":", "param", "model", ":", "a", ":", "class", ":", "StdModel", ":", "param", "kwargs", ":", "dictionary", "of", "parameters", ".", ":", "returns", ":", "A", "two", "elements", "tuple", "containing", "the", "instance", "and", "a", "boolean", "indicating", "if", "the", "instance", "was", "created", "or", "not", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L600-L612
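A hedged sketch of the two-element return value; ``MyModel`` and its fields are placeholders, and the result is assumed to be returned directly by a synchronous backend::

    instance, created = session.update_or_create(MyModel, id=4, name='bla')
    # created is True when a new instance was stored, False on update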
lsbardel/python-stdnet
stdnet/odm/session.py
Session.add
def add(self, instance, modified=True, **params):
        '''Add an ``instance`` to the session. If the session is not in
        a :ref:`transactional state <transactional-state>`, this operation
        commits changes to the back-end server immediately.

        :parameter instance: a :class:`Model` instance. It must be
            registered with the :attr:`router` which created this
            :class:`Session`.
        :parameter modified: a boolean flag indicating if the instance
            was modified.
        :return: the ``instance``.

        If the instance is persistent (it is already stored in the
        database), an update will be performed, otherwise a new entry
        will be created once the :meth:`commit` method is invoked.
        '''
        sm = self.model(instance)
        instance.session = self
        o = sm.add(instance, modified=modified, **params)
        if modified and not self.transaction:
            transaction = self.begin()
            return transaction.commit(lambda: o)
        else:
            return o
python
def add(self, instance, modified=True, **params):
        '''Add an ``instance`` to the session. If the session is not in
        a :ref:`transactional state <transactional-state>`, this operation
        commits changes to the back-end server immediately.

        :parameter instance: a :class:`Model` instance. It must be
            registered with the :attr:`router` which created this
            :class:`Session`.
        :parameter modified: a boolean flag indicating if the instance
            was modified.
        :return: the ``instance``.

        If the instance is persistent (it is already stored in the
        database), an update will be performed, otherwise a new entry
        will be created once the :meth:`commit` method is invoked.
        '''
        sm = self.model(instance)
        instance.session = self
        o = sm.add(instance, modified=modified, **params)
        if modified and not self.transaction:
            transaction = self.begin()
            return transaction.commit(lambda: o)
        else:
            return o
[ "def", "add", "(", "self", ",", "instance", ",", "modified", "=", "True", ",", "*", "*", "params", ")", ":", "sm", "=", "self", ".", "model", "(", "instance", ")", "instance", ".", "session", "=", "self", "o", "=", "sm", ".", "add", "(", "instance", ",", "modified", "=", "modified", ",", "*", "*", "params", ")", "if", "modified", "and", "not", "self", ".", "transaction", ":", "transaction", "=", "self", ".", "begin", "(", ")", "return", "transaction", ".", "commit", "(", "lambda", ":", "o", ")", "else", ":", "return", "o" ]
Add an ``instance`` to the session. If the session is not in
        a :ref:`transactional state <transactional-state>`, this operation
        commits changes to the back-end server immediately.

        :parameter instance: a :class:`Model` instance. It must be
            registered with the :attr:`router` which created this
            :class:`Session`.
        :parameter modified: a boolean flag indicating if the instance
            was modified.
        :return: the ``instance``.

        If the instance is persistent (it is already stored in the
        database), an update will be performed, otherwise a new entry
        will be created once the :meth:`commit` method is invoked.
[ "Add", "an", "instance", "to", "the", "session", ".", "If", "the", "session", "is", "not", "in", "a", ":", "ref", ":", "transactional", "state", "<transactional", "-", "state", ">", "this", "operation", "commits", "changes", "to", "the", "back", "-", "end", "server", "immediately", ".", ":", "parameter", "instance", ":", "a", ":", "class", ":", "Model", "instance", ".", "It", "must", "be", "registered", "with", "the", ":", "attr", ":", "router", "which", "created", "this", ":", "class", ":", "Session", ".", ":", "parameter", "modified", ":", "a", "boolean", "flag", "indicating", "if", "the", "instance", "was", "modified", ".", ":", "return", ":", "the", "instance", ".", "If", "the", "instance", "is", "persistent", "(", "it", "is", "already", "stored", "in", "the", "database", ")", "an", "updated", "will", "be", "performed", "otherwise", "a", "new", "entry", "will", "be", "created", "once", "the", ":", "meth", ":", "commit", "method", "is", "invoked", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L614-L638
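To make the transactional/non-transactional distinction concrete, a sketch with a placeholder ``MyModel``::

    session.add(MyModel(name='one'))    # no open transaction: commits immediately
    with session.begin() as t:
        t.add(MyModel(name='two'))      # committed when the block exits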
lsbardel/python-stdnet
stdnet/odm/session.py
Session.delete
def delete(self, instance_or_query):
        '''Delete an ``instance`` or a ``query``.

        Adds ``instance_or_query`` to this :class:`Session`'s list of data
        to be deleted. If the session is not in a
        :ref:`transactional state <transactional-state>`, this operation
        commits changes to the backend server immediately.

        :parameter instance_or_query: a :class:`Model` instance or
            a :class:`Query`.
        '''
        sm = self.model(instance_or_query)
        # not an instance of a Model. Assume it is a query.
        if is_query(instance_or_query):
            if instance_or_query.session is not self:
                raise ValueError('Adding a query generated by another session')
            sm._delete_query.append(instance_or_query)
        else:
            instance_or_query = sm.delete(instance_or_query, self)
        if not self.transaction:
            transaction = self.begin()
            return transaction.commit(
                lambda: transaction.deleted.get(sm._meta))
        else:
            return instance_or_query
python
def delete(self, instance_or_query):
        '''Delete an ``instance`` or a ``query``.

        Adds ``instance_or_query`` to this :class:`Session`'s list of data
        to be deleted. If the session is not in a
        :ref:`transactional state <transactional-state>`, this operation
        commits changes to the backend server immediately.

        :parameter instance_or_query: a :class:`Model` instance or
            a :class:`Query`.
        '''
        sm = self.model(instance_or_query)
        # not an instance of a Model. Assume it is a query.
        if is_query(instance_or_query):
            if instance_or_query.session is not self:
                raise ValueError('Adding a query generated by another session')
            sm._delete_query.append(instance_or_query)
        else:
            instance_or_query = sm.delete(instance_or_query, self)
        if not self.transaction:
            transaction = self.begin()
            return transaction.commit(
                lambda: transaction.deleted.get(sm._meta))
        else:
            return instance_or_query
[ "def", "delete", "(", "self", ",", "instance_or_query", ")", ":", "sm", "=", "self", ".", "model", "(", "instance_or_query", ")", "# not an instance of a Model. Assume it is a query.\r", "if", "is_query", "(", "instance_or_query", ")", ":", "if", "instance_or_query", ".", "session", "is", "not", "self", ":", "raise", "ValueError", "(", "'Adding a query generated by another session'", ")", "sm", ".", "_delete_query", ".", "append", "(", "instance_or_query", ")", "else", ":", "instance_or_query", "=", "sm", ".", "delete", "(", "instance_or_query", ",", "self", ")", "if", "not", "self", ".", "transaction", ":", "transaction", "=", "self", ".", "begin", "(", ")", "return", "transaction", ".", "commit", "(", "lambda", ":", "transaction", ".", "deleted", ".", "get", "(", "sm", ".", "_meta", ")", ")", "else", ":", "return", "instance_or_query" ]
Delete an ``instance`` or a ``query``.

        Adds ``instance_or_query`` to this :class:`Session`'s list of data
        to be deleted. If the session is not in a
        :ref:`transactional state <transactional-state>`, this operation
        commits changes to the backend server immediately.

        :parameter instance_or_query: a :class:`Model` instance or
            a :class:`Query`.
[ "Delete", "an", "instance", "or", "a", "query", ".", "Adds", "instance_or_query", "to", "this", ":", "class", ":", "Session", "list", "of", "data", "to", "be", "deleted", ".", "If", "the", "session", "is", "not", "in", "a", ":", "ref", ":", "transactional", "state", "<transactional", "-", "state", ">", "this", "operation", "commits", "changes", "to", "the", "backend", "server", "immediately", ".", ":", "parameter", "instance_or_query", ":", "a", ":", "class", ":", "Model", "instance", "or", "a", ":", "class", ":", "Query", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L640-L664
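Both accepted argument types, sketched with a placeholder ``MyModel``::

    session.delete(instance)                  # delete a single instance
    session.delete(session.query(MyModel))    # delete everything the query matches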
lsbardel/python-stdnet
stdnet/odm/session.py
Session.model
def model(self, model, create=True):
        '''Returns the :class:`SessionModel` for ``model`` which can be
        :class:`Model`, or a :class:`MetaClass`, or an instance
        of :class:`Model`.'''
        manager = self.manager(model)
        sm = self._models.get(manager)
        if sm is None and create:
            sm = SessionModel(manager)
            self._models[manager] = sm
        return sm
python
def model(self, model, create=True):
        '''Returns the :class:`SessionModel` for ``model`` which can be
        :class:`Model`, or a :class:`MetaClass`, or an instance
        of :class:`Model`.'''
        manager = self.manager(model)
        sm = self._models.get(manager)
        if sm is None and create:
            sm = SessionModel(manager)
            self._models[manager] = sm
        return sm
[ "def", "model", "(", "self", ",", "model", ",", "create", "=", "True", ")", ":", "manager", "=", "self", ".", "manager", "(", "model", ")", "sm", "=", "self", ".", "_models", ".", "get", "(", "manager", ")", "if", "sm", "is", "None", "and", "create", ":", "sm", "=", "SessionModel", "(", "manager", ")", "self", ".", "_models", "[", "manager", "]", "=", "sm", "return", "sm" ]
Returns the :class:`SessionModel` for ``model`` which can be :class:`Model`, or a :class:`MetaClass`, or an instance of :class:`Model`.
[ "Returns", "the", ":", "class", ":", "SessionModel", "for", "model", "which", "can", "be", ":", "class", ":", "Model", "or", "a", ":", "class", ":", "MetaClass", "or", "an", "instance", "of", ":", "class", ":", "Model", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L684-L693
lsbardel/python-stdnet
stdnet/odm/session.py
Session.expunge
def expunge(self, instance=None):
        '''Remove ``instance`` from this :class:`Session`. If ``instance``
        is not given, it removes all instances from this :class:`Session`.'''
        if instance is not None:
            sm = self._models.get(instance._meta)
            if sm:
                return sm.expunge(instance)
        else:
            self._models.clear()
python
def expunge(self, instance=None):
        '''Remove ``instance`` from this :class:`Session`. If ``instance``
        is not given, it removes all instances from this :class:`Session`.'''
        if instance is not None:
            sm = self._models.get(instance._meta)
            if sm:
                return sm.expunge(instance)
        else:
            self._models.clear()
[ "def", "expunge", "(", "self", ",", "instance", "=", "None", ")", ":", "if", "instance", "is", "not", "None", ":", "sm", "=", "self", ".", "_models", ".", "get", "(", "instance", ".", "_meta", ")", "if", "sm", ":", "return", "sm", ".", "expunge", "(", "instance", ")", "else", ":", "self", ".", "_models", ".", "clear", "(", ")" ]
Remove ``instance`` from this :class:`Session`. If ``instance`` is not given, it removes all instances from this :class:`Session`.
[ "Remove", "instance", "from", "this", ":", "class", ":", "Session", ".", "If", "instance", "is", "not", "given", "it", "removes", "all", "instances", "from", "this", ":", "class", ":", "Session", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L695-L703
lsbardel/python-stdnet
stdnet/odm/session.py
Session.manager
def manager(self, model):
        '''Retrieve the :class:`Manager` for ``model`` which can be any of
        the values valid for the :meth:`model` method.'''
        try:
            return self.router[model]
        except KeyError:
            meta = getattr(model, '_meta', model)
            if meta.type == 'structure':
                # this is a structure
                if hasattr(model, 'model'):
                    structure_model = model.model
                    if structure_model:
                        return self.manager(structure_model)
                else:
                    manager = self.router.structure(model)
                    if manager:
                        return manager
            raise InvalidTransaction('"%s" not valid in this session' % meta)
python
def manager(self, model):
        '''Retrieve the :class:`Manager` for ``model`` which can be any of
        the values valid for the :meth:`model` method.'''
        try:
            return self.router[model]
        except KeyError:
            meta = getattr(model, '_meta', model)
            if meta.type == 'structure':
                # this is a structure
                if hasattr(model, 'model'):
                    structure_model = model.model
                    if structure_model:
                        return self.manager(structure_model)
                else:
                    manager = self.router.structure(model)
                    if manager:
                        return manager
            raise InvalidTransaction('"%s" not valid in this session' % meta)
[ "def", "manager", "(", "self", ",", "model", ")", ":", "try", ":", "return", "self", ".", "router", "[", "model", "]", "except", "KeyError", ":", "meta", "=", "getattr", "(", "model", ",", "'_meta'", ",", "model", ")", "if", "meta", ".", "type", "==", "'structure'", ":", "# this is a structure\r", "if", "hasattr", "(", "model", ",", "'model'", ")", ":", "structure_model", "=", "model", ".", "model", "if", "structure_model", ":", "return", "self", ".", "manager", "(", "structure_model", ")", "else", ":", "manager", "=", "self", ".", "router", ".", "structure", "(", "model", ")", "if", "manager", ":", "return", "manager", "raise", "InvalidTransaction", "(", "'\"%s\" not valid in this session'", "%", "meta", ")" ]
Retrieve the :class:`Manager` for ``model`` which can be any of the values valid for the :meth:`model` method.
[ "Retrieve", "the", ":", "class", ":", "Manager", "for", "model", "which", "can", "be", "any", "of", "the", "values", "valid", "for", "the", ":", "meth", ":", "model", "method", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L705-L722
lsbardel/python-stdnet
stdnet/odm/session.py
Manager.new
def new(self, *args, **kwargs):
        '''Create a new instance of :attr:`model` and commit it to the
        backend server. This is a shortcut method for the more verbose::

            instance = manager.session().add(MyModel(**kwargs))
        '''
        return self.session().add(self.model(*args, **kwargs))
python
def new(self, *args, **kwargs):
        '''Create a new instance of :attr:`model` and commit it to the
        backend server. This is a shortcut method for the more verbose::

            instance = manager.session().add(MyModel(**kwargs))
        '''
        return self.session().add(self.model(*args, **kwargs))
[ "def", "new", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "session", "(", ")", ".", "add", "(", "self", ".", "model", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
Create a new instance of :attr:`model` and commit it to the backend
        server. This is a shortcut method for the more verbose::

            instance = manager.session().add(MyModel(**kwargs))
[ "Create", "a", "new", "instance", "of", ":", "attr", ":", "model", "and", "commit", "it", "to", "the", "backend", "server", ".", "This", "a", "shortcut", "method", "for", "the", "more", "verbose", "::", "instance", "=", "manager", ".", "session", "()", ".", "add", "(", "MyModel", "(", "**", "kwargs", "))" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L924-L930
lsbardel/python-stdnet
stdnet/odm/session.py
Manager.query
def query(self, session=None):
        '''Returns a new :class:`Query` for :attr:`Manager.model`.'''
        if session is None or session.router is not self.router:
            session = self.session()
        return session.query(self.model)
python
def query(self, session=None):
        '''Returns a new :class:`Query` for :attr:`Manager.model`.'''
        if session is None or session.router is not self.router:
            session = self.session()
        return session.query(self.model)
[ "def", "query", "(", "self", ",", "session", "=", "None", ")", ":", "if", "session", "is", "None", "or", "session", ".", "router", "is", "not", "self", ".", "router", ":", "session", "=", "self", ".", "session", "(", ")", "return", "session", ".", "query", "(", "self", ".", "model", ")" ]
Returns a new :class:`Query` for :attr:`Manager.model`.
[ "Returns", "a", "new", ":", "class", ":", "Query", "for", ":", "attr", ":", "Manager", ".", "model", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L957-L961
lsbardel/python-stdnet
stdnet/odm/session.py
Manager.search
def search(self, text, lookup=None):
        '''Returns a new :class:`Query` for :attr:`Manager.model` with
        a full text search value.'''
        return self.query().search(text, lookup=lookup)
python
def search(self, text, lookup=None):
        '''Returns a new :class:`Query` for :attr:`Manager.model` with
        a full text search value.'''
        return self.query().search(text, lookup=lookup)
[ "def", "search", "(", "self", ",", "text", ",", "lookup", "=", "None", ")", ":", "return", "self", ".", "query", "(", ")", ".", "search", "(", "text", ",", "lookup", "=", "lookup", ")" ]
Returns a new :class:`Query` for :attr:`Manager.model` with a full text search value.
[ "Returns", "a", "new", ":", "class", ":", "Query", "for", ":", "attr", ":", "Manager", ".", "model", "with", "a", "full", "text", "search", "value", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L977-L980
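A hedged sketch, assuming ``MyModel`` was registered with a full-text search engine on its router (``models`` is a placeholder :class:`Router`)::

    qs = models[MyModel].search('bla')   # same as models[MyModel].query().search('bla')
    results = qs.all()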
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
pairs_to_dict
def pairs_to_dict(response, encoding):
    "Create a dict given a list of key/value pairs"
    it = iter(response)
    return dict(((k.decode(encoding), v) for k, v in zip(it, it)))
python
def pairs_to_dict(response, encoding):
    "Create a dict given a list of key/value pairs"
    it = iter(response)
    return dict(((k.decode(encoding), v) for k, v in zip(it, it)))
[ "def", "pairs_to_dict", "(", "response", ",", "encoding", ")", ":", "it", "=", "iter", "(", "response", ")", "return", "dict", "(", "(", "(", "k", ".", "decode", "(", "encoding", ")", ",", "v", ")", "for", "k", ",", "v", "in", "zip", "(", "it", ",", "it", ")", ")", ")" ]
Create a dict given a list of key/value pairs
[ "Create", "a", "dict", "given", "a", "list", "of", "key", "/", "value", "pairs" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L36-L39
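Because the helper simply zips alternating keys and values, its behaviour is easy to pin down with a small example; note that only the keys are decoded, while values are returned untouched::

    response = [b'name', b'bla', b'age', b'42']
    pairs_to_dict(response, 'utf-8')
    # -> {'name': b'bla', 'age': b'42'}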
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
odmrun.load_related
def load_related(self, meta, fname, data, fields, encoding):
        '''Parse data for related objects.'''
        field = meta.dfields[fname]
        if field in meta.multifields:
            fmeta = field.structure_class()._meta
            if fmeta.name in ('hashtable', 'zset'):
                return ((native_str(id, encoding),
                         pairs_to_dict(fdata, encoding))
                        for id, fdata in data)
            else:
                return ((native_str(id, encoding), fdata)
                        for id, fdata in data)
        else:
            # this is data for stdmodel instances
            return self.build(data, meta, fields, fields, encoding)
python
def load_related(self, meta, fname, data, fields, encoding):
        '''Parse data for related objects.'''
        field = meta.dfields[fname]
        if field in meta.multifields:
            fmeta = field.structure_class()._meta
            if fmeta.name in ('hashtable', 'zset'):
                return ((native_str(id, encoding),
                         pairs_to_dict(fdata, encoding))
                        for id, fdata in data)
            else:
                return ((native_str(id, encoding), fdata)
                        for id, fdata in data)
        else:
            # this is data for stdmodel instances
            return self.build(data, meta, fields, fields, encoding)
[ "def", "load_related", "(", "self", ",", "meta", ",", "fname", ",", "data", ",", "fields", ",", "encoding", ")", ":", "field", "=", "meta", ".", "dfields", "[", "fname", "]", "if", "field", "in", "meta", ".", "multifields", ":", "fmeta", "=", "field", ".", "structure_class", "(", ")", ".", "_meta", "if", "fmeta", ".", "name", "in", "(", "'hashtable'", ",", "'zset'", ")", ":", "return", "(", "(", "native_str", "(", "id", ",", "encoding", ")", ",", "pairs_to_dict", "(", "fdata", ",", "encoding", ")", ")", "for", "id", ",", "fdata", "in", "data", ")", "else", ":", "return", "(", "(", "native_str", "(", "id", ",", "encoding", ")", ",", "fdata", ")", "for", "id", ",", "fdata", "in", "data", ")", "else", ":", "# this is data for stdmodel instances\r", "return", "self", ".", "build", "(", "data", ",", "meta", ",", "fields", ",", "fields", ",", "encoding", ")" ]
Parse data for related objects.
[ "Parse", "data", "for", "related", "objects", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L107-L121
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
RedisQuery._execute_query
def _execute_query(self):
        '''Execute the query without fetching data. Returns the number of
        elements in the query.'''
        pipe = self.pipe
        if not self.card:
            if self.meta.ordering:
                self.ismember = getattr(self.backend.client, 'zrank')
                self.card = getattr(pipe, 'zcard')
                self._check_member = self.zism
            else:
                self.ismember = getattr(self.backend.client, 'sismember')
                self.card = getattr(pipe, 'scard')
                self._check_member = self.sism
        else:
            self.ismember = None
        self.card(self.query_key)
        result = yield pipe.execute()
        yield result[-1]
python
def _execute_query(self):
        '''Execute the query without fetching data. Returns the number of
        elements in the query.'''
        pipe = self.pipe
        if not self.card:
            if self.meta.ordering:
                self.ismember = getattr(self.backend.client, 'zrank')
                self.card = getattr(pipe, 'zcard')
                self._check_member = self.zism
            else:
                self.ismember = getattr(self.backend.client, 'sismember')
                self.card = getattr(pipe, 'scard')
                self._check_member = self.sism
        else:
            self.ismember = None
        self.card(self.query_key)
        result = yield pipe.execute()
        yield result[-1]
[ "def", "_execute_query", "(", "self", ")", ":", "pipe", "=", "self", ".", "pipe", "if", "not", "self", ".", "card", ":", "if", "self", ".", "meta", ".", "ordering", ":", "self", ".", "ismember", "=", "getattr", "(", "self", ".", "backend", ".", "client", ",", "'zrank'", ")", "self", ".", "card", "=", "getattr", "(", "pipe", ",", "'zcard'", ")", "self", ".", "_check_member", "=", "self", ".", "zism", "else", ":", "self", ".", "ismember", "=", "getattr", "(", "self", ".", "backend", ".", "client", ",", "'sismember'", ")", "self", ".", "card", "=", "getattr", "(", "pipe", ",", "'scard'", ")", "self", ".", "_check_member", "=", "self", ".", "sism", "else", ":", "self", ".", "ismember", "=", "None", "self", ".", "card", "(", "self", ".", "query_key", ")", "result", "=", "yield", "pipe", ".", "execute", "(", ")", "yield", "result", "[", "-", "1", "]" ]
Execute the query without fetching data. Returns the number of elements in the query.
[ "Execute", "the", "query", "without", "fetching", "data", ".", "Returns", "the", "number", "of", "elements", "in", "the", "query", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L222-L239
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
RedisQuery.order
def order(self, last):
        '''Perform ordering with respect to model fields.'''
        desc = last.desc
        field = last.name
        nested = last.nested
        nested_args = []
        while nested:
            meta = nested.model._meta
            nested_args.extend((self.backend.basekey(meta), nested.name))
            last = nested
            nested = nested.nested
        method = 'ALPHA' if last.field.internal_type == 'text' else ''
        if field == last.model._meta.pkname():
            field = ''
        return {'field': field,
                'method': method,
                'desc': desc,
                'nested': nested_args}
python
def order(self, last):
        '''Perform ordering with respect to model fields.'''
        desc = last.desc
        field = last.name
        nested = last.nested
        nested_args = []
        while nested:
            meta = nested.model._meta
            nested_args.extend((self.backend.basekey(meta), nested.name))
            last = nested
            nested = nested.nested
        method = 'ALPHA' if last.field.internal_type == 'text' else ''
        if field == last.model._meta.pkname():
            field = ''
        return {'field': field,
                'method': method,
                'desc': desc,
                'nested': nested_args}
[ "def", "order", "(", "self", ",", "last", ")", ":", "desc", "=", "last", ".", "desc", "field", "=", "last", ".", "name", "nested", "=", "last", ".", "nested", "nested_args", "=", "[", "]", "while", "nested", ":", "meta", "=", "nested", ".", "model", ".", "_meta", "nested_args", ".", "extend", "(", "(", "self", ".", "backend", ".", "basekey", "(", "meta", ")", ",", "nested", ".", "name", ")", ")", "last", "=", "nested", "nested", "=", "nested", ".", "nested", "method", "=", "'ALPHA'", "if", "last", ".", "field", ".", "internal_type", "==", "'text'", "else", "''", "if", "field", "==", "last", ".", "model", ".", "_meta", ".", "pkname", "(", ")", ":", "field", "=", "''", "return", "{", "'field'", ":", "field", ",", "'method'", ":", "method", ",", "'desc'", ":", "desc", ",", "'nested'", ":", "nested_args", "}" ]
Perform ordering with respect to model fields.
[ "Perform", "ordering", "with", "respect", "model", "fields", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L241-L258
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
RedisQuery.related_lua_args
def related_lua_args(self):
        '''Generator of load_related arguments'''
        related = self.queryelem.select_related
        if related:
            meta = self.meta
            for rel in related:
                field = meta.dfields[rel]
                relmodel = field.relmodel
                bk = self.backend.basekey(relmodel._meta) if relmodel else ''
                fields = list(related[rel])
                if meta.pkname() in fields:
                    fields.remove(meta.pkname())
                if not fields:
                    fields.append('')
                ftype = field.type if field in meta.multifields else ''
                data = {'field': field.attname, 'type': ftype,
                        'bk': bk, 'fields': fields}
                yield field.name, data
python
def related_lua_args(self):
        '''Generator of load_related arguments'''
        related = self.queryelem.select_related
        if related:
            meta = self.meta
            for rel in related:
                field = meta.dfields[rel]
                relmodel = field.relmodel
                bk = self.backend.basekey(relmodel._meta) if relmodel else ''
                fields = list(related[rel])
                if meta.pkname() in fields:
                    fields.remove(meta.pkname())
                if not fields:
                    fields.append('')
                ftype = field.type if field in meta.multifields else ''
                data = {'field': field.attname, 'type': ftype,
                        'bk': bk, 'fields': fields}
                yield field.name, data
[ "def", "related_lua_args", "(", "self", ")", ":", "related", "=", "self", ".", "queryelem", ".", "select_related", "if", "related", ":", "meta", "=", "self", ".", "meta", "for", "rel", "in", "related", ":", "field", "=", "meta", ".", "dfields", "[", "rel", "]", "relmodel", "=", "field", ".", "relmodel", "bk", "=", "self", ".", "backend", ".", "basekey", "(", "relmodel", ".", "_meta", ")", "if", "relmodel", "else", "''", "fields", "=", "list", "(", "related", "[", "rel", "]", ")", "if", "meta", ".", "pkname", "(", ")", "in", "fields", ":", "fields", ".", "remove", "(", "meta", ".", "pkname", "(", ")", ")", "if", "not", "fields", ":", "fields", ".", "append", "(", "''", ")", "ftype", "=", "field", ".", "type", "if", "field", "in", "meta", ".", "multifields", "else", "''", "data", "=", "{", "'field'", ":", "field", ".", "attname", ",", "'type'", ":", "ftype", ",", "'bk'", ":", "bk", ",", "'fields'", ":", "fields", "}", "yield", "field", ".", "name", ",", "data" ]
Generator of load_related arguments
[ "Generator", "of", "load_related", "arguments" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L346-L363
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
Zset.ipop_range
def ipop_range(self, start, stop=None, withscores=True, **options):
        '''Remove and return a range from the ordered set by rank (index).'''
        return self.backend.execute(
            self.client.zpopbyrank(self.id, start, stop,
                                   withscores=withscores, **options),
            partial(self._range, withscores))
python
def ipop_range(self, start, stop=None, withscores=True, **options):
        '''Remove and return a range from the ordered set by rank (index).'''
        return self.backend.execute(
            self.client.zpopbyrank(self.id, start, stop,
                                   withscores=withscores, **options),
            partial(self._range, withscores))
[ "def", "ipop_range", "(", "self", ",", "start", ",", "stop", "=", "None", ",", "withscores", "=", "True", ",", "*", "*", "options", ")", ":", "return", "self", ".", "backend", ".", "execute", "(", "self", ".", "client", ".", "zpopbyrank", "(", "self", ".", "id", ",", "start", ",", "stop", ",", "withscores", "=", "withscores", ",", "*", "*", "options", ")", ",", "partial", "(", "self", ".", "_range", ",", "withscores", ")", ")" ]
Remove and return a range from the ordered set by rank (index).
[ "Remove", "and", "return", "a", "range", "from", "the", "ordered", "set", "by", "rank", "(", "index", ")", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L482-L487
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
Zset.pop_range
def pop_range(self, start, stop=None, withscores=True, **options):
        '''Remove and return a range from the ordered set by score.'''
        return self.backend.execute(
            self.client.zpopbyscore(self.id, start, stop,
                                    withscores=withscores, **options),
            partial(self._range, withscores))
python
def pop_range(self, start, stop=None, withscores=True, **options):
        '''Remove and return a range from the ordered set by score.'''
        return self.backend.execute(
            self.client.zpopbyscore(self.id, start, stop,
                                    withscores=withscores, **options),
            partial(self._range, withscores))
[ "def", "pop_range", "(", "self", ",", "start", ",", "stop", "=", "None", ",", "withscores", "=", "True", ",", "*", "*", "options", ")", ":", "return", "self", ".", "backend", ".", "execute", "(", "self", ".", "client", ".", "zpopbyscore", "(", "self", ".", "id", ",", "start", ",", "stop", ",", "withscores", "=", "withscores", ",", "*", "*", "options", ")", ",", "partial", "(", "self", ".", "_range", ",", "withscores", ")", ")" ]
Remove and return a range from the ordered set by score.
[ "Remove", "and", "return", "a", "range", "from", "the", "ordered", "set", "by", "score", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L489-L494
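A hedged sketch of the two pop variants on an ordered-set structure; ``zs`` is a placeholder :class:`Zset`, and ``zpopbyrank``/``zpopbyscore`` are stdnet's script-backed redis commands rather than standard redis ones::

    items = zs.ipop_range(0, 4)        # remove & return the first five members by rank
    items = zs.pop_range(0.0, 10.5)    # remove & return members with score in [0.0, 10.5]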
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
BackendDataServer.meta
def meta(self, meta):
        '''Extract model metadata for lua script stdnet/lib/lua/odm.lua'''
        data = meta.as_dict()
        data['namespace'] = self.basekey(meta)
        return data
python
def meta(self, meta):
        '''Extract model metadata for lua script stdnet/lib/lua/odm.lua'''
        data = meta.as_dict()
        data['namespace'] = self.basekey(meta)
        return data
[ "def", "meta", "(", "self", ",", "meta", ")", ":", "data", "=", "meta", ".", "as_dict", "(", ")", "data", "[", "'namespace'", "]", "=", "self", ".", "basekey", "(", "meta", ")", "return", "data" ]
Extract model metadata for lua script stdnet/lib/lua/odm.lua
[ "Extract", "model", "metadata", "for", "lua", "script", "stdnet", "/", "lib", "/", "lua", "/", "odm", ".", "lua" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L755-L759
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
BackendDataServer.execute_session
def execute_session(self, session_data):
        '''Execute a session in redis.'''
        pipe = self.client.pipeline()
        for sm in session_data:  # loop through model sessions
            meta = sm.meta
            if sm.structures:
                self.flush_structure(sm, pipe)
            delquery = None
            if sm.deletes is not None:
                delquery = sm.deletes.backend_query(pipe=pipe)
                self.accumulate_delete(pipe, delquery)
            if sm.dirty:
                meta_info = json.dumps(self.meta(meta))
                lua_data = [len(sm.dirty)]
                processed = []
                for instance in sm.dirty:
                    state = instance.get_state()
                    if not meta.is_valid(instance):
                        raise FieldValueError(
                            json.dumps(instance._dbdata['errors']))
                    score = MIN_FLOAT
                    if meta.ordering:
                        if meta.ordering.auto:
                            score = meta.ordering.name.incrby
                        else:
                            v = getattr(instance, meta.ordering.name, None)
                            if v is not None:
                                score = meta.ordering.field.scorefun(v)
                    data = instance._dbdata['cleaned_data']
                    action = state.action
                    prev_id = state.iid if state.persistent else ''
                    id = instance.pkvalue() or ''
                    data = flat_mapping(data)
                    lua_data.extend((action, prev_id, id, score, len(data)))
                    lua_data.extend(data)
                    processed.append(state.iid)
                self.odmrun(pipe, 'commit', meta, (), meta_info,
                            *lua_data, iids=processed)
        return pipe.execute()
python
def execute_session(self, session_data):
        '''Execute a session in redis.'''
        pipe = self.client.pipeline()
        for sm in session_data:  # loop through model sessions
            meta = sm.meta
            if sm.structures:
                self.flush_structure(sm, pipe)
            delquery = None
            if sm.deletes is not None:
                delquery = sm.deletes.backend_query(pipe=pipe)
                self.accumulate_delete(pipe, delquery)
            if sm.dirty:
                meta_info = json.dumps(self.meta(meta))
                lua_data = [len(sm.dirty)]
                processed = []
                for instance in sm.dirty:
                    state = instance.get_state()
                    if not meta.is_valid(instance):
                        raise FieldValueError(
                            json.dumps(instance._dbdata['errors']))
                    score = MIN_FLOAT
                    if meta.ordering:
                        if meta.ordering.auto:
                            score = meta.ordering.name.incrby
                        else:
                            v = getattr(instance, meta.ordering.name, None)
                            if v is not None:
                                score = meta.ordering.field.scorefun(v)
                    data = instance._dbdata['cleaned_data']
                    action = state.action
                    prev_id = state.iid if state.persistent else ''
                    id = instance.pkvalue() or ''
                    data = flat_mapping(data)
                    lua_data.extend((action, prev_id, id, score, len(data)))
                    lua_data.extend(data)
                    processed.append(state.iid)
                self.odmrun(pipe, 'commit', meta, (), meta_info,
                            *lua_data, iids=processed)
        return pipe.execute()
[ "def", "execute_session", "(", "self", ",", "session_data", ")", ":", "pipe", "=", "self", ".", "client", ".", "pipeline", "(", ")", "for", "sm", "in", "session_data", ":", "# loop through model sessions\r", "meta", "=", "sm", ".", "meta", "if", "sm", ".", "structures", ":", "self", ".", "flush_structure", "(", "sm", ",", "pipe", ")", "delquery", "=", "None", "if", "sm", ".", "deletes", "is", "not", "None", ":", "delquery", "=", "sm", ".", "deletes", ".", "backend_query", "(", "pipe", "=", "pipe", ")", "self", ".", "accumulate_delete", "(", "pipe", ",", "delquery", ")", "if", "sm", ".", "dirty", ":", "meta_info", "=", "json", ".", "dumps", "(", "self", ".", "meta", "(", "meta", ")", ")", "lua_data", "=", "[", "len", "(", "sm", ".", "dirty", ")", "]", "processed", "=", "[", "]", "for", "instance", "in", "sm", ".", "dirty", ":", "state", "=", "instance", ".", "get_state", "(", ")", "if", "not", "meta", ".", "is_valid", "(", "instance", ")", ":", "raise", "FieldValueError", "(", "json", ".", "dumps", "(", "instance", ".", "_dbdata", "[", "'errors'", "]", ")", ")", "score", "=", "MIN_FLOAT", "if", "meta", ".", "ordering", ":", "if", "meta", ".", "ordering", ".", "auto", ":", "score", "=", "meta", ".", "ordering", ".", "name", ".", "incrby", "else", ":", "v", "=", "getattr", "(", "instance", ",", "meta", ".", "ordering", ".", "name", ",", "None", ")", "if", "v", "is", "not", "None", ":", "score", "=", "meta", ".", "ordering", ".", "field", ".", "scorefun", "(", "v", ")", "data", "=", "instance", ".", "_dbdata", "[", "'cleaned_data'", "]", "action", "=", "state", ".", "action", "prev_id", "=", "state", ".", "iid", "if", "state", ".", "persistent", "else", "''", "id", "=", "instance", ".", "pkvalue", "(", ")", "or", "''", "data", "=", "flat_mapping", "(", "data", ")", "lua_data", ".", "extend", "(", "(", "action", ",", "prev_id", ",", "id", ",", "score", ",", "len", "(", "data", ")", ")", ")", "lua_data", ".", "extend", "(", "data", ")", "processed", ".", "append", "(", "state", ".", "iid", ")", "self", ".", "odmrun", "(", "pipe", ",", "'commit'", ",", "meta", ",", "(", ")", ",", "meta_info", ",", "*", "lua_data", ",", "iids", "=", "processed", ")", "return", "pipe", ".", "execute", "(", ")" ]
Execute a session in redis.
[ "Execute", "a", "session", "in", "redis", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L776-L814
lsbardel/python-stdnet
stdnet/backends/redisb/__init__.py
BackendDataServer.flush
def flush(self, meta=None):
        '''Flush all model keys from the database'''
        pattern = self.basekey(meta) if meta else self.namespace
        return self.client.delpattern('%s*' % pattern)
python
def flush(self, meta=None):
        '''Flush all model keys from the database'''
        pattern = self.basekey(meta) if meta else self.namespace
        return self.client.delpattern('%s*' % pattern)
[ "def", "flush", "(", "self", ",", "meta", "=", "None", ")", ":", "pattern", "=", "self", ".", "basekey", "(", "meta", ")", "if", "meta", "else", "self", ".", "namespace", "return", "self", ".", "client", ".", "delpattern", "(", "'%s*'", "%", "pattern", ")" ]
Flush all model keys from the database
[ "Flush", "all", "model", "keys", "from", "the", "database" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/__init__.py#L850-L853
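For example (``backend`` is a placeholder :class:`BackendDataServer` and ``MyModel`` a placeholder model; note the trailing pattern match, so every key under the prefix is removed)::

    backend.flush(MyModel._meta)   # remove all keys for one model
    backend.flush()                # remove every key in this backend's namespace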
rodluger/everest
everest/gp.py
GetCovariance
def GetCovariance(kernel, kernel_params, time, errors):
    '''
    Returns the covariance matrix for a given light curve segment.

    :param array_like kernel_params: A list of kernel parameters \
        (white noise amplitude, red noise amplitude, and red noise timescale)
    :param array_like time: The time array (*N*)
    :param array_like errors: The data error array (*N*)

    :returns: The covariance matrix :py:obj:`K` (*N*,*N*)

    '''
    # NOTE: We purposefully compute the covariance matrix
    # *without* the GP white noise term
    K = np.diag(errors ** 2)
    K += GP(kernel, kernel_params, white=False).get_matrix(time)
    return K
python
def GetCovariance(kernel, kernel_params, time, errors):
    '''
    Returns the covariance matrix for a given light curve segment.

    :param array_like kernel_params: A list of kernel parameters \
        (white noise amplitude, red noise amplitude, and red noise timescale)
    :param array_like time: The time array (*N*)
    :param array_like errors: The data error array (*N*)

    :returns: The covariance matrix :py:obj:`K` (*N*,*N*)

    '''
    # NOTE: We purposefully compute the covariance matrix
    # *without* the GP white noise term
    K = np.diag(errors ** 2)
    K += GP(kernel, kernel_params, white=False).get_matrix(time)
    return K
[ "def", "GetCovariance", "(", "kernel", ",", "kernel_params", ",", "time", ",", "errors", ")", ":", "# NOTE: We purposefully compute the covariance matrix", "# *without* the GP white noise term", "K", "=", "np", ".", "diag", "(", "errors", "**", "2", ")", "K", "+=", "GP", "(", "kernel", ",", "kernel_params", ",", "white", "=", "False", ")", ".", "get_matrix", "(", "time", ")", "return", "K" ]
Returns the covariance matrix for a given light curve segment. :param array_like kernel_params: A list of kernel parameters \ (white noise amplitude, red noise amplitude, and red noise timescale) :param array_like time: The time array (*N*) :param array_like errors: The data error array (*N*) :returns: The covariance matrix :py:obj:`K` (*N*,*N*)
[ "Returns", "the", "covariance", "matrix", "for", "a", "given", "light", "curve", "segment", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/gp.py#L71-L89
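A minimal sketch of calling GetCovariance directly. The 'Basic' kernel name and the three-parameter layout (white amplitude, red amplitude, red timescale) are inferred from GetKernelParams below; the import path follows the record's func_path_in_repository:

    import numpy as np
    from everest.gp import GetCovariance  # path per the record above

    time = np.linspace(0.0, 10.0, 50)   # 50 cadences over 10 days
    errors = 1e-3 * np.ones_like(time)  # per-cadence uncertainties
    K = GetCovariance('Basic', [1e-3, 5e-4, 1.0], time, errors)
    print(K.shape)                      # (50, 50)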
rodluger/everest
everest/gp.py
GetKernelParams
def GetKernelParams(time, flux, errors, kernel='Basic', mask=[], giter=3, gmaxf=200, guess=None): ''' Optimizes the GP by training it on the current de-trended light curve. Returns the white noise amplitude, red noise amplitude, and red noise timescale. :param array_like time: The time array :param array_like flux: The flux array :param array_like errors: The flux errors array :param array_like mask: The indices to be masked when training the GP. \ Default `[]` :param int giter: The number of iterations. Default 3 :param int gmaxf: The maximum number of function evaluations. Default 200 :param tuple guess: The guess to initialize the minimization with. \ Default :py:obj:`None` ''' log.info("Optimizing the GP...") # Save a copy of time and errors for later time_copy = np.array(time) errors_copy = np.array(errors) # Apply the mask time = np.delete(time, mask) flux = np.delete(flux, mask) errors = np.delete(errors, mask) # Remove 5-sigma outliers to be safe f = flux - savgol_filter(flux, 49, 2) + np.nanmedian(flux) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) mask = np.where((f > med + 5 * MAD) | (f < med - 5 * MAD))[0] time = np.delete(time, mask) flux = np.delete(flux, mask) errors = np.delete(errors, mask) # Initial guesses and bounds white = np.nanmedian([np.nanstd(c) for c in Chunks(flux, 13)]) amp = np.nanstd(flux) tau = 30.0 if kernel == 'Basic': if guess is None: guess = [white, amp, tau] bounds = [[0.1 * white, 10. * white], [1., 10000. * amp], [0.5, 100.]] elif kernel == 'QuasiPeriodic': if guess is None: guess = [white, amp, tau, 1., 20.] bounds = [[0.1 * white, 10. * white], [1., 10000. * amp], [1e-5, 1e2], [0.02, 100.]] else: raise ValueError('Invalid value for `kernel`.') # Loop llbest = -np.inf xbest = np.array(guess) for i in range(giter): # Randomize an initial guess iguess = [np.inf for g in guess] for j, b in enumerate(bounds): tries = 0 while (iguess[j] < b[0]) or (iguess[j] > b[1]): iguess[j] = (1 + 0.5 * np.random.randn()) * guess[j] tries += 1 if tries > 100: iguess[j] = b[0] + np.random.random() * (b[1] - b[0]) break # Optimize x = fmin_l_bfgs_b(NegLnLike, iguess, approx_grad=False, bounds=bounds, args=(time, flux, errors, kernel), maxfun=gmaxf) log.info('Iteration #%d/%d:' % (i + 1, giter)) log.info(' ' + x[2]['task'].decode('utf-8')) log.info(' ' + 'Function calls: %d' % x[2]['funcalls']) log.info(' ' + 'Log-likelihood: %.3e' % -x[1]) if kernel == 'Basic': log.info(' ' + 'White noise : %.3e (%.1f x error bars)' % (x[0][0], x[0][0] / np.nanmedian(errors))) log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' % (x[0][1], x[0][1] / np.nanstd(flux))) log.info(' ' + 'Red timescale : %.2f days' % x[0][2]) elif kernel == 'QuasiPeriodic': log.info(' ' + 'White noise : %.3e (%.1f x error bars)' % (x[0][0], x[0][0] / np.nanmedian(errors))) log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' % (x[0][1], x[0][1] / np.nanstd(flux))) log.info(' ' + 'Gamma : %.3e' % x[0][2]) log.info(' ' + 'Period : %.2f days' % x[0][3]) if -x[1] > llbest: llbest = -x[1] xbest = np.array(x[0]) return xbest
python
def GetKernelParams(time, flux, errors, kernel='Basic', mask=[], giter=3, gmaxf=200, guess=None): ''' Optimizes the GP by training it on the current de-trended light curve. Returns the white noise amplitude, red noise amplitude, and red noise timescale. :param array_like time: The time array :param array_like flux: The flux array :param array_like errors: The flux errors array :param array_like mask: The indices to be masked when training the GP. \ Default `[]` :param int giter: The number of iterations. Default 3 :param int gmaxf: The maximum number of function evaluations. Default 200 :param tuple guess: The guess to initialize the minimization with. \ Default :py:obj:`None` ''' log.info("Optimizing the GP...") # Save a copy of time and errors for later time_copy = np.array(time) errors_copy = np.array(errors) # Apply the mask time = np.delete(time, mask) flux = np.delete(flux, mask) errors = np.delete(errors, mask) # Remove 5-sigma outliers to be safe f = flux - savgol_filter(flux, 49, 2) + np.nanmedian(flux) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) mask = np.where((f > med + 5 * MAD) | (f < med - 5 * MAD))[0] time = np.delete(time, mask) flux = np.delete(flux, mask) errors = np.delete(errors, mask) # Initial guesses and bounds white = np.nanmedian([np.nanstd(c) for c in Chunks(flux, 13)]) amp = np.nanstd(flux) tau = 30.0 if kernel == 'Basic': if guess is None: guess = [white, amp, tau] bounds = [[0.1 * white, 10. * white], [1., 10000. * amp], [0.5, 100.]] elif kernel == 'QuasiPeriodic': if guess is None: guess = [white, amp, tau, 1., 20.] bounds = [[0.1 * white, 10. * white], [1., 10000. * amp], [1e-5, 1e2], [0.02, 100.]] else: raise ValueError('Invalid value for `kernel`.') # Loop llbest = -np.inf xbest = np.array(guess) for i in range(giter): # Randomize an initial guess iguess = [np.inf for g in guess] for j, b in enumerate(bounds): tries = 0 while (iguess[j] < b[0]) or (iguess[j] > b[1]): iguess[j] = (1 + 0.5 * np.random.randn()) * guess[j] tries += 1 if tries > 100: iguess[j] = b[0] + np.random.random() * (b[1] - b[0]) break # Optimize x = fmin_l_bfgs_b(NegLnLike, iguess, approx_grad=False, bounds=bounds, args=(time, flux, errors, kernel), maxfun=gmaxf) log.info('Iteration #%d/%d:' % (i + 1, giter)) log.info(' ' + x[2]['task'].decode('utf-8')) log.info(' ' + 'Function calls: %d' % x[2]['funcalls']) log.info(' ' + 'Log-likelihood: %.3e' % -x[1]) if kernel == 'Basic': log.info(' ' + 'White noise : %.3e (%.1f x error bars)' % (x[0][0], x[0][0] / np.nanmedian(errors))) log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' % (x[0][1], x[0][1] / np.nanstd(flux))) log.info(' ' + 'Red timescale : %.2f days' % x[0][2]) elif kernel == 'QuasiPeriodic': log.info(' ' + 'White noise : %.3e (%.1f x error bars)' % (x[0][0], x[0][0] / np.nanmedian(errors))) log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' % (x[0][1], x[0][1] / np.nanstd(flux))) log.info(' ' + 'Gamma : %.3e' % x[0][2]) log.info(' ' + 'Period : %.2f days' % x[0][3]) if -x[1] > llbest: llbest = -x[1] xbest = np.array(x[0]) return xbest
[ "def", "GetKernelParams", "(", "time", ",", "flux", ",", "errors", ",", "kernel", "=", "'Basic'", ",", "mask", "=", "[", "]", ",", "giter", "=", "3", ",", "gmaxf", "=", "200", ",", "guess", "=", "None", ")", ":", "log", ".", "info", "(", "\"Optimizing the GP...\"", ")", "# Save a copy of time and errors for later", "time_copy", "=", "np", ".", "array", "(", "time", ")", "errors_copy", "=", "np", ".", "array", "(", "errors", ")", "# Apply the mask", "time", "=", "np", ".", "delete", "(", "time", ",", "mask", ")", "flux", "=", "np", ".", "delete", "(", "flux", ",", "mask", ")", "errors", "=", "np", ".", "delete", "(", "errors", ",", "mask", ")", "# Remove 5-sigma outliers to be safe", "f", "=", "flux", "-", "savgol_filter", "(", "flux", ",", "49", ",", "2", ")", "+", "np", ".", "nanmedian", "(", "flux", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "mask", "=", "np", ".", "where", "(", "(", "f", ">", "med", "+", "5", "*", "MAD", ")", "|", "(", "f", "<", "med", "-", "5", "*", "MAD", ")", ")", "[", "0", "]", "time", "=", "np", ".", "delete", "(", "time", ",", "mask", ")", "flux", "=", "np", ".", "delete", "(", "flux", ",", "mask", ")", "errors", "=", "np", ".", "delete", "(", "errors", ",", "mask", ")", "# Initial guesses and bounds", "white", "=", "np", ".", "nanmedian", "(", "[", "np", ".", "nanstd", "(", "c", ")", "for", "c", "in", "Chunks", "(", "flux", ",", "13", ")", "]", ")", "amp", "=", "np", ".", "nanstd", "(", "flux", ")", "tau", "=", "30.0", "if", "kernel", "==", "'Basic'", ":", "if", "guess", "is", "None", ":", "guess", "=", "[", "white", ",", "amp", ",", "tau", "]", "bounds", "=", "[", "[", "0.1", "*", "white", ",", "10.", "*", "white", "]", ",", "[", "1.", ",", "10000.", "*", "amp", "]", ",", "[", "0.5", ",", "100.", "]", "]", "elif", "kernel", "==", "'QuasiPeriodic'", ":", "if", "guess", "is", "None", ":", "guess", "=", "[", "white", ",", "amp", ",", "tau", ",", "1.", ",", "20.", "]", "bounds", "=", "[", "[", "0.1", "*", "white", ",", "10.", "*", "white", "]", ",", "[", "1.", ",", "10000.", "*", "amp", "]", ",", "[", "1e-5", ",", "1e2", "]", ",", "[", "0.02", ",", "100.", "]", "]", "else", ":", "raise", "ValueError", "(", "'Invalid value for `kernel`.'", ")", "# Loop", "llbest", "=", "-", "np", ".", "inf", "xbest", "=", "np", ".", "array", "(", "guess", ")", "for", "i", "in", "range", "(", "giter", ")", ":", "# Randomize an initial guess", "iguess", "=", "[", "np", ".", "inf", "for", "g", "in", "guess", "]", "for", "j", ",", "b", "in", "enumerate", "(", "bounds", ")", ":", "tries", "=", "0", "while", "(", "iguess", "[", "j", "]", "<", "b", "[", "0", "]", ")", "or", "(", "iguess", "[", "j", "]", ">", "b", "[", "1", "]", ")", ":", "iguess", "[", "j", "]", "=", "(", "1", "+", "0.5", "*", "np", ".", "random", ".", "randn", "(", ")", ")", "*", "guess", "[", "j", "]", "tries", "+=", "1", "if", "tries", ">", "100", ":", "iguess", "[", "j", "]", "=", "b", "[", "0", "]", "+", "np", ".", "random", ".", "random", "(", ")", "*", "(", "b", "[", "1", "]", "-", "b", "[", "0", "]", ")", "break", "# Optimize", "x", "=", "fmin_l_bfgs_b", "(", "NegLnLike", ",", "iguess", ",", "approx_grad", "=", "False", ",", "bounds", "=", "bounds", ",", "args", "=", "(", "time", ",", "flux", ",", "errors", ",", "kernel", ")", ",", "maxfun", "=", "gmaxf", ")", "log", ".", "info", "(", "'Iteration #%d/%d:'", "%", "(", "i", "+", "1", ",", "giter", ")", ")", "log", ".", "info", "(", "' '", "+", "x", 
"[", "2", "]", "[", "'task'", "]", ".", "decode", "(", "'utf-8'", ")", ")", "log", ".", "info", "(", "' '", "+", "'Function calls: %d'", "%", "x", "[", "2", "]", "[", "'funcalls'", "]", ")", "log", ".", "info", "(", "' '", "+", "'Log-likelihood: %.3e'", "%", "-", "x", "[", "1", "]", ")", "if", "kernel", "==", "'Basic'", ":", "log", ".", "info", "(", "' '", "+", "'White noise : %.3e (%.1f x error bars)'", "%", "(", "x", "[", "0", "]", "[", "0", "]", ",", "x", "[", "0", "]", "[", "0", "]", "/", "np", ".", "nanmedian", "(", "errors", ")", ")", ")", "log", ".", "info", "(", "' '", "+", "'Red amplitude : %.3e (%.1f x stand dev)'", "%", "(", "x", "[", "0", "]", "[", "1", "]", ",", "x", "[", "0", "]", "[", "1", "]", "/", "np", ".", "nanstd", "(", "flux", ")", ")", ")", "log", ".", "info", "(", "' '", "+", "'Red timescale : %.2f days'", "%", "x", "[", "0", "]", "[", "2", "]", ")", "elif", "kernel", "==", "'QuasiPeriodic'", ":", "log", ".", "info", "(", "' '", "+", "'White noise : %.3e (%.1f x error bars)'", "%", "(", "x", "[", "0", "]", "[", "0", "]", ",", "x", "[", "0", "]", "[", "0", "]", "/", "np", ".", "nanmedian", "(", "errors", ")", ")", ")", "log", ".", "info", "(", "' '", "+", "'Red amplitude : %.3e (%.1f x stand dev)'", "%", "(", "x", "[", "0", "]", "[", "1", "]", ",", "x", "[", "0", "]", "[", "1", "]", "/", "np", ".", "nanstd", "(", "flux", ")", ")", ")", "log", ".", "info", "(", "' '", "+", "'Gamma : %.3e'", "%", "x", "[", "0", "]", "[", "2", "]", ")", "log", ".", "info", "(", "' '", "+", "'Period : %.2f days'", "%", "x", "[", "0", "]", "[", "3", "]", ")", "if", "-", "x", "[", "1", "]", ">", "llbest", ":", "llbest", "=", "-", "x", "[", "1", "]", "xbest", "=", "np", ".", "array", "(", "x", "[", "0", "]", ")", "return", "xbest" ]
Optimizes the GP by training it on the current de-trended light curve. Returns the white noise amplitude, red noise amplitude, and red noise timescale. :param array_like time: The time array :param array_like flux: The flux array :param array_like errors: The flux errors array :param array_like mask: The indices to be masked when training the GP. \ Default `[]` :param int giter: The number of iterations. Default 3 :param int gmaxf: The maximum number of function evaluations. Default 200 :param tuple guess: The guess to initialize the minimization with. \ Default :py:obj:`None`
[ "Optimizes", "the", "GP", "by", "training", "it", "on", "the", "current", "de", "-", "trended", "light", "curve", ".", "Returns", "the", "white", "noise", "amplitude", "red", "noise", "amplitude", "and", "red", "noise", "timescale", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/gp.py#L92-L192
rodluger/everest
everest/gp.py
NegLnLike
def NegLnLike(x, time, flux, errors, kernel): ''' Returns the negative log-likelihood function and its gradient. ''' gp = GP(kernel, x, white=True) gp.compute(time, errors) if OLDGEORGE: nll = -gp.lnlikelihood(flux) # NOTE: There was a bug on this next line! Used to be # # ngr = -gp.grad_lnlikelihood(flux) / gp.kernel.pars # # But I think we want # # dlogL/dx = dlogL/dlogx^2 * dlogx^2/dx^2 * dx^2/dx # = gp.grad_lnlikelihood() * 1/x^2 * 2x # = 2 * gp.grad_lnlikelihood() / x # = 2 * gp.grad_lnlikelihood() / np.sqrt(x^2) # = 2 * gp.grad_lnlikelihood() / np.sqrt(gp.kernel.pars) # # (with a negative sign out front for the negative gradient). # So we probably weren't optimizing the GP correctly! This affects # all campaigns through C13. It's not a *huge* deal, since the sign # of the gradient was correct and the model isn't that sensitive to # the value of the hyperparameters, but it may have contributed to # the poor performance on super variable stars. In most cases it means # the solver takes longer to converge and isn't as good at finding # the minimum. ngr = -2 * gp.grad_lnlikelihood(flux) / np.sqrt(gp.kernel.pars) else: nll = -gp.log_likelihood(flux) ngr = -2 * gp.grad_log_likelihood(flux) / \ np.sqrt(np.exp(gp.get_parameter_vector())) return nll, ngr
python
def NegLnLike(x, time, flux, errors, kernel): ''' Returns the negative log-likelihood function and its gradient. ''' gp = GP(kernel, x, white=True) gp.compute(time, errors) if OLDGEORGE: nll = -gp.lnlikelihood(flux) # NOTE: There was a bug on this next line! Used to be # # ngr = -gp.grad_lnlikelihood(flux) / gp.kernel.pars # # But I think we want # # dlogL/dx = dlogL/dlogx^2 * dlogx^2/dx^2 * dx^2/dx # = gp.grad_lnlikelihood() * 1/x^2 * 2x # = 2 * gp.grad_lnlikelihood() / x # = 2 * gp.grad_lnlikelihood() / np.sqrt(x^2) # = 2 * gp.grad_lnlikelihood() / np.sqrt(gp.kernel.pars) # # (with a negative sign out front for the negative gradient). # So we probably weren't optimizing the GP correctly! This affects # all campaigns through C13. It's not a *huge* deal, since the sign # of the gradient was correct and the model isn't that sensitive to # the value of the hyperparameters, but it may have contributed to # the poor performance on super variable stars. In most cases it means # the solver takes longer to converge and isn't as good at finding # the minimum. ngr = -2 * gp.grad_lnlikelihood(flux) / np.sqrt(gp.kernel.pars) else: nll = -gp.log_likelihood(flux) ngr = -2 * gp.grad_log_likelihood(flux) / \ np.sqrt(np.exp(gp.get_parameter_vector())) return nll, ngr
[ "def", "NegLnLike", "(", "x", ",", "time", ",", "flux", ",", "errors", ",", "kernel", ")", ":", "gp", "=", "GP", "(", "kernel", ",", "x", ",", "white", "=", "True", ")", "gp", ".", "compute", "(", "time", ",", "errors", ")", "if", "OLDGEORGE", ":", "nll", "=", "-", "gp", ".", "lnlikelihood", "(", "flux", ")", "# NOTE: There was a bug on this next line! Used to be", "#", "# ngr = -gp.grad_lnlikelihood(flux) / gp.kernel.pars", "#", "# But I think we want", "#", "# dlogL/dx = dlogL/dlogx^2 * dlogx^2/dx^2 * dx^2/dx", "# = gp.grad_lnlikelihood() * 1/x^2 * 2x", "# = 2 * gp.grad_lnlikelihood() / x", "# = 2 * gp.grad_lnlikelihood() / np.sqrt(x^2)", "# = 2 * gp.grad_lnlikelihood() / np.sqrt(gp.kernel.pars)", "#", "# (with a negative sign out front for the negative gradient).", "# So we probably weren't optimizing the GP correctly! This affects", "# all campaigns through C13. It's not a *huge* deal, since the sign", "# of the gradient was correct and the model isn't that sensitive to", "# the value of the hyperparameters, but it may have contributed to", "# the poor performance on super variable stars. In most cases it means", "# the solver takes longer to converge and isn't as good at finding", "# the minimum.", "ngr", "=", "-", "2", "*", "gp", ".", "grad_lnlikelihood", "(", "flux", ")", "/", "np", ".", "sqrt", "(", "gp", ".", "kernel", ".", "pars", ")", "else", ":", "nll", "=", "-", "gp", ".", "log_likelihood", "(", "flux", ")", "ngr", "=", "-", "2", "*", "gp", ".", "grad_log_likelihood", "(", "flux", ")", "/", "np", ".", "sqrt", "(", "np", ".", "exp", "(", "gp", ".", "get_parameter_vector", "(", ")", ")", ")", "return", "nll", ",", "ngr" ]
Returns the negative log-likelihood function and its gradient.
[ "Returns", "the", "negative", "log", "-", "likelihood", "function", "and", "its", "gradient", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/gp.py#L195-L231
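Written out, the chain-rule correction in the NOTE above is short. Assuming, as the NOTE does, that george parameterizes each hyperparameter internally by its log-square, $\theta = \ln x^2$:

    \frac{dL}{dx} = \frac{dL}{d\theta}\,\frac{d\theta}{dx}
                  = \frac{dL}{d\theta}\cdot\frac{2}{x}
                  = \frac{2}{\sqrt{x^2}}\,\frac{dL}{d\theta}

which is exactly 2 * gp.grad_lnlikelihood(flux) / np.sqrt(gp.kernel.pars), with a leading minus sign because the function returns the negative gradient.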
lsbardel/python-stdnet
stdnet/utils/dates.py
missing_intervals
def missing_intervals(startdate, enddate, start, end, dateconverter=None, parseinterval=None, intervals=None): '''Given a ``startdate`` and an ``enddate``, evaluate the date intervals from which data is not available. It returns a list of two-dimensional tuples containing start and end date for the interval. The list could contain 0, 1 or 2 tuples.''' parseinterval = parseinterval or default_parse_interval dateconverter = dateconverter or todate startdate = dateconverter(parseinterval(startdate, 0)) enddate = max(startdate, dateconverter(parseinterval(enddate, 0))) if intervals is not None and not isinstance(intervals, Intervals): intervals = Intervals(intervals) calc_intervals = Intervals() # we have some history already if start: # the startdate not available if startdate < start: calc_start = startdate calc_end = parseinterval(start, -1) if calc_end >= calc_start: calc_intervals.append(Interval(calc_start, calc_end)) if enddate > end: calc_start = parseinterval(end, 1) calc_end = enddate if calc_end >= calc_start: calc_intervals.append(Interval(calc_start, calc_end)) else: start = startdate end = enddate calc_intervals.append(Interval(startdate, enddate)) if calc_intervals: if intervals: calc_intervals.extend(intervals) elif intervals: calc_intervals = intervals return calc_intervals
python
def missing_intervals(startdate, enddate, start, end, dateconverter=None, parseinterval=None, intervals=None): '''Given a ``startdate`` and an ``enddate``, evaluate the date intervals from which data is not available. It returns a list of two-dimensional tuples containing start and end date for the interval. The list could contain 0, 1 or 2 tuples.''' parseinterval = parseinterval or default_parse_interval dateconverter = dateconverter or todate startdate = dateconverter(parseinterval(startdate, 0)) enddate = max(startdate, dateconverter(parseinterval(enddate, 0))) if intervals is not None and not isinstance(intervals, Intervals): intervals = Intervals(intervals) calc_intervals = Intervals() # we have some history already if start: # the startdate not available if startdate < start: calc_start = startdate calc_end = parseinterval(start, -1) if calc_end >= calc_start: calc_intervals.append(Interval(calc_start, calc_end)) if enddate > end: calc_start = parseinterval(end, 1) calc_end = enddate if calc_end >= calc_start: calc_intervals.append(Interval(calc_start, calc_end)) else: start = startdate end = enddate calc_intervals.append(Interval(startdate, enddate)) if calc_intervals: if intervals: calc_intervals.extend(intervals) elif intervals: calc_intervals = intervals return calc_intervals
[ "def", "missing_intervals", "(", "startdate", ",", "enddate", ",", "start", ",", "end", ",", "dateconverter", "=", "None", ",", "parseinterval", "=", "None", ",", "intervals", "=", "None", ")", ":", "parseinterval", "=", "parseinterval", "or", "default_parse_interval", "dateconverter", "=", "dateconverter", "or", "todate", "startdate", "=", "dateconverter", "(", "parseinterval", "(", "startdate", ",", "0", ")", ")", "enddate", "=", "max", "(", "startdate", ",", "dateconverter", "(", "parseinterval", "(", "enddate", ",", "0", ")", ")", ")", "if", "intervals", "is", "not", "None", "and", "not", "isinstance", "(", "intervals", ",", "Intervals", ")", ":", "intervals", "=", "Intervals", "(", "intervals", ")", "calc_intervals", "=", "Intervals", "(", ")", "# we have some history already\r", "if", "start", ":", "# the startdate not available\r", "if", "startdate", "<", "start", ":", "calc_start", "=", "startdate", "calc_end", "=", "parseinterval", "(", "start", ",", "-", "1", ")", "if", "calc_end", ">=", "calc_start", ":", "calc_intervals", ".", "append", "(", "Interval", "(", "calc_start", ",", "calc_end", ")", ")", "if", "enddate", ">", "end", ":", "calc_start", "=", "parseinterval", "(", "end", ",", "1", ")", "calc_end", "=", "enddate", "if", "calc_end", ">=", "calc_start", ":", "calc_intervals", ".", "append", "(", "Interval", "(", "calc_start", ",", "calc_end", ")", ")", "else", ":", "start", "=", "startdate", "end", "=", "enddate", "calc_intervals", ".", "append", "(", "Interval", "(", "startdate", ",", "enddate", ")", ")", "if", "calc_intervals", ":", "if", "intervals", ":", "calc_intervals", ".", "extend", "(", "intervals", ")", "elif", "intervals", ":", "calc_intervals", "=", "intervals", "return", "calc_intervals" ]
Given a ``startdate`` and an ``enddate``, evaluate the date intervals from which data is not available. It returns a list of two-dimensional tuples containing start and end date for the interval. The list could contain 0, 1 or 2 tuples.
[ "Given", "a", "startdate", "and", "an", "enddate", "dates", "evaluate", "the", "date", "intervals", "from", "which", "data", "is", "not", "available", ".", "It", "return", "a", "list", "of", "two", "-", "dimensional", "tuples", "containing", "start", "and", "end", "date", "for", "the", "interval", ".", "The", "list", "could", "countain", "0", "1", "or", "2", "tuples", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/dates.py#L120-L161
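A rough usage sketch, assuming the default converters accept datetime.date values; the exact interval endpoints depend on default_parse_interval, which this record does not show:

    from datetime import date
    from stdnet.utils.dates import missing_intervals  # path per the record above

    # Cached history spans 2013-02-01 .. 2013-02-10; a wider window is requested,
    # so one gap should come back before the cache and one after it.
    gaps = missing_intervals(date(2013, 1, 1), date(2013, 3, 1),
                             date(2013, 2, 1), date(2013, 2, 10))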
lsbardel/python-stdnet
stdnet/utils/dates.py
dategenerator
def dategenerator(start, end, step=1, desc=False): '''Generates dates between *start* and *end*.''' delta = timedelta(abs(step)) end = max(start, end) if desc: dt = end while dt >= start: yield dt dt -= delta else: dt = start while dt <= end: yield dt dt += delta
python
def dategenerator(start, end, step=1, desc=False): '''Generates dates between *start* and *end*.''' delta = timedelta(abs(step)) end = max(start, end) if desc: dt = end while dt >= start: yield dt dt -= delta else: dt = start while dt <= end: yield dt dt += delta
[ "def", "dategenerator", "(", "start", ",", "end", ",", "step", "=", "1", ",", "desc", "=", "False", ")", ":", "delta", "=", "timedelta", "(", "abs", "(", "step", ")", ")", "end", "=", "max", "(", "start", ",", "end", ")", "if", "desc", ":", "dt", "=", "end", "while", "dt", ">=", "start", ":", "yield", "dt", "dt", "-=", "delta", "else", ":", "dt", "=", "start", "while", "dt", "<=", "end", ":", "yield", "dt", "dt", "+=", "delta" ]
Generates dates between *start* and *end*.
[ "Generates", "dates", "between", "*", "atrt", "*", "and", "*", "end", "*", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/dates.py#L164-L177
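A minimal sketch; the generator is inclusive at both ends and clamps `end` to be no earlier than `start`:

    from datetime import date
    from stdnet.utils.dates import dategenerator  # path per the record above

    days = list(dategenerator(date(2013, 1, 1), date(2013, 1, 3)))
    # -> [date(2013, 1, 1), date(2013, 1, 2), date(2013, 1, 3)]
    # desc=True walks the same range backwards, starting from `end`.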
rodluger/everest
everest/utils.py
InitLog
def InitLog(file_name=None, log_level=logging.DEBUG, screen_level=logging.CRITICAL, pdb=False): ''' A little routine to initialize the logging functionality. :param str file_name: The name of the file to log to. \ Default :py:obj:`None` (set internally by :py:mod:`everest`) :param int log_level: The file logging level (0-50). Default 10 (debug) :param int screen_level: The screen logging level (0-50). \ Default 50 (critical) ''' # Initialize the logging root = logging.getLogger() root.handlers = [] root.setLevel(logging.DEBUG) # File handler if file_name is not None: if not os.path.exists(os.path.dirname(file_name)): os.makedirs(os.path.dirname(file_name)) fh = logging.FileHandler(file_name) fh.setLevel(log_level) fh_formatter = logging.Formatter( "%(asctime)s %(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s", datefmt="%m/%d/%y %H:%M:%S") fh.setFormatter(fh_formatter) fh.addFilter(NoPILFilter()) root.addHandler(fh) # Screen handler sh = logging.StreamHandler(sys.stdout) if pdb: sh.setLevel(logging.DEBUG) else: sh.setLevel(screen_level) sh_formatter = logging.Formatter( "%(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s") sh.setFormatter(sh_formatter) sh.addFilter(NoPILFilter()) root.addHandler(sh) # Set exception hook if pdb: sys.excepthook = ExceptionHookPDB else: sys.excepthook = ExceptionHook
python
def InitLog(file_name=None, log_level=logging.DEBUG, screen_level=logging.CRITICAL, pdb=False): ''' A little routine to initialize the logging functionality. :param str file_name: The name of the file to log to. \ Default :py:obj:`None` (set internally by :py:mod:`everest`) :param int log_level: The file logging level (0-50). Default 10 (debug) :param int screen_level: The screen logging level (0-50). \ Default 50 (critical) ''' # Initialize the logging root = logging.getLogger() root.handlers = [] root.setLevel(logging.DEBUG) # File handler if file_name is not None: if not os.path.exists(os.path.dirname(file_name)): os.makedirs(os.path.dirname(file_name)) fh = logging.FileHandler(file_name) fh.setLevel(log_level) fh_formatter = logging.Formatter( "%(asctime)s %(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s", datefmt="%m/%d/%y %H:%M:%S") fh.setFormatter(fh_formatter) fh.addFilter(NoPILFilter()) root.addHandler(fh) # Screen handler sh = logging.StreamHandler(sys.stdout) if pdb: sh.setLevel(logging.DEBUG) else: sh.setLevel(screen_level) sh_formatter = logging.Formatter( "%(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s") sh.setFormatter(sh_formatter) sh.addFilter(NoPILFilter()) root.addHandler(sh) # Set exception hook if pdb: sys.excepthook = ExceptionHookPDB else: sys.excepthook = ExceptionHook
[ "def", "InitLog", "(", "file_name", "=", "None", ",", "log_level", "=", "logging", ".", "DEBUG", ",", "screen_level", "=", "logging", ".", "CRITICAL", ",", "pdb", "=", "False", ")", ":", "# Initialize the logging", "root", "=", "logging", ".", "getLogger", "(", ")", "root", ".", "handlers", "=", "[", "]", "root", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "# File handler", "if", "file_name", "is", "not", "None", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "file_name", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "file_name", ")", ")", "fh", "=", "logging", ".", "FileHandler", "(", "file_name", ")", "fh", ".", "setLevel", "(", "log_level", ")", "fh_formatter", "=", "logging", ".", "Formatter", "(", "\"%(asctime)s %(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s\"", ",", "datefmt", "=", "\"%m/%d/%y %H:%M:%S\"", ")", "fh", ".", "setFormatter", "(", "fh_formatter", ")", "fh", ".", "addFilter", "(", "NoPILFilter", "(", ")", ")", "root", ".", "addHandler", "(", "fh", ")", "# Screen handler", "sh", "=", "logging", ".", "StreamHandler", "(", "sys", ".", "stdout", ")", "if", "pdb", ":", "sh", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "else", ":", "sh", ".", "setLevel", "(", "screen_level", ")", "sh_formatter", "=", "logging", ".", "Formatter", "(", "\"%(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s\"", ")", "sh", ".", "setFormatter", "(", "sh_formatter", ")", "sh", ".", "addFilter", "(", "NoPILFilter", "(", ")", ")", "root", ".", "addHandler", "(", "sh", ")", "# Set exception hook", "if", "pdb", ":", "sys", ".", "excepthook", "=", "ExceptionHookPDB", "else", ":", "sys", ".", "excepthook", "=", "ExceptionHook" ]
A little routine to initialize the logging functionality. :param str file_name: The name of the file to log to. \ Default :py:obj:`None` (set internally by :py:mod:`everest`) :param int log_level: The file logging level (0-50). Default 10 (debug) :param int screen_level: The screen logging level (0-50). \ Default 50 (critical)
[ "A", "little", "routine", "to", "initialize", "the", "logging", "functionality", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/utils.py#L66-L113
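A hedged usage sketch. Note that file_name needs a directory component: with a bare filename, os.path.dirname returns '' and the os.makedirs call would fail.

    import logging
    from everest.utils import InitLog  # path per the record above

    InitLog(file_name='logs/everest.log',   # DEBUG and up goes to this file
            screen_level=logging.WARNING)   # WARNING and up goes to the screen
    # InitLog also installs a logging exception hook on sys.excepthook.
    logging.getLogger(__name__).warning('visible in both handlers')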
rodluger/everest
everest/utils.py
ExceptionHook
def ExceptionHook(exctype, value, tb): ''' A custom exception handler that logs errors to file. ''' for line in traceback.format_exception_only(exctype, value): log.error(line.replace('\n', '')) for line in traceback.format_tb(tb): log.error(line.replace('\n', '')) sys.__excepthook__(exctype, value, tb)
python
def ExceptionHook(exctype, value, tb): ''' A custom exception handler that logs errors to file. ''' for line in traceback.format_exception_only(exctype, value): log.error(line.replace('\n', '')) for line in traceback.format_tb(tb): log.error(line.replace('\n', '')) sys.__excepthook__(exctype, value, tb)
[ "def", "ExceptionHook", "(", "exctype", ",", "value", ",", "tb", ")", ":", "for", "line", "in", "traceback", ".", "format_exception_only", "(", "exctype", ",", "value", ")", ":", "log", ".", "error", "(", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", ")", "for", "line", "in", "traceback", ".", "format_tb", "(", "tb", ")", ":", "log", ".", "error", "(", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", ")", "sys", ".", "__excepthook__", "(", "exctype", ",", "value", ",", "tb", ")" ]
A custom exception handler that logs errors to file.
[ "A", "custom", "exception", "handler", "that", "logs", "errors", "to", "file", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/utils.py#L116-L126
rodluger/everest
everest/utils.py
ExceptionHookPDB
def ExceptionHookPDB(exctype, value, tb): ''' A custom exception handler, with :py:obj:`pdb` post-mortem for debugging. ''' for line in traceback.format_exception_only(exctype, value): log.error(line.replace('\n', '')) for line in traceback.format_tb(tb): log.error(line.replace('\n', '')) sys.__excepthook__(exctype, value, tb) pdb.pm()
python
def ExceptionHookPDB(exctype, value, tb): ''' A custom exception handler, with :py:obj:`pdb` post-mortem for debugging. ''' for line in traceback.format_exception_only(exctype, value): log.error(line.replace('\n', '')) for line in traceback.format_tb(tb): log.error(line.replace('\n', '')) sys.__excepthook__(exctype, value, tb) pdb.pm()
[ "def", "ExceptionHookPDB", "(", "exctype", ",", "value", ",", "tb", ")", ":", "for", "line", "in", "traceback", ".", "format_exception_only", "(", "exctype", ",", "value", ")", ":", "log", ".", "error", "(", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", ")", "for", "line", "in", "traceback", ".", "format_tb", "(", "tb", ")", ":", "log", ".", "error", "(", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", ")", "sys", ".", "__excepthook__", "(", "exctype", ",", "value", ",", "tb", ")", "pdb", ".", "pm", "(", ")" ]
A custom exception handler, with :py:obj:`pdb` post-mortem for debugging.
[ "A", "custom", "exception", "handler", "with", ":", "py", ":", "obj", ":", "pdb", "post", "-", "mortem", "for", "debugging", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/utils.py#L129-L140
rodluger/everest
everest/utils.py
sort_like
def sort_like(l, col1, col2): ''' Sorts the list :py:obj:`l` by comparing :py:obj:`col2` to :py:obj:`col1`. Specifically, finds the indices :py:obj:`i` such that ``col2[i] = col1`` and returns ``l[i]``. This is useful when comparing the CDPP values of catalogs generated by different pipelines. The target IDs are all the same, but won't necessarily be in the same order. This allows :py:obj:`everest` to sort the CDPP arrays so that the targets match. :param array_like l: The list or array to sort :param array_like col1: A list or array (same length as :py:obj:`l`) :param array_like col2: A second list or array containing the same \ elements as :py:obj:`col1` but in a different order ''' s = np.zeros_like(col1) * np.nan for i, c in enumerate(col1): j = np.argmax(col2 == c) if j == 0: if col2[0] != c: continue s[i] = l[j] return s
python
def sort_like(l, col1, col2): ''' Sorts the list :py:obj:`l` by comparing :py:obj:`col2` to :py:obj:`col1`. Specifically, finds the indices :py:obj:`i` such that ``col2[i] = col1`` and returns ``l[i]``. This is useful when comparing the CDPP values of catalogs generated by different pipelines. The target IDs are all the same, but won't necessarily be in the same order. This allows :py:obj:`everest` to sort the CDPP arrays so that the targets match. :param array_like l: The list or array to sort :param array_like col1: A list or array (same length as :py:obj:`l`) :param array_like col2: A second list or array containing the same \ elements as :py:obj:`col1` but in a different order ''' s = np.zeros_like(col1) * np.nan for i, c in enumerate(col1): j = np.argmax(col2 == c) if j == 0: if col2[0] != c: continue s[i] = l[j] return s
[ "def", "sort_like", "(", "l", ",", "col1", ",", "col2", ")", ":", "s", "=", "np", ".", "zeros_like", "(", "col1", ")", "*", "np", ".", "nan", "for", "i", ",", "c", "in", "enumerate", "(", "col1", ")", ":", "j", "=", "np", ".", "argmax", "(", "col2", "==", "c", ")", "if", "j", "==", "0", ":", "if", "col2", "[", "0", "]", "!=", "c", ":", "continue", "s", "[", "i", "]", "=", "l", "[", "j", "]", "return", "s" ]
Sorts the list :py:obj:`l` by comparing :py:obj:`col2` to :py:obj:`col1`. Specifically, finds the indices :py:obj:`i` such that ``col2[i] = col1`` and returns ``l[i]``. This is useful when comparing the CDPP values of catalogs generated by different pipelines. The target IDs are all the same, but won't necessarily be in the same order. This allows :py:obj:`everest` to sort the CDPP arrays so that the targets match. :param array_like l: The list or array to sort :param array_like col1: A list or array (same length as :py:obj:`l`) :param array_like col2: A second list or array containing the same \ elements as :py:obj:`col1` but in a different order
[ "Sorts", "the", "list", ":", "py", ":", "obj", ":", "l", "by", "comparing", ":", "py", ":", "obj", ":", "col2", "to", ":", "py", ":", "obj", ":", "col1", ".", "Specifically", "finds", "the", "indices", ":", "py", ":", "obj", ":", "i", "such", "that", "col2", "[", "i", "]", "=", "col1", "and", "returns", "l", "[", "i", "]", ".", "This", "is", "useful", "when", "comparing", "the", "CDPP", "values", "of", "catalogs", "generated", "by", "different", "pipelines", ".", "The", "target", "IDs", "are", "all", "the", "same", "but", "won", "t", "necessarily", "be", "in", "the", "same", "order", ".", "This", "allows", ":", "py", ":", "obj", ":", "everest", "to", "sort", "the", "CDPP", "arrays", "so", "that", "the", "targets", "match", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/utils.py#L159-L183
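A small worked example; the import path is assumed from the record above:

    import numpy as np
    from everest.utils import sort_like

    col1 = np.array([201367065, 201384232, 201393098])  # reference ID order
    col2 = np.array([201393098, 201367065, 201384232])  # same IDs, shuffled
    cdpp = np.array([22.1, 30.5, 18.7])                 # values ordered like col2
    print(sort_like(cdpp, col1, col2))                  # [30.5, 18.7, 22.1]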
rodluger/everest
everest/utils.py
prange
def prange(*x): ''' Progress bar range with `tqdm` ''' try: root = logging.getLogger() if len(root.handlers): for h in root.handlers: if (type(h) is logging.StreamHandler) and \ (h.level != logging.CRITICAL): from tqdm import tqdm return tqdm(range(*x)) return range(*x) else: from tqdm import tqdm return tqdm(range(*x)) except ImportError: return range(*x)
python
def prange(*x): ''' Progress bar range with `tqdm` ''' try: root = logging.getLogger() if len(root.handlers): for h in root.handlers: if (type(h) is logging.StreamHandler) and \ (h.level != logging.CRITICAL): from tqdm import tqdm return tqdm(range(*x)) return range(*x) else: from tqdm import tqdm return tqdm(range(*x)) except ImportError: return range(*x)
[ "def", "prange", "(", "*", "x", ")", ":", "try", ":", "root", "=", "logging", ".", "getLogger", "(", ")", "if", "len", "(", "root", ".", "handlers", ")", ":", "for", "h", "in", "root", ".", "handlers", ":", "if", "(", "type", "(", "h", ")", "is", "logging", ".", "StreamHandler", ")", "and", "(", "h", ".", "level", "!=", "logging", ".", "CRITICAL", ")", ":", "from", "tqdm", "import", "tqdm", "return", "tqdm", "(", "range", "(", "*", "x", ")", ")", "return", "range", "(", "*", "x", ")", "else", ":", "from", "tqdm", "import", "tqdm", "return", "tqdm", "(", "range", "(", "*", "x", ")", ")", "except", "ImportError", ":", "return", "range", "(", "*", "x", ")" ]
Progress bar range with `tqdm`
[ "Progress", "bar", "range", "with", "tqdm" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/utils.py#L237-L256
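A drop-in usage sketch; prange degrades to a plain range() when tqdm is missing or the screen handler is set to CRITICAL:

    from everest.utils import prange  # path per the record above

    total = 0
    for i in prange(10000):  # same arguments as range(); may show a tqdm bar
        total += i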
lsbardel/python-stdnet
stdnet/apps/columnts/npts.py
ColumnTS.front
def front(self, *fields): '''Return the front pair of the structure''' ts = self.irange(0, 0, fields=fields) if ts: return ts.start(), ts[0]
python
def front(self, *fields): '''Return the front pair of the structure''' ts = self.irange(0, 0, fields=fields) if ts: return ts.start(), ts[0]
[ "def", "front", "(", "self", ",", "*", "fields", ")", ":", "ts", "=", "self", ".", "irange", "(", "0", ",", "0", ",", "fields", "=", "fields", ")", "if", "ts", ":", "return", "ts", ".", "start", "(", ")", ",", "ts", "[", "0", "]" ]
Return the front pair of the structure
[ "Return", "the", "front", "pair", "of", "the", "structure" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/npts.py#L19-L23
lsbardel/python-stdnet
stdnet/apps/columnts/npts.py
ColumnTS.back
def back(self, *fields): '''Return the back pair of the structure''' ts = self.irange(-1, -1, fields=fields) if ts: return ts.end(), ts[0]
python
def back(self, *fields): '''Return the back pair of the structure''' ts = self.irange(-1, -1, fields=fields) if ts: return ts.end(), ts[0]
[ "def", "back", "(", "self", ",", "*", "fields", ")", ":", "ts", "=", "self", ".", "irange", "(", "-", "1", ",", "-", "1", ",", "fields", "=", "fields", ")", "if", "ts", ":", "return", "ts", ".", "end", "(", ")", ",", "ts", "[", "0", "]" ]
Return the back pair of the structure
[ "Return", "the", "back", "pair", "of", "the", "structure" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/npts.py#L25-L29
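A hedged sketch against an existing ColumnTS handle `ts`; creating and populating the timeseries is backend-specific and omitted here:

    first = ts.front('price')  # (start timestamp, first row) or None when empty
    last = ts.back('price')    # (end timestamp, last row) or None when empty
    # back() also indexes [0] because its irange(-1, -1) slice holds one element.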
lsbardel/python-stdnet
stdnet/backends/__init__.py
parse_backend
def parse_backend(backend): """Converts the "backend" into the database connection parameters. It returns a (scheme, host, params) tuple.""" r = urlparse.urlsplit(backend) scheme, host = r.scheme, r.netloc path, query = r.path, r.query if path and not query: query, path = path, '' if query: if query.find('?'): path = query else: query = query[1:] if query: params = dict(urlparse.parse_qsl(query)) else: params = {} return scheme, host, params
python
def parse_backend(backend): """Converts the "backend" into the database connection parameters. It returns a (scheme, host, params) tuple.""" r = urlparse.urlsplit(backend) scheme, host = r.scheme, r.netloc path, query = r.path, r.query if path and not query: query, path = path, '' if query: if query.find('?'): path = query else: query = query[1:] if query: params = dict(urlparse.parse_qsl(query)) else: params = {} return scheme, host, params
[ "def", "parse_backend", "(", "backend", ")", ":", "r", "=", "urlparse", ".", "urlsplit", "(", "backend", ")", "scheme", ",", "host", "=", "r", ".", "scheme", ",", "r", ".", "netloc", "path", ",", "query", "=", "r", ".", "path", ",", "r", ".", "query", "if", "path", "and", "not", "query", ":", "query", ",", "path", "=", "path", ",", "''", "if", "query", ":", "if", "query", ".", "find", "(", "'?'", ")", ":", "path", "=", "query", "else", ":", "query", "=", "query", "[", "1", ":", "]", "if", "query", ":", "params", "=", "dict", "(", "urlparse", ".", "parse_qsl", "(", "query", ")", ")", "else", ":", "params", "=", "{", "}", "return", "scheme", ",", "host", ",", "params" ]
Converts the "backend" into the database connection parameters. It returns a (scheme, host, params) tuple.
[ "Converts", "the", "backend", "into", "the", "database", "connection", "parameters", ".", "It", "returns", "a", "(", "scheme", "host", "params", ")", "tuple", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/__init__.py#L470-L488
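A quick worked example of the tuple that comes back:

    from stdnet.backends import parse_backend  # path per the record above

    scheme, host, params = parse_backend('redis://127.0.0.1:6379?db=3&timeout=5')
    # scheme == 'redis'
    # host   == '127.0.0.1:6379'
    # params == {'db': '3', 'timeout': '5'}   (values are still strings here)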
lsbardel/python-stdnet
stdnet/backends/__init__.py
getdb
def getdb(backend=None, **kwargs): '''get a :class:`BackendDataServer`.''' if isinstance(backend, BackendDataServer): return backend backend = backend or settings.DEFAULT_BACKEND if not backend: return None scheme, address, params = parse_backend(backend) params.update(kwargs) if 'timeout' in params: params['timeout'] = int(params['timeout']) return _getdb(scheme, address, params)
python
def getdb(backend=None, **kwargs): '''get a :class:`BackendDataServer`.''' if isinstance(backend, BackendDataServer): return backend backend = backend or settings.DEFAULT_BACKEND if not backend: return None scheme, address, params = parse_backend(backend) params.update(kwargs) if 'timeout' in params: params['timeout'] = int(params['timeout']) return _getdb(scheme, address, params)
[ "def", "getdb", "(", "backend", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "backend", ",", "BackendDataServer", ")", ":", "return", "backend", "backend", "=", "backend", "or", "settings", ".", "DEFAULT_BACKEND", "if", "not", "backend", ":", "return", "None", "scheme", ",", "address", ",", "params", "=", "parse_backend", "(", "backend", ")", "params", ".", "update", "(", "kwargs", ")", "if", "'timeout'", "in", "params", ":", "params", "[", "'timeout'", "]", "=", "int", "(", "params", "[", "'timeout'", "]", ")", "return", "_getdb", "(", "scheme", ",", "address", ",", "params", ")" ]
get a :class:`BackendDataServer`.
[ "get", "a", ":", "class", ":", "BackendDataServer", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/__init__.py#L499-L510
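A minimal sketch; getdb is assumed to be re-exported at the package top level (it lives in stdnet.backends per the record above):

    from stdnet import getdb

    backend = getdb('redis://127.0.0.1:6379?db=7&timeout=2')
    # parse_backend() splits the URL, 'timeout' is cast to int, and the
    # backend class registered for the 'redis' scheme is instantiated.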
lsbardel/python-stdnet
stdnet/backends/__init__.py
BackendDataServer.basekey
def basekey(self, meta, *args): """Calculate the key to access model data. :parameter meta: a :class:`stdnet.odm.Metaclass`. :parameter args: optional list of strings to append to the basekey. :rtype: a native string """ key = '%s%s' % (self.namespace, meta.modelkey) postfix = ':'.join((str(p) for p in args if p is not None)) return '%s:%s' % (key, postfix) if postfix else key
python
def basekey(self, meta, *args): """Calculate the key to access model data. :parameter meta: a :class:`stdnet.odm.Metaclass`. :parameter args: optional list of strings to append to the basekey. :rtype: a native string """ key = '%s%s' % (self.namespace, meta.modelkey) postfix = ':'.join((str(p) for p in args if p is not None)) return '%s:%s' % (key, postfix) if postfix else key
[ "def", "basekey", "(", "self", ",", "meta", ",", "*", "args", ")", ":", "key", "=", "'%s%s'", "%", "(", "self", ".", "namespace", ",", "meta", ".", "modelkey", ")", "postfix", "=", "':'", ".", "join", "(", "(", "str", "(", "p", ")", "for", "p", "in", "args", "if", "p", "is", "not", "None", ")", ")", "return", "'%s:%s'", "%", "(", "key", ",", "postfix", ")", "if", "postfix", "else", "key" ]
Calculate the key to access model data. :parameter meta: a :class:`stdnet.odm.Metaclass`. :parameter args: optional list of strings to append to the basekey. :rtype: a native string
[ "Calculate", "the", "key", "to", "access", "model", "data", ".", ":", "parameter", "meta", ":", "a", ":", "class", ":", "stdnet", ".", "odm", ".", "Metaclass", ".", ":", "parameter", "args", ":", "optional", "list", "of", "strings", "to", "prepend", "to", "the", "basekey", ".", ":", "rtype", ":", "a", "native", "string" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/__init__.py#L208-L217
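A standalone re-creation of the key layout, for illustration only; 'stdnet.' and 'myapp.instrument' stand in for real namespace and modelkey values:

    def basekey(namespace, modelkey, *args):
        # mirrors BackendDataServer.basekey without needing a backend instance
        key = '%s%s' % (namespace, modelkey)
        postfix = ':'.join(str(p) for p in args if p is not None)
        return '%s:%s' % (key, postfix) if postfix else key

    print(basekey('stdnet.', 'myapp.instrument'))             # stdnet.myapp.instrument
    print(basekey('stdnet.', 'myapp.instrument', 'obj', 42))  # stdnet.myapp.instrument:obj:42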
lsbardel/python-stdnet
stdnet/backends/__init__.py
BackendDataServer.make_objects
def make_objects(self, meta, data, related_fields=None): '''Generator of :class:`stdnet.odm.StdModel` instances with data from database. :parameter meta: instance of model :class:`stdnet.odm.Metaclass`. :parameter data: iterator over instances data. ''' make_object = meta.make_object related_data = [] if related_fields: for fname, fdata in iteritems(related_fields): field = meta.dfields[fname] if field in meta.multifields: related = dict(fdata) multi = True else: multi = False relmodel = field.relmodel related = dict(((obj.id, obj) for obj in self.make_objects(relmodel._meta, fdata))) related_data.append((field, related, multi)) for state in data: instance = make_object(state, self) for field, rdata, multi in related_data: if multi: field.set_cache(instance, rdata.get(str(instance.id))) else: rid = getattr(instance, field.attname, None) if rid is not None: value = rdata.get(rid) setattr(instance, field.name, value) yield instance
python
def make_objects(self, meta, data, related_fields=None): '''Generator of :class:`stdnet.odm.StdModel` instances with data from database. :parameter meta: instance of model :class:`stdnet.odm.Metaclass`. :parameter data: iterator over instances data. ''' make_object = meta.make_object related_data = [] if related_fields: for fname, fdata in iteritems(related_fields): field = meta.dfields[fname] if field in meta.multifields: related = dict(fdata) multi = True else: multi = False relmodel = field.relmodel related = dict(((obj.id, obj) for obj in self.make_objects(relmodel._meta, fdata))) related_data.append((field, related, multi)) for state in data: instance = make_object(state, self) for field, rdata, multi in related_data: if multi: field.set_cache(instance, rdata.get(str(instance.id))) else: rid = getattr(instance, field.attname, None) if rid is not None: value = rdata.get(rid) setattr(instance, field.name, value) yield instance
[ "def", "make_objects", "(", "self", ",", "meta", ",", "data", ",", "related_fields", "=", "None", ")", ":", "make_object", "=", "meta", ".", "make_object", "related_data", "=", "[", "]", "if", "related_fields", ":", "for", "fname", ",", "fdata", "in", "iteritems", "(", "related_fields", ")", ":", "field", "=", "meta", ".", "dfields", "[", "fname", "]", "if", "field", "in", "meta", ".", "multifields", ":", "related", "=", "dict", "(", "fdata", ")", "multi", "=", "True", "else", ":", "multi", "=", "False", "relmodel", "=", "field", ".", "relmodel", "related", "=", "dict", "(", "(", "(", "obj", ".", "id", ",", "obj", ")", "for", "obj", "in", "self", ".", "make_objects", "(", "relmodel", ".", "_meta", ",", "fdata", ")", ")", ")", "related_data", ".", "append", "(", "(", "field", ",", "related", ",", "multi", ")", ")", "for", "state", "in", "data", ":", "instance", "=", "make_object", "(", "state", ",", "self", ")", "for", "field", ",", "rdata", ",", "multi", "in", "related_data", ":", "if", "multi", ":", "field", ".", "set_cache", "(", "instance", ",", "rdata", ".", "get", "(", "str", "(", "instance", ".", "id", ")", ")", ")", "else", ":", "rid", "=", "getattr", "(", "instance", ",", "field", ".", "attname", ",", "None", ")", "if", "rid", "is", "not", "None", ":", "value", "=", "rdata", ".", "get", "(", "rid", ")", "setattr", "(", "instance", ",", "field", ".", "name", ",", "value", ")", "yield", "instance" ]
Generator of :class:`stdnet.odm.StdModel` instances with data from database. :parameter meta: instance of model :class:`stdnet.odm.Metaclass`. :parameter data: iterator over instances data.
[ "Generator", "of", ":", "class", ":", "stdnet", ".", "odm", ".", "StdModel", "instances", "with", "data", "from", "database", ".", ":", "parameter", "meta", ":", "instance", "of", "model", ":", "class", ":", "stdnet", ".", "odm", ".", "Metaclass", ".", ":", "parameter", "data", ":", "iterator", "over", "instances", "data", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/__init__.py#L227-L258
lsbardel/python-stdnet
stdnet/backends/__init__.py
BackendDataServer.structure
def structure(self, instance, client=None): '''Create a backend :class:`stdnet.odm.Structure` handler. :param instance: a :class:`stdnet.odm.Structure` :param client: Optional client handler. ''' struct = self.struct_map.get(instance._meta.name) if struct is None: raise ModelNotAvailable('"%s" is not available for backend ' '"%s"' % (instance._meta.name, self)) client = client if client is not None else self.client return struct(instance, self, client)
python
def structure(self, instance, client=None): '''Create a backend :class:`stdnet.odm.Structure` handler. :param instance: a :class:`stdnet.odm.Structure` :param client: Optional client handler. ''' struct = self.struct_map.get(instance._meta.name) if struct is None: raise ModelNotAvailable('"%s" is not available for backend ' '"%s"' % (instance._meta.name, self)) client = client if client is not None else self.client return struct(instance, self, client)
[ "def", "structure", "(", "self", ",", "instance", ",", "client", "=", "None", ")", ":", "struct", "=", "self", ".", "struct_map", ".", "get", "(", "instance", ".", "_meta", ".", "name", ")", "if", "struct", "is", "None", ":", "raise", "ModelNotAvailable", "(", "'\"%s\" is not available for backend '", "'\"%s\"'", "%", "(", "instance", ".", "_meta", ".", "name", ",", "self", ")", ")", "client", "=", "client", "if", "client", "is", "not", "None", "else", "self", ".", "client", "return", "struct", "(", "instance", ",", "self", ",", "client", ")" ]
Create a backend :class:`stdnet.odm.Structure` handler. :param instance: a :class:`stdnet.odm.Structure` :param client: Optional client handler.
[ "Create", "a", "backend", ":", "class", ":", "stdnet", ".", "odm", ".", "Structure", "handler", ".", ":", "param", "instance", ":", "a", ":", "class", ":", "stdnet", ".", "odm", ".", "Structure", ":", "param", "client", ":", "Optional", "client", "handler", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/__init__.py#L263-L274
rodluger/everest
everest/user.py
Search
def Search(ID, mission='k2'): """Why is my target not in the EVEREST database?""" # Only K2 supported for now assert mission == 'k2', "Only the K2 mission is supported for now." print("Searching for target %d..." % ID) # First check if it is in the database season = missions.k2.Season(ID) if season in [91, 92, [91, 92]]: print("Campaign 9 is currently not part of the EVEREST catalog.") return elif season == 101: print("The first half of campaign 10 is not currently part of " + "the EVEREST catalog.") return elif season is not None: print("Target is in campaign %d of the EVEREST catalog." % season) return # Get the kplr object star = k2plr_client.k2_star(ID) # First check if this is a star if star.objtype.lower() != "star": print("Target is of type %s, not STAR, " % star.objtype + "and is therefore not included in the EVEREST catalog.") return # Let's try to download the pixel data and see what happens try: tpf = star.get_target_pixel_files() except: print("Unable to download the raw pixel files for this target.") return if len(tpf) == 0: print("Raw pixel files are not available for this target. Looks like " + "data may not have been collected for it.") return # Perhaps it's in a campaign we haven't gotten to yet if tpf[0].sci_campaign not in missions.k2.SEASONS: print("Targets for campaign %d are not yet available." % tpf[0].sci_campaign) return # Let's try to download the K2SFF data try: k2sff = k2plr.K2SFF(ID) except: print("Error downloading the K2SFF light curve for this target. " + "Currently, EVEREST uses the K2SFF apertures to perform " + "photometry. This is likely to change in the next version.") return # Let's try to get the aperture try: assert np.count_nonzero(k2sff.apertures[15]), "Invalid aperture." except: print("Unable to retrieve the K2SFF aperture for this target. " + "Currently, EVEREST uses the K2SFF apertures to perform " + "photometry. This is likely to change in the next version.") return # Perhaps the star is *super* saturated and we didn't bother # de-trending it? if star.kp < 8: print("Target has Kp = %.1f and is too saturated " + "for proper de-trending with EVEREST.") return # I'm out of ideas print("I'm not sure why this target isn't in the EVEREST catalog." + "You can try de-trending it yourself:") print("http://faculty.washington.edu/rodluger/everest/pipeline.html") return
python
def Search(ID, mission='k2'): """Why is my target not in the EVEREST database?""" # Only K2 supported for now assert mission == 'k2', "Only the K2 mission is supported for now." print("Searching for target %d..." % ID) # First check if it is in the database season = missions.k2.Season(ID) if season in [91, 92, [91, 92]]: print("Campaign 9 is currently not part of the EVEREST catalog.") return elif season == 101: print("The first half of campaign 10 is not currently part of " + "the EVEREST catalog.") return elif season is not None: print("Target is in campaign %d of the EVEREST catalog." % season) return # Get the kplr object star = k2plr_client.k2_star(ID) # First check if this is a star if star.objtype.lower() != "star": print("Target is of type %s, not STAR, " % star.objtype + "and is therefore not included in the EVEREST catalog.") return # Let's try to download the pixel data and see what happens try: tpf = star.get_target_pixel_files() except: print("Unable to download the raw pixel files for this target.") return if len(tpf) == 0: print("Raw pixel files are not available for this target. Looks like " + "data may not have been collected for it.") return # Perhaps it's in a campaign we haven't gotten to yet if tpf[0].sci_campaign not in missions.k2.SEASONS: print("Targets for campaign %d are not yet available." % tpf[0].sci_campaign) return # Let's try to download the K2SFF data try: k2sff = k2plr.K2SFF(ID) except: print("Error downloading the K2SFF light curve for this target. " + "Currently, EVEREST uses the K2SFF apertures to perform " + "photometry. This is likely to change in the next version.") return # Let's try to get the aperture try: assert np.count_nonzero(k2sff.apertures[15]), "Invalid aperture." except: print("Unable to retrieve the K2SFF aperture for this target. " + "Currently, EVEREST uses the K2SFF apertures to perform " + "photometry. This is likely to change in the next version.") return # Perhaps the star is *super* saturated and we didn't bother # de-trending it? if star.kp < 8: print("Target has Kp = %.1f and is too saturated " + "for proper de-trending with EVEREST.") return # I'm out of ideas print("I'm not sure why this target isn't in the EVEREST catalog." + "You can try de-trending it yourself:") print("http://faculty.washington.edu/rodluger/everest/pipeline.html") return
[ "def", "Search", "(", "ID", ",", "mission", "=", "'k2'", ")", ":", "# Only K2 supported for now", "assert", "mission", "==", "'k2'", ",", "\"Only the K2 mission is supported for now.\"", "print", "(", "\"Searching for target %d...\"", "%", "ID", ")", "# First check if it is in the database", "season", "=", "missions", ".", "k2", ".", "Season", "(", "ID", ")", "if", "season", "in", "[", "91", ",", "92", ",", "[", "91", ",", "92", "]", "]", ":", "print", "(", "\"Campaign 9 is currently not part of the EVEREST catalog.\"", ")", "return", "elif", "season", "==", "101", ":", "print", "(", "\"The first half of campaign 10 is not currently part of \"", "+", "\"the EVEREST catalog.\"", ")", "return", "elif", "season", "is", "not", "None", ":", "print", "(", "\"Target is in campaign %d of the EVEREST catalog.\"", "%", "season", ")", "return", "# Get the kplr object", "star", "=", "k2plr_client", ".", "k2_star", "(", "ID", ")", "# First check if this is a star", "if", "star", ".", "objtype", ".", "lower", "(", ")", "!=", "\"star\"", ":", "print", "(", "\"Target is of type %s, not STAR, \"", "%", "star", ".", "objtype", "+", "\"and is therefore not included in the EVEREST catalog.\"", ")", "return", "# Let's try to download the pixel data and see what happens", "try", ":", "tpf", "=", "star", ".", "get_target_pixel_files", "(", ")", "except", ":", "print", "(", "\"Unable to download the raw pixel files for this target.\"", ")", "return", "if", "len", "(", "tpf", ")", "==", "0", ":", "print", "(", "\"Raw pixel files are not available for this target. Looks like \"", "+", "\"data may not have been collected for it.\"", ")", "return", "# Perhaps it's in a campaign we haven't gotten to yet", "if", "tpf", "[", "0", "]", ".", "sci_campaign", "not", "in", "missions", ".", "k2", ".", "SEASONS", ":", "print", "(", "\"Targets for campaign %d are not yet available.\"", "%", "tpf", "[", "0", "]", ".", "sci_campaign", ")", "return", "# Let's try to download the K2SFF data", "try", ":", "k2sff", "=", "k2plr", ".", "K2SFF", "(", "ID", ")", "except", ":", "print", "(", "\"Error downloading the K2SFF light curve for this target. \"", "+", "\"Currently, EVEREST uses the K2SFF apertures to perform \"", "+", "\"photometry. This is likely to change in the next version.\"", ")", "return", "# Let's try to get the aperture", "try", ":", "assert", "np", ".", "count_nonzero", "(", "k2sff", ".", "apertures", "[", "15", "]", ")", ",", "\"Invalid aperture.\"", "except", ":", "print", "(", "\"Unable to retrieve the K2SFF aperture for this target. \"", "+", "\"Currently, EVEREST uses the K2SFF apertures to perform \"", "+", "\"photometry. This is likely to change in the next version.\"", ")", "return", "# Perhaps the star is *super* saturated and we didn't bother", "# de-trending it?", "if", "star", ".", "kp", "<", "8", ":", "print", "(", "\"Target has Kp = %.1f and is too saturated \"", "+", "\"for proper de-trending with EVEREST.\"", ")", "return", "# I'm out of ideas", "print", "(", "\"I'm not sure why this target isn't in the EVEREST catalog.\"", "+", "\"You can try de-trending it yourself:\"", ")", "print", "(", "\"http://faculty.washington.edu/rodluger/everest/pipeline.html\"", ")", "return" ]
Why is my target not in the EVEREST database?
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L61-L135
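A minimal usage sketch for the record above. The EPIC ID is illustrative, and `Search` is assumed to be exported at the package level, as the `everest.Search(%d)` hints in the error messages of `DownloadFile` below suggest:

# Hypothetical example: diagnose why a target is (or isn't) in the catalog.
# 201367065 is an arbitrary EPIC ID chosen for illustration.
import everest

everest.Search(201367065, mission='k2')
# Prints one of the diagnostics handled above, e.g.
# "Target is in campaign 1 of the EVEREST catalog."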
rodluger/everest
everest/user.py
DownloadFile
def DownloadFile(ID, season=None, mission='k2', cadence='lc',
                 filename=None, clobber=False):
    '''
    Download a given :py:mod:`everest` file from MAST.

    :param str mission: The mission name. Default `k2`
    :param str cadence: The light curve cadence. Default `lc`
    :param str filename: The name of the file to download. Default \
           :py:obj:`None`, in which case the default FITS file is retrieved.
    :param bool clobber: If :py:obj:`True`, download and overwrite \
           existing files. Default :py:obj:`False`

    '''

    # Get season
    if season is None:
        season = getattr(missions, mission).Season(ID)
        if hasattr(season, '__len__'):
            raise AttributeError(
                "Please choose a `season` for this target: %s." % season)
    if season is None:
        if getattr(missions, mission).ISTARGET(ID):
            raise ValueError('Target not found in local database. ' +
                             'Run `everest.Search(%d)` for more information.'
                             % ID)
        else:
            raise ValueError('Invalid target ID.')
    path = getattr(missions, mission).TargetDirectory(ID, season)
    relpath = getattr(missions, mission).TargetDirectory(
        ID, season, relative=True)
    if filename is None:
        filename = getattr(missions, mission).FITSFile(ID, season, cadence)

    # Check if file exists
    if not os.path.exists(path):
        os.makedirs(path)
    elif os.path.exists(os.path.join(path, filename)) and not clobber:
        log.info('Found cached file.')
        return os.path.join(path, filename)

    # Get file URL
    log.info('Downloading the file...')
    fitsurl = getattr(missions, mission).FITSUrl(ID, season)
    if not fitsurl.endswith('/'):
        fitsurl += '/'

    # Download the data
    r = urllib.request.Request(fitsurl + filename)
    try:
        handler = urllib.request.urlopen(r)
        code = handler.getcode()
    except (urllib.error.HTTPError, urllib.error.URLError):
        code = 0
    if int(code) == 200:

        # Read the data
        data = handler.read()

        # Atomically save to disk
        f = NamedTemporaryFile("wb", delete=False)
        f.write(data)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, os.path.join(path, filename))

    else:

        # Something went wrong!
        log.error("Error code {0} for URL '{1}'".format(
            code, fitsurl + filename))

        # If the files can be accessed by `ssh`, let's try that
        # (development version only!)
        if EVEREST_FITS is None:
            raise Exception("Unable to locate the file.")

        # Get the url
        inpath = os.path.join(EVEREST_FITS, relpath, filename)
        outpath = os.path.join(path, filename)

        # Download the data
        log.info("Accessing file via `scp`...")
        subprocess.call(['scp', inpath, outpath])

    # Success?
    if os.path.exists(os.path.join(path, filename)):
        return os.path.join(path, filename)
    else:
        raise Exception("Unable to download the file. " +
                        "Run `everest.Search(%d)` to troubleshoot." % ID)
python
Download a given :py:mod:`everest` file from MAST.

:param str mission: The mission name. Default `k2`
:param str cadence: The light curve cadence. Default `lc`
:param str filename: The name of the file to download. Default :py:obj:`None`, in which case the default FITS file is retrieved.
:param bool clobber: If :py:obj:`True`, download and overwrite existing files. Default :py:obj:`False`
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L137-L228
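A short sketch of how `DownloadFile` might be called directly. The EPIC ID is illustrative; the `CDPP` header key is the one read back by `load_fits` further down:

# Sketch: fetch the default de-trended FITS file and peek at a header value.
from astropy.io import fits as pyfits
from everest.user import DownloadFile

path = DownloadFile(201367065, mission='k2', cadence='lc', clobber=False)
with pyfits.open(path) as f:
    print(f[1].header['CDPP'])  # de-trended CDPP stored by the pipeline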
rodluger/everest
everest/user.py
DVS
def DVS(ID, season=None, mission='k2', clobber=False,
        cadence='lc', model='nPLD'):
    '''
    Show the data validation summary (DVS) for a given target.

    :param str mission: The mission name. Default `k2`
    :param str cadence: The light curve cadence. Default `lc`
    :param bool clobber: If :py:obj:`True`, download and overwrite \
           existing files. Default :py:obj:`False`

    '''

    # Get season
    if season is None:
        season = getattr(missions, mission).Season(ID)
        if hasattr(season, '__len__'):
            raise AttributeError(
                "Please choose a `season` for this target: %s." % season)

    # Get file name
    if model == 'nPLD':
        filename = getattr(missions, mission).DVSFile(ID, season, cadence)
    else:
        if cadence == 'sc':
            filename = model + '.sc.pdf'
        else:
            filename = model + '.pdf'
    file = DownloadFile(ID, season=season, mission=mission,
                        filename=filename, clobber=clobber)

    try:
        if platform.system().lower().startswith('darwin'):
            subprocess.call(['open', file])
        elif os.name == 'nt':
            os.startfile(file)
        elif os.name == 'posix':
            subprocess.call(['xdg-open', file])
        else:
            raise Exception("")
    except:
        log.info("Unable to open the pdf. Try opening it manually:")
        log.info(file)
python
Show the data validation summary (DVS) for a given target.

:param str mission: The mission name. Default `k2`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: If :py:obj:`True`, download and overwrite existing files. Default :py:obj:`False`
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L231-L275
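Usage is a one-liner; a sketch with an illustrative EPIC ID:

# Sketch: download and open the nPLD data validation summary PDF.
from everest.user import DVS

DVS(201367065, mission='k2', cadence='lc', model='nPLD')
# Falls back to logging the local path if no PDF viewer can be launched.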
rodluger/everest
everest/user.py
Everest.compute
def compute(self):
    '''
    Re-compute the :py:mod:`everest` model for the given
    value of :py:obj:`lambda`.
    For long cadence `k2` light curves, this should take several
    seconds. For short cadence `k2` light curves, it may take a
    few minutes. Note that this is a simple wrapper around
    :py:func:`everest.Basecamp.compute`.

    '''

    # If we're doing iterative PLD, get the normalization
    if self.model_name == 'iPLD':
        self._get_norm()

    # Compute as usual
    super(Everest, self).compute()

    # Make NaN cadences NaNs
    self.flux[self.nanmask] = np.nan
python
Re-compute the :py:mod:`everest` model for the given value of :py:obj:`lambda`. For long cadence `k2` light curves, this should take several seconds. For short cadence `k2` light curves, it may take a few minutes. Note that this is a simple wrapper around :py:func:`everest.Basecamp.compute`.
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L368-L387
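A sketch of the intended workflow: change a model input, then re-compute. The `Everest(ID)` instantiation is assumed from the package's usual entry point, and the `lam` attribute (indexed [chunk][pld order]) follows the fields read back in `load_fits` below; the ID is illustrative:

# Sketch: re-run the de-trending after tweaking the regularization.
import numpy as np
import everest

star = everest.Everest(201367065)   # loads the published FITS product
star.lam[0][0] *= 10.0              # stiffen chunk 0, first-order PLD
star.compute()                      # wraps Basecamp.compute()
print(np.nanmedian(star.flux))      # NaN cadences stay NaN, as above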
rodluger/everest
everest/user.py
Everest._get_norm
def _get_norm(self):
    '''
    Computes the PLD flux normalization array.

    .. note:: `iPLD` model **only**.

    '''

    log.info('Computing the PLD normalization...')

    # Loop over all chunks
    mod = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):

        # Unmasked chunk
        c = self.get_chunk(b)

        # Masked chunk (original mask plus user transit mask)
        inds = np.array(
            list(set(np.concatenate([self.transitmask, self.recmask]))),
            dtype=int)
        M = np.delete(np.arange(len(self.time)), inds, axis=0)
        if b > 0:
            m = M[(M > self.breakpoints[b - 1] - self.bpad) &
                  (M <= self.breakpoints[b] + self.bpad)]
        else:
            m = M[M <= self.breakpoints[b] + self.bpad]

        # This block of the masked covariance matrix
        mK = GetCovariance(self.kernel, self.kernel_params,
                           self.time[m], self.fraw_err[m])

        # Get median
        med = np.nanmedian(self.fraw[m])

        # Normalize the flux
        f = self.fraw[m] - med

        # The X^2 matrices
        A = np.zeros((len(m), len(m)))
        B = np.zeros((len(c), len(m)))

        # Loop over all orders
        for n in range(self.pld_order):
            XM = self.X(n, m)
            XC = self.X(n, c)
            A += self.reclam[b][n] * np.dot(XM, XM.T)
            B += self.reclam[b][n] * np.dot(XC, XM.T)
            del XM, XC

        W = np.linalg.solve(mK + A, f)
        mod[b] = np.dot(B, W)
        del A, B, W

    # Join the chunks after applying the correct offset
    if len(mod) > 1:

        # First chunk
        model = mod[0][:-self.bpad]

        # Center chunks
        for m in mod[1:-1]:
            offset = model[-1] - m[self.bpad - 1]
            model = np.concatenate(
                [model, m[self.bpad:-self.bpad] + offset])

        # Last chunk
        offset = model[-1] - mod[-1][self.bpad - 1]
        model = np.concatenate([model, mod[-1][self.bpad:] + offset])

    else:
        model = mod[0]

    # Subtract the global median
    model -= np.nanmedian(model)

    # Save the norm
    self._norm = self.fraw - model
python
Computes the PLD flux normalization array.

.. note:: `iPLD` model **only**.
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L389-L467
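The stitching step at the end of `_get_norm` is easy to miss: each chunk's model is shifted by a constant offset so that it meets the previous chunk at the breakpoint padding, then the global median is subtracted. A standalone sketch of that join with toy arrays (`bpad` and the segments are made up):

import numpy as np

# Toy per-chunk models overlapping by `bpad` cadences, mimicking the
# offset-and-concatenate join in _get_norm.
bpad = 3
mod = [np.linspace(0., 1., 12),
       np.linspace(5., 6., 12),
       np.linspace(2., 3., 12)]

model = mod[0][:-bpad]                   # first chunk, minus the pad
for m in mod[1:-1]:                      # center chunks
    offset = model[-1] - m[bpad - 1]
    model = np.concatenate([model, m[bpad:-bpad] + offset])
offset = model[-1] - mod[-1][bpad - 1]   # last chunk
model = np.concatenate([model, mod[-1][bpad:] + offset])
model -= np.nanmedian(model)             # zero the global median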
rodluger/everest
everest/user.py
Everest.load_fits
def load_fits(self):
    '''
    Load the FITS file from disk and populate the class
    instance with its data.

    '''

    log.info("Loading FITS file for %d." % (self.ID))
    with pyfits.open(self.fitsfile) as f:

        # Params and long cadence data
        self.loaded = True
        self.is_parent = False
        try:
            self.X1N = f[2].data['X1N']
        except KeyError:
            self.X1N = None
        self.aperture = f[3].data
        self.aperture_name = f[1].header['APNAME']
        try:
            self.bkg = f[1].data['BKG']
        except KeyError:
            self.bkg = 0.
        self.bpad = f[1].header['BPAD']
        self.cbv_minstars = []
        self.cbv_num = f[1].header.get('CBVNUM', 1)
        self.cbv_niter = f[1].header['CBVNITER']
        self.cbv_win = f[1].header['CBVWIN']
        self.cbv_order = f[1].header['CBVORD']
        self.cadn = f[1].data['CADN']
        self.cdivs = f[1].header['CDIVS']
        self.cdpp = f[1].header['CDPP']
        self.cdppr = f[1].header['CDPPR']
        self.cdppv = f[1].header['CDPPV']
        self.cdppg = f[1].header['CDPPG']
        self.cv_min = f[1].header['CVMIN']
        self.fpix = f[2].data['FPIX']
        self.pixel_images = [f[4].data['STAMP1'],
                             f[4].data['STAMP2'],
                             f[4].data['STAMP3']]
        self.fraw = f[1].data['FRAW']
        self.fraw_err = f[1].data['FRAW_ERR']
        self.giter = f[1].header['GITER']
        self.gmaxf = f[1].header.get('GMAXF', 200)
        self.gp_factor = f[1].header['GPFACTOR']
        try:
            self.hires = f[5].data
        except:
            self.hires = None
        self.kernel_params = np.array([f[1].header['GPWHITE'],
                                       f[1].header['GPRED'],
                                       f[1].header['GPTAU']])
        try:
            self.kernel = f[1].header['KERNEL']
            self.kernel_params = np.append(
                self.kernel_params,
                [f[1].header['GPGAMMA'], f[1].header['GPPER']])
        except KeyError:
            self.kernel = 'Basic'
        self.pld_order = f[1].header['PLDORDER']
        self.lam_idx = self.pld_order
        self.leps = f[1].header['LEPS']
        self.mag = f[0].header['KEPMAG']
        self.max_pixels = f[1].header['MAXPIX']
        self.model = self.fraw - f[1].data['FLUX']
        self.nearby = []
        for i in range(99):
            try:
                ID = f[1].header['NRBY%02dID' % (i + 1)]
                x = f[1].header['NRBY%02dX' % (i + 1)]
                y = f[1].header['NRBY%02dY' % (i + 1)]
                mag = f[1].header['NRBY%02dM' % (i + 1)]
                x0 = f[1].header['NRBY%02dX0' % (i + 1)]
                y0 = f[1].header['NRBY%02dY0' % (i + 1)]
                self.nearby.append({'ID': ID, 'x': x, 'y': y,
                                    'mag': mag, 'x0': x0, 'y0': y0})
            except KeyError:
                break
        self.neighbors = []
        for c in range(99):
            try:
                self.neighbors.append(f[1].header['NEIGH%02d' % (c + 1)])
            except KeyError:
                break
        self.oiter = f[1].header['OITER']
        self.optimize_gp = f[1].header['OPTGP']
        self.osigma = f[1].header['OSIGMA']
        self.planets = []
        for i in range(99):
            try:
                t0 = f[1].header['P%02dT0' % (i + 1)]
                per = f[1].header['P%02dPER' % (i + 1)]
                dur = f[1].header['P%02dDUR' % (i + 1)]
                self.planets.append((t0, per, dur))
            except KeyError:
                break
        self.quality = f[1].data['QUALITY']
        self.saturated = f[1].header['SATUR']
        self.saturation_tolerance = f[1].header['SATTOL']
        self.time = f[1].data['TIME']
        self._norm = np.array(self.fraw)

        # Chunk arrays
        self.breakpoints = []
        self.cdpp_arr = []
        self.cdppv_arr = []
        self.cdppr_arr = []
        for c in range(99):
            try:
                self.breakpoints.append(f[1].header['BRKPT%02d' % (c + 1)])
                self.cdpp_arr.append(f[1].header['CDPP%02d' % (c + 1)])
                self.cdppr_arr.append(f[1].header['CDPPR%02d' % (c + 1)])
                self.cdppv_arr.append(f[1].header['CDPPV%02d' % (c + 1)])
            except KeyError:
                break
        self.lam = [[f[1].header['LAMB%02d%02d' % (c + 1, o + 1)]
                     for o in range(self.pld_order)]
                    for c in range(len(self.breakpoints))]
        if self.model_name == 'iPLD':
            self.reclam = [[f[1].header['RECL%02d%02d' % (c + 1, o + 1)]
                            for o in range(self.pld_order)]
                           for c in range(len(self.breakpoints))]

        # Masks
        self.badmask = np.where(self.quality & 2 ** (QUALITY_BAD - 1))[0]
        self.nanmask = np.where(self.quality & 2 ** (QUALITY_NAN - 1))[0]
        self.outmask = np.where(self.quality & 2 ** (QUALITY_OUT - 1))[0]
        self.recmask = np.where(self.quality & 2 ** (QUALITY_REC - 1))[0]
        self.transitmask = np.where(self.quality & 2 ** (QUALITY_TRN - 1))[0]

        # CBVs
        self.XCBV = np.empty((len(self.time), 0))
        for i in range(99):
            try:
                self.XCBV = np.hstack(
                    [self.XCBV,
                     f[1].data['CBV%02d' % (i + 1)].reshape(-1, 1)])
            except KeyError:
                break

        # These are not stored in the fits file; we don't need them
        self.saturated_aperture_name = None
        self.apertures = None
        self.Xpos = None
        self.Ypos = None
        self.fpix_err = None
        self.parent_model = None
        self.lambda_arr = None
        self.meta = None
        self._transit_model = None
        self.transit_depth = None
python
Load the FITS file from disk and populate the class instance with its data.
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L469-L621
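For readers who want the data without the class machinery, the extension layout that `load_fits` assumes can be read directly with `astropy`; the filename below is a placeholder:

# Sketch: read the EVEREST FITS product by hand. Extensions per load_fits:
# [0] primary (KEPMAG), [1] light curve + model headers, [2] pixel-level
# data, [3] aperture, [4] postage stamps, [5] optional hi-res image.
from astropy.io import fits as pyfits

with pyfits.open('everest_target.fits') as f:  # placeholder path
    time = f[1].data['TIME']
    flux = f[1].data['FLUX']
    print(f[1].header['PLDORDER'], f[1].header['CDPP'])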
rodluger/everest
everest/user.py
Everest.plot_aperture
def plot_aperture(self, show=True):
    '''
    Plot sample postage stamps for the target with the
    aperture outline marked, as well as a high-res target image
    (if available).

    :param bool show: Show the plot or return the `(fig, ax)` instance? \
           Default :py:obj:`True`

    '''

    # Set up the axes
    fig, ax = pl.subplots(2, 2, figsize=(6, 8))
    fig.subplots_adjust(top=0.975, bottom=0.025, left=0.05,
                        right=0.95, hspace=0.05, wspace=0.05)
    ax = ax.flatten()
    fig.canvas.set_window_title('%s %d' % (self._mission.IDSTRING, self.ID))
    super(Everest, self).plot_aperture(ax, labelsize=12)

    if show:
        pl.show()
        pl.close()
    else:
        return fig, ax
python
Plot sample postage stamps for the target with the aperture outline marked, as well as a high-res target image (if available).

:param bool show: Show the plot or return the `(fig, ax)` instance? Default :py:obj:`True`
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L623-L646
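With `show=False` the method hands back the figure instead of blocking on `pl.show()`, which is handy for saving to disk; a sketch (the ID and output filename are illustrative):

# Sketch: save the aperture postage stamps instead of displaying them.
import everest

star = everest.Everest(201367065)
fig, ax = star.plot_aperture(show=False)
fig.savefig('aperture_201367065.png', dpi=150)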
rodluger/everest
everest/user.py
Everest.plot
def plot(self, show=True, plot_raw=True, plot_gp=True,
         plot_bad=True, plot_out=True, plot_cbv=True, simple=False):
    '''
    Plots the final de-trended light curve.

    :param bool show: Show the plot or return the `(fig, ax)` instance? \
           Default :py:obj:`True`
    :param bool plot_raw: Show the raw light curve? Default :py:obj:`True`
    :param bool plot_gp: Show the GP model prediction? \
           Default :py:obj:`True`
    :param bool plot_bad: Show and indicate the bad data points? \
           Default :py:obj:`True`
    :param bool plot_out: Show and indicate the outliers? \
           Default :py:obj:`True`
    :param bool plot_cbv: Plot the CBV-corrected light curve? \
           Default :py:obj:`True`. If :py:obj:`False`, plots the \
           de-trended but uncorrected light curve.

    '''

    log.info('Plotting the light curve...')

    # Set up axes
    if plot_raw:
        fig, axes = pl.subplots(2, figsize=(13, 9), sharex=True)
        fig.subplots_adjust(hspace=0.1)
        axes = [axes[1], axes[0]]
        if plot_cbv:
            fluxes = [self.fcor, self.fraw]
        else:
            fluxes = [self.flux, self.fraw]
        labels = ['EVEREST Flux', 'Raw Flux']
    else:
        fig, axes = pl.subplots(1, figsize=(13, 6))
        axes = [axes]
        if plot_cbv:
            fluxes = [self.fcor]
        else:
            fluxes = [self.flux]
        labels = ['EVEREST Flux']
    fig.canvas.set_window_title('EVEREST Light curve')

    # Set up some stuff
    time = self.time
    badmask = self.badmask
    nanmask = self.nanmask
    outmask = self.outmask
    transitmask = self.transitmask
    fraw_err = self.fraw_err
    breakpoints = self.breakpoints
    if self.cadence == 'sc':
        ms = 2
    else:
        ms = 4

    # Get the cdpps
    cdpps = [[self.get_cdpp(self.flux), self.get_cdpp_arr(self.flux)],
             [self.get_cdpp(self.fraw), self.get_cdpp_arr(self.fraw)]]
    self.cdpp = cdpps[0][0]
    self.cdpp_arr = cdpps[0][1]

    for n, ax, flux, label, c in zip([0, 1], axes, fluxes, labels, cdpps):

        # Initialize CDPP
        cdpp = c[0]
        cdpp_arr = c[1]

        # Plot the good data points
        ax.plot(self.apply_mask(time), self.apply_mask(flux),
                ls='none', marker='.', color='k', markersize=ms, alpha=0.5)

        # Plot the outliers
        bnmask = np.array(
            list(set(np.concatenate([badmask, nanmask]))), dtype=int)
        bmask = [i for i in self.badmask if i not in self.nanmask]

        def O1(x):
            return x[outmask]

        def O2(x):
            return x[bmask]

        def O3(x):
            return x[transitmask]

        if plot_out:
            ax.plot(O1(time), O1(flux), ls='none', color="#777777",
                    marker='.', markersize=ms, alpha=0.5)
        if plot_bad:
            ax.plot(O2(time), O2(flux), 'r.', markersize=ms, alpha=0.25)
        ax.plot(O3(time), O3(flux), 'b.', markersize=ms, alpha=0.25)

        # Plot the GP
        if n == 0 and plot_gp and self.cadence != 'sc':
            gp = GP(self.kernel, self.kernel_params)
            gp.compute(self.apply_mask(time), self.apply_mask(fraw_err))
            med = np.nanmedian(self.apply_mask(flux))
            y, _ = gp.predict(self.apply_mask(flux) - med, time)
            y += med
            ax.plot(self.apply_mask(time), self.apply_mask(y),
                    'r-', lw=0.5, alpha=0.5)

        # Appearance
        if n == 0:
            ax.set_xlabel('Time (%s)' % self._mission.TIMEUNITS,
                          fontsize=18)
        ax.set_ylabel(label, fontsize=18)
        for brkpt in breakpoints[:-1]:
            ax.axvline(time[brkpt], color='r', ls='--', alpha=0.25)
        if len(cdpp_arr) == 2:
            ax.annotate('%.2f ppm' % cdpp_arr[0], xy=(0.02, 0.975),
                        xycoords='axes fraction', ha='left', va='top',
                        fontsize=12, color='r', zorder=99)
            ax.annotate('%.2f ppm' % cdpp_arr[1], xy=(0.98, 0.975),
                        xycoords='axes fraction', ha='right', va='top',
                        fontsize=12, color='r', zorder=99)
        elif len(cdpp_arr) < 6:
            for n in range(len(cdpp_arr)):
                if n > 0:
                    x = (self.time[self.breakpoints[n - 1]] - self.time[0]) \
                        / (self.time[-1] - self.time[0]) + 0.02
                else:
                    x = 0.02
                ax.annotate('%.2f ppm' % cdpp_arr[n], xy=(x, 0.975),
                            xycoords='axes fraction', ha='left', va='top',
                            fontsize=10, zorder=99, color='r')
        else:
            ax.annotate('%.2f ppm' % cdpp, xy=(0.02, 0.975),
                        xycoords='axes fraction', ha='left', va='top',
                        fontsize=12, color='r', zorder=99)
        ax.margins(0.01, 0.1)

        # Get y lims that bound 99% of the flux
        f = np.concatenate([np.delete(f, bnmask) for f in fluxes])
        N = int(0.995 * len(f))
        hi, lo = f[np.argsort(f)][[N, -N]]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)
        ax.get_yaxis().set_major_formatter(Formatter.Flux)

        # Indicate off-axis outliers
        for i in np.where(flux < ylim[0])[0]:
            if i in bmask:
                color = "#ffcccc"
                if not plot_bad:
                    continue
            elif i in outmask:
                color = "#cccccc"
                if not plot_out:
                    continue
            elif i in nanmask:
                continue
            else:
                color = "#ccccff"
            ax.annotate('', xy=(time[i], ylim[0]), xycoords='data',
                        xytext=(0, 15), textcoords='offset points',
                        arrowprops=dict(arrowstyle="-|>", color=color))
        for i in np.where(flux > ylim[1])[0]:
            if i in bmask:
                color = "#ffcccc"
                if not plot_bad:
                    continue
            elif i in outmask:
                color = "#cccccc"
                if not plot_out:
                    continue
            elif i in nanmask:
                continue
            else:
                color = "#ccccff"
            ax.annotate('', xy=(time[i], ylim[1]), xycoords='data',
                        xytext=(0, -15), textcoords='offset points',
                        arrowprops=dict(arrowstyle="-|>", color=color))

    # Show total CDPP improvement
    pl.figtext(0.5, 0.94, '%s %d' % (self._mission.IDSTRING, self.ID),
               fontsize=18, ha='center', va='bottom')
    pl.figtext(0.5, 0.905,
               r'$%.2f\ \mathrm{ppm} \rightarrow %.2f\ \mathrm{ppm}$'
               % (self.cdppr, self.cdpp),
               fontsize=14, ha='center', va='bottom')

    if show:
        pl.show()
        pl.close()
    else:
        if plot_raw:
            return fig, axes
        else:
            return fig, axes[0]
python
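A closing sketch of how the plotting method above is typically driven; the ID and output filename are illustrative, and `Everest(ID)` instantiation is assumed as in the earlier examples:

# Sketch: plot the CBV-corrected light curve interactively, then grab
# the (fig, axes) pair for a non-blocking render of the uncorrected flux.
import everest

star = everest.Everest(201367065)
star.plot()                                        # blocking pl.show()
fig, axes = star.plot(show=False, plot_cbv=False)  # de-trended only
fig.savefig('lightcurve_201367065.png', dpi=150)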
Plots the final de-trended light curve.

:param bool show: Show the plot or return the `(fig, ax)` instance? \
       Default :py:obj:`True`
:param bool plot_raw: Show the raw light curve? Default :py:obj:`True`
:param bool plot_gp: Show the GP model prediction? \
       Default :py:obj:`True`
:param bool plot_bad: Show and indicate the bad data points? \
       Default :py:obj:`True`
:param bool plot_out: Show and indicate the outliers? \
       Default :py:obj:`True`
:param bool plot_cbv: Plot the CBV-corrected light curve? \
       Default :py:obj:`True`. If :py:obj:`False`, plots the \
       de-trended but uncorrected light curve.
[ "Plots", "the", "final", "de", "-", "trended", "light", "curve", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L648-L839
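A minimal usage sketch for the `plot` record above (not part of the dataset record; the `everest` package import and the EPIC target ID are illustrative assumptions):

    import everest

    # De-trend a K2 target; with show=False the figure is returned
    # instead of being displayed interactively
    star = everest.Everest(201367065)  # EPIC ID is illustrative
    fig, axes = star.plot(show=False, plot_raw=True, plot_gp=True)
    fig.savefig('everest_lc.png')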
rodluger/everest
everest/user.py
Everest.dvs
def dvs(self):
        '''
        Shows the data validation summary (DVS) for the target.

        '''

        DVS(self.ID, season=self.season, mission=self.mission,
            model=self.model_name, clobber=self.clobber)
python
def dvs(self):
        '''
        Shows the data validation summary (DVS) for the target.

        '''

        DVS(self.ID, season=self.season, mission=self.mission,
            model=self.model_name, clobber=self.clobber)
[ "def", "dvs", "(", "self", ")", ":", "DVS", "(", "self", ".", "ID", ",", "season", "=", "self", ".", "season", ",", "mission", "=", "self", ".", "mission", ",", "model", "=", "self", ".", "model_name", ",", "clobber", "=", "self", ".", "clobber", ")" ]
Shows the data validation summary (DVS) for the target.
[ "Shows", "the", "data", "validation", "summary", "(", "DVS", ")", "for", "the", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L841-L848
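Usage is a one-liner (a sketch, not part of the record; it assumes the same illustrative EPIC ID as above):

    import everest

    star = everest.Everest(201367065)  # EPIC ID is illustrative
    star.dvs()  # fetch and display the data validation summary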
rodluger/everest
everest/user.py
Everest.plot_pipeline
def plot_pipeline(self, pipeline, *args, **kwargs):
        '''
        Plots the light curve for the target de-trended with a given
        pipeline.

        :param str pipeline: The name of the pipeline (lowercase). Options \
               are 'everest2', 'everest1', and other mission-specific \
               pipelines. For `K2`, the available pipelines are 'k2sff' \
               and 'k2sc'.

        Additional :py:obj:`args` and :py:obj:`kwargs` are passed directly
        to the :py:func:`pipelines.plot` function of the mission.

        '''

        if pipeline != 'everest2':
            return getattr(missions, self.mission).pipelines.plot(
                self.ID, pipeline, *args, **kwargs)
        else:
            # We're going to plot the everest 2 light curve like we plot
            # the other pipelines for easy comparison
            plot_raw = kwargs.get('plot_raw', False)
            plot_cbv = kwargs.get('plot_cbv', True)
            show = kwargs.get('show', True)

            if plot_raw:
                y = self.fraw
                ylabel = 'Raw Flux'
            elif plot_cbv:
                y = self.fcor
                ylabel = "EVEREST2 Flux"
            else:
                y = self.flux
                ylabel = "EVEREST2 Flux"

            # Remove nans
            bnmask = np.concatenate([self.nanmask, self.badmask])
            time = np.delete(self.time, bnmask)
            flux = np.delete(y, bnmask)

            # Plot it
            fig, ax = pl.subplots(1, figsize=(10, 4))
            fig.subplots_adjust(bottom=0.15)
            ax.plot(time, flux, "k.", markersize=3, alpha=0.5)

            # Axis limits
            N = int(0.995 * len(flux))
            hi, lo = flux[np.argsort(flux)][[N, -N]]
            pad = (hi - lo) * 0.1
            ylim = (lo - pad, hi + pad)
            ax.set_ylim(ylim)

            # Plot bad data points
            ax.plot(self.time[self.badmask], y[self.badmask],
                    "r.", markersize=3, alpha=0.2)

            # Show the CDPP
            ax.annotate('%.2f ppm' % self._mission.CDPP(flux),
                        xy=(0.98, 0.975), xycoords='axes fraction',
                        ha='right', va='top', fontsize=12,
                        color='r', zorder=99)

            # Appearance
            ax.margins(0, None)
            ax.set_xlabel("Time (%s)" % self._mission.TIMEUNITS, fontsize=16)
            ax.set_ylabel(ylabel, fontsize=16)
            fig.canvas.set_window_title("EVEREST2: EPIC %d" % (self.ID))

            if show:
                pl.show()
                pl.close()
            else:
                return fig, ax
python
def plot_pipeline(self, pipeline, *args, **kwargs):
        '''
        Plots the light curve for the target de-trended with a given
        pipeline.

        :param str pipeline: The name of the pipeline (lowercase). Options \
               are 'everest2', 'everest1', and other mission-specific \
               pipelines. For `K2`, the available pipelines are 'k2sff' \
               and 'k2sc'.

        Additional :py:obj:`args` and :py:obj:`kwargs` are passed directly
        to the :py:func:`pipelines.plot` function of the mission.

        '''

        if pipeline != 'everest2':
            return getattr(missions, self.mission).pipelines.plot(
                self.ID, pipeline, *args, **kwargs)
        else:
            # We're going to plot the everest 2 light curve like we plot
            # the other pipelines for easy comparison
            plot_raw = kwargs.get('plot_raw', False)
            plot_cbv = kwargs.get('plot_cbv', True)
            show = kwargs.get('show', True)

            if plot_raw:
                y = self.fraw
                ylabel = 'Raw Flux'
            elif plot_cbv:
                y = self.fcor
                ylabel = "EVEREST2 Flux"
            else:
                y = self.flux
                ylabel = "EVEREST2 Flux"

            # Remove nans
            bnmask = np.concatenate([self.nanmask, self.badmask])
            time = np.delete(self.time, bnmask)
            flux = np.delete(y, bnmask)

            # Plot it
            fig, ax = pl.subplots(1, figsize=(10, 4))
            fig.subplots_adjust(bottom=0.15)
            ax.plot(time, flux, "k.", markersize=3, alpha=0.5)

            # Axis limits
            N = int(0.995 * len(flux))
            hi, lo = flux[np.argsort(flux)][[N, -N]]
            pad = (hi - lo) * 0.1
            ylim = (lo - pad, hi + pad)
            ax.set_ylim(ylim)

            # Plot bad data points
            ax.plot(self.time[self.badmask], y[self.badmask],
                    "r.", markersize=3, alpha=0.2)

            # Show the CDPP
            ax.annotate('%.2f ppm' % self._mission.CDPP(flux),
                        xy=(0.98, 0.975), xycoords='axes fraction',
                        ha='right', va='top', fontsize=12,
                        color='r', zorder=99)

            # Appearance
            ax.margins(0, None)
            ax.set_xlabel("Time (%s)" % self._mission.TIMEUNITS, fontsize=16)
            ax.set_ylabel(ylabel, fontsize=16)
            fig.canvas.set_window_title("EVEREST2: EPIC %d" % (self.ID))

            if show:
                pl.show()
                pl.close()
            else:
                return fig, ax
[ "def", "plot_pipeline", "(", "self", ",", "pipeline", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "pipeline", "!=", "'everest2'", ":", "return", "getattr", "(", "missions", ",", "self", ".", "mission", ")", ".", "pipelines", ".", "plot", "(", "self", ".", "ID", ",", "pipeline", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "# We're going to plot the everest 2 light curve like we plot", "# the other pipelines for easy comparison", "plot_raw", "=", "kwargs", ".", "get", "(", "'plot_raw'", ",", "False", ")", "plot_cbv", "=", "kwargs", ".", "get", "(", "'plot_cbv'", ",", "True", ")", "show", "=", "kwargs", ".", "get", "(", "'show'", ",", "True", ")", "if", "plot_raw", ":", "y", "=", "self", ".", "fraw", "ylabel", "=", "'Raw Flux'", "elif", "plot_cbv", ":", "y", "=", "self", ".", "fcor", "ylabel", "=", "\"EVEREST2 Flux\"", "else", ":", "y", "=", "self", ".", "flux", "ylabel", "=", "\"EVEREST2 Flux\"", "# Remove nans", "bnmask", "=", "np", ".", "concatenate", "(", "[", "self", ".", "nanmask", ",", "self", ".", "badmask", "]", ")", "time", "=", "np", ".", "delete", "(", "self", ".", "time", ",", "bnmask", ")", "flux", "=", "np", ".", "delete", "(", "y", ",", "bnmask", ")", "# Plot it", "fig", ",", "ax", "=", "pl", ".", "subplots", "(", "1", ",", "figsize", "=", "(", "10", ",", "4", ")", ")", "fig", ".", "subplots_adjust", "(", "bottom", "=", "0.15", ")", "ax", ".", "plot", "(", "time", ",", "flux", ",", "\"k.\"", ",", "markersize", "=", "3", ",", "alpha", "=", "0.5", ")", "# Axis limits", "N", "=", "int", "(", "0.995", "*", "len", "(", "flux", ")", ")", "hi", ",", "lo", "=", "flux", "[", "np", ".", "argsort", "(", "flux", ")", "]", "[", "[", "N", ",", "-", "N", "]", "]", "pad", "=", "(", "hi", "-", "lo", ")", "*", "0.1", "ylim", "=", "(", "lo", "-", "pad", ",", "hi", "+", "pad", ")", "ax", ".", "set_ylim", "(", "ylim", ")", "# Plot bad data points", "ax", ".", "plot", "(", "self", ".", "time", "[", "self", ".", "badmask", "]", ",", "y", "[", "self", ".", "badmask", "]", ",", "\"r.\"", ",", "markersize", "=", "3", ",", "alpha", "=", "0.2", ")", "# Show the CDPP", "ax", ".", "annotate", "(", "'%.2f ppm'", "%", "self", ".", "_mission", ".", "CDPP", "(", "flux", ")", ",", "xy", "=", "(", "0.98", ",", "0.975", ")", ",", "xycoords", "=", "'axes fraction'", ",", "ha", "=", "'right'", ",", "va", "=", "'top'", ",", "fontsize", "=", "12", ",", "color", "=", "'r'", ",", "zorder", "=", "99", ")", "# Appearance", "ax", ".", "margins", "(", "0", ",", "None", ")", "ax", ".", "set_xlabel", "(", "\"Time (%s)\"", "%", "self", ".", "_mission", ".", "TIMEUNITS", ",", "fontsize", "=", "16", ")", "ax", ".", "set_ylabel", "(", "ylabel", ",", "fontsize", "=", "16", ")", "fig", ".", "canvas", ".", "set_window_title", "(", "\"EVEREST2: EPIC %d\"", "%", "(", "self", ".", "ID", ")", ")", "if", "show", ":", "pl", ".", "show", "(", ")", "pl", ".", "close", "(", ")", "else", ":", "return", "fig", ",", "ax" ]
Plots the light curve for the target de-trended with a given pipeline.

:param str pipeline: The name of the pipeline (lowercase). Options \
       are 'everest2', 'everest1', and other mission-specific \
       pipelines. For `K2`, the available pipelines are 'k2sff' \
       and 'k2sc'.

Additional :py:obj:`args` and :py:obj:`kwargs` are passed directly
to the :py:func:`pipelines.plot` function of the mission.
[ "Plots", "the", "light", "curve", "for", "the", "target", "de", "-", "trended", "with", "a", "given", "pipeline", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L850-L925
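A hedged comparison sketch built on the record above; the pipeline names follow its docstring and the target ID is again illustrative:

    import everest

    star = everest.Everest(201367065)  # EPIC ID is illustrative
    # Side-by-side look at K2SFF photometry and the EVEREST 2 result,
    # which the method plots in the same style for easy comparison
    star.plot_pipeline('k2sff')
    star.plot_pipeline('everest2', plot_raw=False, plot_cbv=True)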
rodluger/everest
everest/user.py
Everest.get_pipeline
def get_pipeline(self, *args, **kwargs):
        '''
        Returns the `time` and `flux` arrays for the target obtained by a
        given pipeline. Options :py:obj:`args` and :py:obj:`kwargs` are
        passed directly to the :py:func:`pipelines.get` function of the
        mission.

        '''

        return getattr(missions, self.mission).pipelines.get(
            self.ID, *args, **kwargs)
python
def get_pipeline(self, *args, **kwargs):
        '''
        Returns the `time` and `flux` arrays for the target obtained by a
        given pipeline. Options :py:obj:`args` and :py:obj:`kwargs` are
        passed directly to the :py:func:`pipelines.get` function of the
        mission.

        '''

        return getattr(missions, self.mission).pipelines.get(
            self.ID, *args, **kwargs)
[ "def", "get_pipeline", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "getattr", "(", "missions", ",", "self", ".", "mission", ")", ".", "pipelines", ".", "get", "(", "self", ".", "ID", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Returns the `time` and `flux` arrays for the target obtained by a given pipeline. Options :py:obj:`args` and :py:obj:`kwargs` are passed directly to the :py:func:`pipelines.get` function of the mission.
[ "Returns", "the", "time", "and", "flux", "arrays", "for", "the", "target", "obtained", "by", "a", "given", "pipeline", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L927-L938
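Per the record's docstring, the method simply forwards to the mission's `pipelines.get` and hands back the time and flux arrays; a sketch of using them for a custom plot (ID illustrative):

    import everest
    import matplotlib.pyplot as pl

    star = everest.Everest(201367065)  # EPIC ID is illustrative
    time, flux = star.get_pipeline('k2sff')
    pl.plot(time, flux, 'k.', markersize=2)
    pl.show()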
rodluger/everest
everest/user.py
Everest.mask_planet
def mask_planet(self, t0, period, dur=0.2):
        '''
        Mask all of the transits/eclipses of a given planet/EB. After
        calling this method, you must re-compute the model by calling
        :py:meth:`compute` in order for the mask to take effect.

        :param float t0: The time of first transit (same units as
               light curve)
        :param float period: The period of the planet in days
        :param float dur: The transit duration in days. Default 0.2

        '''

        mask = []
        t0 += np.ceil((self.time[0] - dur - t0) / period) * period
        for t in np.arange(t0, self.time[-1] + dur, period):
            mask.extend(np.where(np.abs(self.time - t) < dur / 2.)[0])
        self.transitmask = np.array(
            list(set(np.concatenate([self.transitmask, mask]))))
python
def mask_planet(self, t0, period, dur=0.2):
        '''
        Mask all of the transits/eclipses of a given planet/EB. After
        calling this method, you must re-compute the model by calling
        :py:meth:`compute` in order for the mask to take effect.

        :param float t0: The time of first transit (same units as
               light curve)
        :param float period: The period of the planet in days
        :param float dur: The transit duration in days. Default 0.2

        '''

        mask = []
        t0 += np.ceil((self.time[0] - dur - t0) / period) * period
        for t in np.arange(t0, self.time[-1] + dur, period):
            mask.extend(np.where(np.abs(self.time - t) < dur / 2.)[0])
        self.transitmask = np.array(
            list(set(np.concatenate([self.transitmask, mask]))))
[ "def", "mask_planet", "(", "self", ",", "t0", ",", "period", ",", "dur", "=", "0.2", ")", ":", "mask", "=", "[", "]", "t0", "+=", "np", ".", "ceil", "(", "(", "self", ".", "time", "[", "0", "]", "-", "dur", "-", "t0", ")", "/", "period", ")", "*", "period", "for", "t", "in", "np", ".", "arange", "(", "t0", ",", "self", ".", "time", "[", "-", "1", "]", "+", "dur", ",", "period", ")", ":", "mask", ".", "extend", "(", "np", ".", "where", "(", "np", ".", "abs", "(", "self", ".", "time", "-", "t", ")", "<", "dur", "/", "2.", ")", "[", "0", "]", ")", "self", ".", "transitmask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "self", ".", "transitmask", ",", "mask", "]", ")", ")", ")", ")" ]
Mask all of the transits/eclipses of a given planet/EB. After calling
this method, you must re-compute the model by calling :py:meth:`compute`
in order for the mask to take effect.

:param float t0: The time of first transit (same units as light curve)
:param float period: The period of the planet in days
:param float dur: The transit duration in days. Default 0.2
[ "Mask", "all", "of", "the", "transits", "/", "eclipses", "of", "a", "given", "planet", "/", "EB", ".", "After", "calling", "this", "method", "you", "must", "re", "-", "compute", "the", "model", "by", "calling", ":", "py", ":", "meth", ":", "compute", "in", "order", "for", "the", "mask", "to", "take", "effect", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L940-L957
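The masking arithmetic above first snaps `t0` forward to the first transit after the start of the light curve, then flags every cadence within half a duration of each transit center. A usage sketch (the ephemeris values are made up for illustration):

    import everest

    star = everest.Everest(201367065)  # EPIC ID is illustrative
    # Flag cadences within dur/2 of each transit center, then re-fit
    # the PLD model so the transits don't bias the de-trending
    star.mask_planet(t0=1980.42, period=10.05, dur=0.3)  # made-up ephemeris
    star.compute()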
rodluger/everest
everest/user.py
Everest._plot_weights
def _plot_weights(self, show=True):
        '''

        .. warning:: Untested!

        '''

        # Set up the axes
        fig = pl.figure(figsize=(12, 12))
        fig.subplots_adjust(top=0.95, bottom=0.025, left=0.1, right=0.92)
        fig.canvas.set_window_title(
            '%s %d' % (self._mission.IDSTRING, self.ID))
        ax = [pl.subplot2grid((80, 130), (20 * j, 25 * i),
                              colspan=23, rowspan=18)
              for j in range(len(self.breakpoints) * 2)
              for i in range(1 + 2 * (self.pld_order - 1))]
        cax = [pl.subplot2grid((80, 130),
                               (20 * j, 25 * (1 + 2 * (self.pld_order - 1))),
                               colspan=4, rowspan=18)
               for j in range(len(self.breakpoints) * 2)]
        ax = np.array(ax).reshape(2 * len(self.breakpoints), -1)
        cax = np.array(cax)

        # Check number of segments
        if len(self.breakpoints) > 3:
            log.error('Cannot currently plot weights for light ' +
                      'curves with more than 3 segments.')
            return

        # Loop over all PLD orders and over all chunks
        npix = len(self.fpix[1])
        ap = self.aperture.flatten()
        ncol = 1 + 2 * (len(self.weights[0]) - 1)
        raw_weights = np.zeros(
            (len(self.breakpoints), ncol,
             self.aperture.shape[0], self.aperture.shape[1]), dtype=float)
        scaled_weights = np.zeros(
            (len(self.breakpoints), ncol,
             self.aperture.shape[0], self.aperture.shape[1]), dtype=float)

        # Loop over orders
        for o in range(len(self.weights[0])):
            if o == 0:
                oi = 0
            else:
                oi = 1 + 2 * (o - 1)

            # Loop over chunks
            for b in range(len(self.weights)):
                c = self.get_chunk(b)
                rw_ii = np.zeros(npix)
                rw_ij = np.zeros(npix)
                sw_ii = np.zeros(npix)
                sw_ij = np.zeros(npix)
                X = np.nanmedian(self.X(o, c), axis=0)

                # Compute all sets of pixels at this PLD order, then
                # loop over them and assign the weights to the correct pixels
                sets = np.array(list(multichoose(np.arange(npix).T, o + 1)))
                for i, s in enumerate(sets):
                    if (o == 0) or (s[0] == s[1]):
                        # Not the cross-terms
                        j = s[0]
                        rw_ii[j] += self.weights[b][o][i]
                        sw_ii[j] += X[i] * self.weights[b][o][i]
                    else:
                        # Cross-terms
                        for j in s:
                            rw_ij[j] += self.weights[b][o][i]
                            sw_ij[j] += X[i] * self.weights[b][o][i]

                # Make the array 2D and plot it
                rw = np.zeros_like(ap, dtype=float)
                sw = np.zeros_like(ap, dtype=float)
                n = 0
                for i, a in enumerate(ap):
                    if (a & 1):
                        rw[i] = rw_ii[n]
                        sw[i] = sw_ii[n]
                        n += 1
                raw_weights[b][oi] = rw.reshape(*self.aperture.shape)
                scaled_weights[b][oi] = sw.reshape(*self.aperture.shape)

                if o > 0:
                    # Make the array 2D and plot it
                    rw = np.zeros_like(ap, dtype=float)
                    sw = np.zeros_like(ap, dtype=float)
                    n = 0
                    for i, a in enumerate(ap):
                        if (a & 1):
                            rw[i] = rw_ij[n]
                            sw[i] = sw_ij[n]
                            n += 1
                    raw_weights[b][oi + 1] = \
                        rw.reshape(*self.aperture.shape)
                    scaled_weights[b][oi + 1] = \
                        sw.reshape(*self.aperture.shape)

        # Plot the images
        log.info('Plotting the PLD weights...')
        rdbu = pl.get_cmap('RdBu_r')
        rdbu.set_bad('k')
        for b in range(len(self.weights)):
            rmax = max([-raw_weights[b][o].min() for o in range(ncol)] +
                       [raw_weights[b][o].max() for o in range(ncol)])
            smax = max([-scaled_weights[b][o].min() for o in range(ncol)] +
                       [scaled_weights[b][o].max() for o in range(ncol)])
            for o in range(ncol):
                imr = ax[2 * b, o].imshow(raw_weights[b][o], aspect='auto',
                                          interpolation='nearest', cmap=rdbu,
                                          origin='lower',
                                          vmin=-rmax, vmax=rmax)
                ims = ax[2 * b + 1, o].imshow(scaled_weights[b][o],
                                              aspect='auto',
                                              interpolation='nearest',
                                              cmap=rdbu, origin='lower',
                                              vmin=-smax, vmax=smax)

            # Colorbars
            def fmt(x, pos):
                a, b = '{:.0e}'.format(x).split('e')
                b = int(b)
                if float(a) > 0:
                    a = r'+' + a
                elif float(a) == 0:
                    return ''
                return r'${} \times 10^{{{}}}$'.format(a, b)
            cbr = pl.colorbar(imr, cax=cax[2 * b], format=FuncFormatter(fmt))
            cbr.ax.tick_params(labelsize=8)
            cbs = pl.colorbar(ims, cax=cax[2 * b + 1],
                              format=FuncFormatter(fmt))
            cbs.ax.tick_params(labelsize=8)

        # Plot aperture contours
        def PadWithZeros(vector, pad_width, iaxis, kwargs):
            vector[:pad_width[0]] = 0
            vector[-pad_width[1]:] = 0
            return vector
        ny, nx = self.aperture.shape
        contour = np.zeros((ny, nx))
        contour[np.where(self.aperture)] = 1
        contour = np.lib.pad(contour, 1, PadWithZeros)
        highres = zoom(contour, 100, order=0, mode='nearest')
        extent = np.array([-1, nx, -1, ny])
        for axis in ax.flatten():
            axis.contour(highres, levels=[0.5], extent=extent,
                         origin='lower', colors='r', linewidths=1)

            # Check for saturated columns
            for x in range(self.aperture.shape[0]):
                for y in range(self.aperture.shape[1]):
                    if self.aperture[x][y] == AP_SATURATED_PIXEL:
                        axis.fill([y - 0.5, y + 0.5, y + 0.5, y - 0.5],
                                  [x - 0.5, x - 0.5, x + 0.5, x + 0.5],
                                  fill=False, hatch='xxxxx', color='r', lw=0)

            axis.set_xlim(-0.5, nx - 0.5)
            axis.set_ylim(-0.5, ny - 0.5)
            axis.set_xticks([])
            axis.set_yticks([])

        # Labels
        titles = [r'$1^{\mathrm{st}}$',
                  r'$2^{\mathrm{nd}}\ (i = j)$',
                  r'$2^{\mathrm{nd}}\ (i \neq j)$',
                  r'$3^{\mathrm{rd}}\ (i = j)$',
                  r'$3^{\mathrm{rd}}\ (i \neq j)$'] + \
            ['' for i in range(10)]
        for i, axis in enumerate(ax[0]):
            axis.set_title(titles[i], fontsize=12)
        for j in range(len(self.weights)):
            ax[2 * j, 0].text(-0.55, -0.15, r'$%d$' % (j + 1), fontsize=16,
                              transform=ax[2 * j, 0].transAxes)
            ax[2 * j, 0].set_ylabel(r'$w_{ij}$', fontsize=18)
            ax[2 * j + 1, 0].set_ylabel(r'$\bar{X}_{ij} \cdot w_{ij}$',
                                        fontsize=18)

        if show:
            pl.show()
            pl.close()
        else:
            return fig, ax, cax
python
def _plot_weights(self, show=True):
        '''

        .. warning:: Untested!

        '''

        # Set up the axes
        fig = pl.figure(figsize=(12, 12))
        fig.subplots_adjust(top=0.95, bottom=0.025, left=0.1, right=0.92)
        fig.canvas.set_window_title(
            '%s %d' % (self._mission.IDSTRING, self.ID))
        ax = [pl.subplot2grid((80, 130), (20 * j, 25 * i),
                              colspan=23, rowspan=18)
              for j in range(len(self.breakpoints) * 2)
              for i in range(1 + 2 * (self.pld_order - 1))]
        cax = [pl.subplot2grid((80, 130),
                               (20 * j, 25 * (1 + 2 * (self.pld_order - 1))),
                               colspan=4, rowspan=18)
               for j in range(len(self.breakpoints) * 2)]
        ax = np.array(ax).reshape(2 * len(self.breakpoints), -1)
        cax = np.array(cax)

        # Check number of segments
        if len(self.breakpoints) > 3:
            log.error('Cannot currently plot weights for light ' +
                      'curves with more than 3 segments.')
            return

        # Loop over all PLD orders and over all chunks
        npix = len(self.fpix[1])
        ap = self.aperture.flatten()
        ncol = 1 + 2 * (len(self.weights[0]) - 1)
        raw_weights = np.zeros(
            (len(self.breakpoints), ncol,
             self.aperture.shape[0], self.aperture.shape[1]), dtype=float)
        scaled_weights = np.zeros(
            (len(self.breakpoints), ncol,
             self.aperture.shape[0], self.aperture.shape[1]), dtype=float)

        # Loop over orders
        for o in range(len(self.weights[0])):
            if o == 0:
                oi = 0
            else:
                oi = 1 + 2 * (o - 1)

            # Loop over chunks
            for b in range(len(self.weights)):
                c = self.get_chunk(b)
                rw_ii = np.zeros(npix)
                rw_ij = np.zeros(npix)
                sw_ii = np.zeros(npix)
                sw_ij = np.zeros(npix)
                X = np.nanmedian(self.X(o, c), axis=0)

                # Compute all sets of pixels at this PLD order, then
                # loop over them and assign the weights to the correct pixels
                sets = np.array(list(multichoose(np.arange(npix).T, o + 1)))
                for i, s in enumerate(sets):
                    if (o == 0) or (s[0] == s[1]):
                        # Not the cross-terms
                        j = s[0]
                        rw_ii[j] += self.weights[b][o][i]
                        sw_ii[j] += X[i] * self.weights[b][o][i]
                    else:
                        # Cross-terms
                        for j in s:
                            rw_ij[j] += self.weights[b][o][i]
                            sw_ij[j] += X[i] * self.weights[b][o][i]

                # Make the array 2D and plot it
                rw = np.zeros_like(ap, dtype=float)
                sw = np.zeros_like(ap, dtype=float)
                n = 0
                for i, a in enumerate(ap):
                    if (a & 1):
                        rw[i] = rw_ii[n]
                        sw[i] = sw_ii[n]
                        n += 1
                raw_weights[b][oi] = rw.reshape(*self.aperture.shape)
                scaled_weights[b][oi] = sw.reshape(*self.aperture.shape)

                if o > 0:
                    # Make the array 2D and plot it
                    rw = np.zeros_like(ap, dtype=float)
                    sw = np.zeros_like(ap, dtype=float)
                    n = 0
                    for i, a in enumerate(ap):
                        if (a & 1):
                            rw[i] = rw_ij[n]
                            sw[i] = sw_ij[n]
                            n += 1
                    raw_weights[b][oi + 1] = \
                        rw.reshape(*self.aperture.shape)
                    scaled_weights[b][oi + 1] = \
                        sw.reshape(*self.aperture.shape)

        # Plot the images
        log.info('Plotting the PLD weights...')
        rdbu = pl.get_cmap('RdBu_r')
        rdbu.set_bad('k')
        for b in range(len(self.weights)):
            rmax = max([-raw_weights[b][o].min() for o in range(ncol)] +
                       [raw_weights[b][o].max() for o in range(ncol)])
            smax = max([-scaled_weights[b][o].min() for o in range(ncol)] +
                       [scaled_weights[b][o].max() for o in range(ncol)])
            for o in range(ncol):
                imr = ax[2 * b, o].imshow(raw_weights[b][o], aspect='auto',
                                          interpolation='nearest', cmap=rdbu,
                                          origin='lower',
                                          vmin=-rmax, vmax=rmax)
                ims = ax[2 * b + 1, o].imshow(scaled_weights[b][o],
                                              aspect='auto',
                                              interpolation='nearest',
                                              cmap=rdbu, origin='lower',
                                              vmin=-smax, vmax=smax)

            # Colorbars
            def fmt(x, pos):
                a, b = '{:.0e}'.format(x).split('e')
                b = int(b)
                if float(a) > 0:
                    a = r'+' + a
                elif float(a) == 0:
                    return ''
                return r'${} \times 10^{{{}}}$'.format(a, b)
            cbr = pl.colorbar(imr, cax=cax[2 * b], format=FuncFormatter(fmt))
            cbr.ax.tick_params(labelsize=8)
            cbs = pl.colorbar(ims, cax=cax[2 * b + 1],
                              format=FuncFormatter(fmt))
            cbs.ax.tick_params(labelsize=8)

        # Plot aperture contours
        def PadWithZeros(vector, pad_width, iaxis, kwargs):
            vector[:pad_width[0]] = 0
            vector[-pad_width[1]:] = 0
            return vector
        ny, nx = self.aperture.shape
        contour = np.zeros((ny, nx))
        contour[np.where(self.aperture)] = 1
        contour = np.lib.pad(contour, 1, PadWithZeros)
        highres = zoom(contour, 100, order=0, mode='nearest')
        extent = np.array([-1, nx, -1, ny])
        for axis in ax.flatten():
            axis.contour(highres, levels=[0.5], extent=extent,
                         origin='lower', colors='r', linewidths=1)

            # Check for saturated columns
            for x in range(self.aperture.shape[0]):
                for y in range(self.aperture.shape[1]):
                    if self.aperture[x][y] == AP_SATURATED_PIXEL:
                        axis.fill([y - 0.5, y + 0.5, y + 0.5, y - 0.5],
                                  [x - 0.5, x - 0.5, x + 0.5, x + 0.5],
                                  fill=False, hatch='xxxxx', color='r', lw=0)

            axis.set_xlim(-0.5, nx - 0.5)
            axis.set_ylim(-0.5, ny - 0.5)
            axis.set_xticks([])
            axis.set_yticks([])

        # Labels
        titles = [r'$1^{\mathrm{st}}$',
                  r'$2^{\mathrm{nd}}\ (i = j)$',
                  r'$2^{\mathrm{nd}}\ (i \neq j)$',
                  r'$3^{\mathrm{rd}}\ (i = j)$',
                  r'$3^{\mathrm{rd}}\ (i \neq j)$'] + \
            ['' for i in range(10)]
        for i, axis in enumerate(ax[0]):
            axis.set_title(titles[i], fontsize=12)
        for j in range(len(self.weights)):
            ax[2 * j, 0].text(-0.55, -0.15, r'$%d$' % (j + 1), fontsize=16,
                              transform=ax[2 * j, 0].transAxes)
            ax[2 * j, 0].set_ylabel(r'$w_{ij}$', fontsize=18)
            ax[2 * j + 1, 0].set_ylabel(r'$\bar{X}_{ij} \cdot w_{ij}$',
                                        fontsize=18)

        if show:
            pl.show()
            pl.close()
        else:
            return fig, ax, cax
[ "def", "_plot_weights", "(", "self", ",", "show", "=", "True", ")", ":", "# Set up the axes", "fig", "=", "pl", ".", "figure", "(", "figsize", "=", "(", "12", ",", "12", ")", ")", "fig", ".", "subplots_adjust", "(", "top", "=", "0.95", ",", "bottom", "=", "0.025", ",", "left", "=", "0.1", ",", "right", "=", "0.92", ")", "fig", ".", "canvas", ".", "set_window_title", "(", "'%s %d'", "%", "(", "self", ".", "_mission", ".", "IDSTRING", ",", "self", ".", "ID", ")", ")", "ax", "=", "[", "pl", ".", "subplot2grid", "(", "(", "80", ",", "130", ")", ",", "(", "20", "*", "j", ",", "25", "*", "i", ")", ",", "colspan", "=", "23", ",", "rowspan", "=", "18", ")", "for", "j", "in", "range", "(", "len", "(", "self", ".", "breakpoints", ")", "*", "2", ")", "for", "i", "in", "range", "(", "1", "+", "2", "*", "(", "self", ".", "pld_order", "-", "1", ")", ")", "]", "cax", "=", "[", "pl", ".", "subplot2grid", "(", "(", "80", ",", "130", ")", ",", "(", "20", "*", "j", ",", "25", "*", "(", "1", "+", "2", "*", "(", "self", ".", "pld_order", "-", "1", ")", ")", ")", ",", "colspan", "=", "4", ",", "rowspan", "=", "18", ")", "for", "j", "in", "range", "(", "len", "(", "self", ".", "breakpoints", ")", "*", "2", ")", "]", "ax", "=", "np", ".", "array", "(", "ax", ")", ".", "reshape", "(", "2", "*", "len", "(", "self", ".", "breakpoints", ")", ",", "-", "1", ")", "cax", "=", "np", ".", "array", "(", "cax", ")", "# Check number of segments", "if", "len", "(", "self", ".", "breakpoints", ")", ">", "3", ":", "log", ".", "error", "(", "'Cannot currently plot weights for light '", "+", "'curves with more than 3 segments.'", ")", "return", "# Loop over all PLD orders and over all chunks", "npix", "=", "len", "(", "self", ".", "fpix", "[", "1", "]", ")", "ap", "=", "self", ".", "aperture", ".", "flatten", "(", ")", "ncol", "=", "1", "+", "2", "*", "(", "len", "(", "self", ".", "weights", "[", "0", "]", ")", "-", "1", ")", "raw_weights", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "breakpoints", ")", ",", "ncol", ",", "self", ".", "aperture", ".", "shape", "[", "0", "]", ",", "self", ".", "aperture", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "float", ")", "scaled_weights", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "breakpoints", ")", ",", "ncol", ",", "self", ".", "aperture", ".", "shape", "[", "0", "]", ",", "self", ".", "aperture", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "float", ")", "# Loop over orders", "for", "o", "in", "range", "(", "len", "(", "self", ".", "weights", "[", "0", "]", ")", ")", ":", "if", "o", "==", "0", ":", "oi", "=", "0", "else", ":", "oi", "=", "1", "+", "2", "*", "(", "o", "-", "1", ")", "# Loop over chunks", "for", "b", "in", "range", "(", "len", "(", "self", ".", "weights", ")", ")", ":", "c", "=", "self", ".", "get_chunk", "(", "b", ")", "rw_ii", "=", "np", ".", "zeros", "(", "npix", ")", "rw_ij", "=", "np", ".", "zeros", "(", "npix", ")", "sw_ii", "=", "np", ".", "zeros", "(", "npix", ")", "sw_ij", "=", "np", ".", "zeros", "(", "npix", ")", "X", "=", "np", ".", "nanmedian", "(", "self", ".", "X", "(", "o", ",", "c", ")", ",", "axis", "=", "0", ")", "# Compute all sets of pixels at this PLD order, then", "# loop over them and assign the weights to the correct pixels", "sets", "=", "np", ".", "array", "(", "list", "(", "multichoose", "(", "np", ".", "arange", "(", "npix", ")", ".", "T", ",", "o", "+", "1", ")", ")", ")", "for", "i", ",", "s", "in", "enumerate", "(", "sets", ")", ":", "if", "(", "o", "==", "0", ")", "or", "(", "s", "[", 
"0", "]", "==", "s", "[", "1", "]", ")", ":", "# Not the cross-terms", "j", "=", "s", "[", "0", "]", "rw_ii", "[", "j", "]", "+=", "self", ".", "weights", "[", "b", "]", "[", "o", "]", "[", "i", "]", "sw_ii", "[", "j", "]", "+=", "X", "[", "i", "]", "*", "self", ".", "weights", "[", "b", "]", "[", "o", "]", "[", "i", "]", "else", ":", "# Cross-terms", "for", "j", "in", "s", ":", "rw_ij", "[", "j", "]", "+=", "self", ".", "weights", "[", "b", "]", "[", "o", "]", "[", "i", "]", "sw_ij", "[", "j", "]", "+=", "X", "[", "i", "]", "*", "self", ".", "weights", "[", "b", "]", "[", "o", "]", "[", "i", "]", "# Make the array 2D and plot it", "rw", "=", "np", ".", "zeros_like", "(", "ap", ",", "dtype", "=", "float", ")", "sw", "=", "np", ".", "zeros_like", "(", "ap", ",", "dtype", "=", "float", ")", "n", "=", "0", "for", "i", ",", "a", "in", "enumerate", "(", "ap", ")", ":", "if", "(", "a", "&", "1", ")", ":", "rw", "[", "i", "]", "=", "rw_ii", "[", "n", "]", "sw", "[", "i", "]", "=", "sw_ii", "[", "n", "]", "n", "+=", "1", "raw_weights", "[", "b", "]", "[", "oi", "]", "=", "rw", ".", "reshape", "(", "*", "self", ".", "aperture", ".", "shape", ")", "scaled_weights", "[", "b", "]", "[", "oi", "]", "=", "sw", ".", "reshape", "(", "*", "self", ".", "aperture", ".", "shape", ")", "if", "o", ">", "0", ":", "# Make the array 2D and plot it", "rw", "=", "np", ".", "zeros_like", "(", "ap", ",", "dtype", "=", "float", ")", "sw", "=", "np", ".", "zeros_like", "(", "ap", ",", "dtype", "=", "float", ")", "n", "=", "0", "for", "i", ",", "a", "in", "enumerate", "(", "ap", ")", ":", "if", "(", "a", "&", "1", ")", ":", "rw", "[", "i", "]", "=", "rw_ij", "[", "n", "]", "sw", "[", "i", "]", "=", "sw_ij", "[", "n", "]", "n", "+=", "1", "raw_weights", "[", "b", "]", "[", "oi", "+", "1", "]", "=", "rw", ".", "reshape", "(", "*", "self", ".", "aperture", ".", "shape", ")", "scaled_weights", "[", "b", "]", "[", "oi", "+", "1", "]", "=", "sw", ".", "reshape", "(", "*", "self", ".", "aperture", ".", "shape", ")", "# Plot the images", "log", ".", "info", "(", "'Plotting the PLD weights...'", ")", "rdbu", "=", "pl", ".", "get_cmap", "(", "'RdBu_r'", ")", "rdbu", ".", "set_bad", "(", "'k'", ")", "for", "b", "in", "range", "(", "len", "(", "self", ".", "weights", ")", ")", ":", "rmax", "=", "max", "(", "[", "-", "raw_weights", "[", "b", "]", "[", "o", "]", ".", "min", "(", ")", "for", "o", "in", "range", "(", "ncol", ")", "]", "+", "[", "raw_weights", "[", "b", "]", "[", "o", "]", ".", "max", "(", ")", "for", "o", "in", "range", "(", "ncol", ")", "]", ")", "smax", "=", "max", "(", "[", "-", "scaled_weights", "[", "b", "]", "[", "o", "]", ".", "min", "(", ")", "for", "o", "in", "range", "(", "ncol", ")", "]", "+", "[", "scaled_weights", "[", "b", "]", "[", "o", "]", ".", "max", "(", ")", "for", "o", "in", "range", "(", "ncol", ")", "]", ")", "for", "o", "in", "range", "(", "ncol", ")", ":", "imr", "=", "ax", "[", "2", "*", "b", ",", "o", "]", ".", "imshow", "(", "raw_weights", "[", "b", "]", "[", "o", "]", ",", "aspect", "=", "'auto'", ",", "interpolation", "=", "'nearest'", ",", "cmap", "=", "rdbu", ",", "origin", "=", "'lower'", ",", "vmin", "=", "-", "rmax", ",", "vmax", "=", "rmax", ")", "ims", "=", "ax", "[", "2", "*", "b", "+", "1", ",", "o", "]", ".", "imshow", "(", "scaled_weights", "[", "b", "]", "[", "o", "]", ",", "aspect", "=", "'auto'", ",", "interpolation", "=", "'nearest'", ",", "cmap", "=", "rdbu", ",", "origin", "=", "'lower'", ",", "vmin", "=", "-", "smax", ",", "vmax", "=", "smax", ")", "# Colorbars", "def", 
"fmt", "(", "x", ",", "pos", ")", ":", "a", ",", "b", "=", "'{:.0e}'", ".", "format", "(", "x", ")", ".", "split", "(", "'e'", ")", "b", "=", "int", "(", "b", ")", "if", "float", "(", "a", ")", ">", "0", ":", "a", "=", "r'+'", "+", "a", "elif", "float", "(", "a", ")", "==", "0", ":", "return", "''", "return", "r'${} \\times 10^{{{}}}$'", ".", "format", "(", "a", ",", "b", ")", "cbr", "=", "pl", ".", "colorbar", "(", "imr", ",", "cax", "=", "cax", "[", "2", "*", "b", "]", ",", "format", "=", "FuncFormatter", "(", "fmt", ")", ")", "cbr", ".", "ax", ".", "tick_params", "(", "labelsize", "=", "8", ")", "cbs", "=", "pl", ".", "colorbar", "(", "ims", ",", "cax", "=", "cax", "[", "2", "*", "b", "+", "1", "]", ",", "format", "=", "FuncFormatter", "(", "fmt", ")", ")", "cbs", ".", "ax", ".", "tick_params", "(", "labelsize", "=", "8", ")", "# Plot aperture contours", "def", "PadWithZeros", "(", "vector", ",", "pad_width", ",", "iaxis", ",", "kwargs", ")", ":", "vector", "[", ":", "pad_width", "[", "0", "]", "]", "=", "0", "vector", "[", "-", "pad_width", "[", "1", "]", ":", "]", "=", "0", "return", "vector", "ny", ",", "nx", "=", "self", ".", "aperture", ".", "shape", "contour", "=", "np", ".", "zeros", "(", "(", "ny", ",", "nx", ")", ")", "contour", "[", "np", ".", "where", "(", "self", ".", "aperture", ")", "]", "=", "1", "contour", "=", "np", ".", "lib", ".", "pad", "(", "contour", ",", "1", ",", "PadWithZeros", ")", "highres", "=", "zoom", "(", "contour", ",", "100", ",", "order", "=", "0", ",", "mode", "=", "'nearest'", ")", "extent", "=", "np", ".", "array", "(", "[", "-", "1", ",", "nx", ",", "-", "1", ",", "ny", "]", ")", "for", "axis", "in", "ax", ".", "flatten", "(", ")", ":", "axis", ".", "contour", "(", "highres", ",", "levels", "=", "[", "0.5", "]", ",", "extent", "=", "extent", ",", "origin", "=", "'lower'", ",", "colors", "=", "'r'", ",", "linewidths", "=", "1", ")", "# Check for saturated columns", "for", "x", "in", "range", "(", "self", ".", "aperture", ".", "shape", "[", "0", "]", ")", ":", "for", "y", "in", "range", "(", "self", ".", "aperture", ".", "shape", "[", "1", "]", ")", ":", "if", "self", ".", "aperture", "[", "x", "]", "[", "y", "]", "==", "AP_SATURATED_PIXEL", ":", "axis", ".", "fill", "(", "[", "y", "-", "0.5", ",", "y", "+", "0.5", ",", "y", "+", "0.5", ",", "y", "-", "0.5", "]", ",", "[", "x", "-", "0.5", ",", "x", "-", "0.5", ",", "x", "+", "0.5", ",", "x", "+", "0.5", "]", ",", "fill", "=", "False", ",", "hatch", "=", "'xxxxx'", ",", "color", "=", "'r'", ",", "lw", "=", "0", ")", "axis", ".", "set_xlim", "(", "-", "0.5", ",", "nx", "-", "0.5", ")", "axis", ".", "set_ylim", "(", "-", "0.5", ",", "ny", "-", "0.5", ")", "axis", ".", "set_xticks", "(", "[", "]", ")", "axis", ".", "set_yticks", "(", "[", "]", ")", "# Labels", "titles", "=", "[", "r'$1^{\\mathrm{st}}$'", ",", "r'$2^{\\mathrm{nd}}\\ (i = j)$'", ",", "r'$2^{\\mathrm{nd}}\\ (i \\neq j)$'", ",", "r'$3^{\\mathrm{rd}}\\ (i = j)$'", ",", "r'$3^{\\mathrm{rd}}\\ (i \\neq j)$'", "]", "+", "[", "''", "for", "i", "in", "range", "(", "10", ")", "]", "for", "i", ",", "axis", "in", "enumerate", "(", "ax", "[", "0", "]", ")", ":", "axis", ".", "set_title", "(", "titles", "[", "i", "]", ",", "fontsize", "=", "12", ")", "for", "j", "in", "range", "(", "len", "(", "self", ".", "weights", ")", ")", ":", "ax", "[", "2", "*", "j", ",", "0", "]", ".", "text", "(", "-", "0.55", ",", "-", "0.15", ",", "r'$%d$'", "%", "(", "j", "+", "1", ")", ",", "fontsize", "=", "16", ",", "transform", "=", "ax", "[", "2", "*", "j", ",", "0", 
"]", ".", "transAxes", ")", "ax", "[", "2", "*", "j", ",", "0", "]", ".", "set_ylabel", "(", "r'$w_{ij}$'", ",", "fontsize", "=", "18", ")", "ax", "[", "2", "*", "j", "+", "1", ",", "0", "]", ".", "set_ylabel", "(", "r'$\\bar{X}_{ij} \\cdot w_{ij}$'", ",", "fontsize", "=", "18", ")", "if", "show", ":", "pl", ".", "show", "(", ")", "pl", ".", "close", "(", ")", "else", ":", "return", "fig", ",", "ax", ",", "cax" ]
.. warning:: Untested!
[ "..", "warning", "::", "Untested!" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L959-L1139
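The cross-term bookkeeping in `_plot_weights` relies on `multichoose` enumerating multisets of pixel indices; in the everest source this appears to be an alias for `itertools.combinations_with_replacement` (stated here as an assumption). A standalone sketch of that indexing:

    from itertools import combinations_with_replacement as multichoose

    # For 3 pixels at PLD order 2: six regressors, three of them pure
    # (i = j) terms, matching the s[0] == s[1] test in the record above
    sets = list(multichoose(range(3), 2))
    print(sets)                               # [(0, 0), (0, 1), (0, 2), ...]
    print([s for s in sets if s[0] == s[1]])  # [(0, 0), (1, 1), (2, 2)]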
rodluger/everest
everest/user.py
Everest._save_npz
def _save_npz(self):
        '''
        Saves all of the de-trending information to disk in an `npz` file

        '''

        # Save the data
        d = dict(self.__dict__)
        d.pop('_weights', None)
        d.pop('_A', None)
        d.pop('_B', None)
        d.pop('_f', None)
        d.pop('_mK', None)
        d.pop('K', None)
        d.pop('dvs', None)
        d.pop('clobber', None)
        d.pop('clobber_tpf', None)
        d.pop('_mission', None)
        d.pop('debug', None)
        np.savez(os.path.join(self.dir, self.name + '.npz'), **d)
python
def _save_npz(self):
        '''
        Saves all of the de-trending information to disk in an `npz` file

        '''

        # Save the data
        d = dict(self.__dict__)
        d.pop('_weights', None)
        d.pop('_A', None)
        d.pop('_B', None)
        d.pop('_f', None)
        d.pop('_mK', None)
        d.pop('K', None)
        d.pop('dvs', None)
        d.pop('clobber', None)
        d.pop('clobber_tpf', None)
        d.pop('_mission', None)
        d.pop('debug', None)
        np.savez(os.path.join(self.dir, self.name + '.npz'), **d)
[ "def", "_save_npz", "(", "self", ")", ":", "# Save the data", "d", "=", "dict", "(", "self", ".", "__dict__", ")", "d", ".", "pop", "(", "'_weights'", ",", "None", ")", "d", ".", "pop", "(", "'_A'", ",", "None", ")", "d", ".", "pop", "(", "'_B'", ",", "None", ")", "d", ".", "pop", "(", "'_f'", ",", "None", ")", "d", ".", "pop", "(", "'_mK'", ",", "None", ")", "d", ".", "pop", "(", "'K'", ",", "None", ")", "d", ".", "pop", "(", "'dvs'", ",", "None", ")", "d", ".", "pop", "(", "'clobber'", ",", "None", ")", "d", ".", "pop", "(", "'clobber_tpf'", ",", "None", ")", "d", ".", "pop", "(", "'_mission'", ",", "None", ")", "d", ".", "pop", "(", "'debug'", ",", "None", ")", "np", ".", "savez", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "self", ".", "name", "+", "'.npz'", ")", ",", "*", "*", "d", ")" ]
Saves all of the de-trending information to disk in an `npz` file
[ "Saves", "all", "of", "the", "de", "-", "trending", "information", "to", "disk", "in", "an", "npz", "file" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L1298-L1317
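Since the record saves a filtered `__dict__` via `np.savez`, the file can be reopened with `np.load`; a sketch (the file name mirrors `self.name + '.npz'` and is hypothetical here):

    import numpy as np

    # Keys of the archive are the instance attributes that survived the
    # pops above; allow_pickle is needed for object-valued attributes
    data = np.load('nPLD.npz', allow_pickle=True)  # hypothetical file name
    print(sorted(data.files))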
rodluger/everest
everest/user.py
Everest.optimize
def optimize(self, piter=3, pmaxf=300, ppert=0.1):
        '''
        Runs :py:obj:`pPLD` on the target in an attempt to further
        optimize the values of the PLD priors. See
        :py:class:`everest.detrender.pPLD`.

        '''

        self._save_npz()
        optimized = pPLD(self.ID, piter=piter, pmaxf=pmaxf,
                         ppert=ppert, debug=True, clobber=True)
        optimized.publish()
        self.reset()
python
def optimize(self, piter=3, pmaxf=300, ppert=0.1):
        '''
        Runs :py:obj:`pPLD` on the target in an attempt to further
        optimize the values of the PLD priors. See
        :py:class:`everest.detrender.pPLD`.

        '''

        self._save_npz()
        optimized = pPLD(self.ID, piter=piter, pmaxf=pmaxf,
                         ppert=ppert, debug=True, clobber=True)
        optimized.publish()
        self.reset()
[ "def", "optimize", "(", "self", ",", "piter", "=", "3", ",", "pmaxf", "=", "300", ",", "ppert", "=", "0.1", ")", ":", "self", ".", "_save_npz", "(", ")", "optimized", "=", "pPLD", "(", "self", ".", "ID", ",", "piter", "=", "piter", ",", "pmaxf", "=", "pmaxf", ",", "ppert", "=", "ppert", ",", "debug", "=", "True", ",", "clobber", "=", "True", ")", "optimized", ".", "publish", "(", ")", "self", ".", "reset", "(", ")" ]
Runs :py:obj:`pPLD` on the target in an attempt to further optimize the values of the PLD priors. See :py:class:`everest.detrender.pPLD`.
[ "Runs", ":", "py", ":", "obj", ":", "pPLD", "on", "the", "target", "in", "an", "attempt", "to", "further", "optimize", "the", "values", "of", "the", "PLD", "priors", ".", "See", ":", "py", ":", "class", ":", "everest", ".", "detrender", ".", "pPLD", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L1319-L1330
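Usage is a single call; note from the record that it saves the current model, runs `pPLD`, publishes the result, and resets the instance (sketch; ID illustrative, defaults shown explicitly):

    import everest

    star = everest.Everest(201367065)  # EPIC ID is illustrative
    star.optimize(piter=3, pmaxf=300, ppert=0.1)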
rodluger/everest
everest/user.py
Everest.plot_folded
def plot_folded(self, t0, period, dur=0.2):
        '''
        Plot the light curve folded on a given `period` and centered
        at `t0`. When plotting folded transits, please mask them using
        :py:meth:`mask_planet` and re-compute the model using
        :py:meth:`compute`.

        :param float t0: The time at which to center the plot \
               (same units as light curve)
        :param float period: The period of the folding operation
        :param float dur: The transit duration in days. Default 0.2

        '''

        # Mask the planet
        self.mask_planet(t0, period, dur)

        # Whiten
        gp = GP(self.kernel, self.kernel_params, white=False)
        gp.compute(self.apply_mask(self.time), self.apply_mask(self.fraw_err))
        med = np.nanmedian(self.apply_mask(self.flux))
        y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time)
        fwhite = (self.flux - y)
        fwhite /= np.nanmedian(fwhite)

        # Fold
        tfold = (self.time - t0 - period / 2.) % period - period / 2.

        # Crop
        inds = np.where(np.abs(tfold) < 2 * dur)[0]
        x = tfold[inds]
        y = fwhite[inds]

        # Plot
        fig, ax = pl.subplots(1, figsize=(9, 5))
        fig.subplots_adjust(bottom=0.125)
        ax.plot(x, y, 'k.', alpha=0.5)

        # Get ylims
        yfin = np.delete(y, np.where(np.isnan(y)))
        lo, hi = yfin[np.argsort(yfin)][[3, -3]]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(*ylim)

        # Appearance
        ax.set_xlabel(r'Time (days)', fontsize=18)
        ax.set_ylabel(r'Normalized Flux', fontsize=18)
        fig.canvas.set_window_title(
            '%s %d' % (self._mission.IDSTRING, self.ID))

        pl.show()
python
def plot_folded(self, t0, period, dur=0.2):
        '''
        Plot the light curve folded on a given `period` and centered
        at `t0`. When plotting folded transits, please mask them using
        :py:meth:`mask_planet` and re-compute the model using
        :py:meth:`compute`.

        :param float t0: The time at which to center the plot \
               (same units as light curve)
        :param float period: The period of the folding operation
        :param float dur: The transit duration in days. Default 0.2

        '''

        # Mask the planet
        self.mask_planet(t0, period, dur)

        # Whiten
        gp = GP(self.kernel, self.kernel_params, white=False)
        gp.compute(self.apply_mask(self.time), self.apply_mask(self.fraw_err))
        med = np.nanmedian(self.apply_mask(self.flux))
        y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time)
        fwhite = (self.flux - y)
        fwhite /= np.nanmedian(fwhite)

        # Fold
        tfold = (self.time - t0 - period / 2.) % period - period / 2.

        # Crop
        inds = np.where(np.abs(tfold) < 2 * dur)[0]
        x = tfold[inds]
        y = fwhite[inds]

        # Plot
        fig, ax = pl.subplots(1, figsize=(9, 5))
        fig.subplots_adjust(bottom=0.125)
        ax.plot(x, y, 'k.', alpha=0.5)

        # Get ylims
        yfin = np.delete(y, np.where(np.isnan(y)))
        lo, hi = yfin[np.argsort(yfin)][[3, -3]]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(*ylim)

        # Appearance
        ax.set_xlabel(r'Time (days)', fontsize=18)
        ax.set_ylabel(r'Normalized Flux', fontsize=18)
        fig.canvas.set_window_title(
            '%s %d' % (self._mission.IDSTRING, self.ID))

        pl.show()
[ "def", "plot_folded", "(", "self", ",", "t0", ",", "period", ",", "dur", "=", "0.2", ")", ":", "# Mask the planet", "self", ".", "mask_planet", "(", "t0", ",", "period", ",", "dur", ")", "# Whiten", "gp", "=", "GP", "(", "self", ".", "kernel", ",", "self", ".", "kernel_params", ",", "white", "=", "False", ")", "gp", ".", "compute", "(", "self", ".", "apply_mask", "(", "self", ".", "time", ")", ",", "self", ".", "apply_mask", "(", "self", ".", "fraw_err", ")", ")", "med", "=", "np", ".", "nanmedian", "(", "self", ".", "apply_mask", "(", "self", ".", "flux", ")", ")", "y", ",", "_", "=", "gp", ".", "predict", "(", "self", ".", "apply_mask", "(", "self", ".", "flux", ")", "-", "med", ",", "self", ".", "time", ")", "fwhite", "=", "(", "self", ".", "flux", "-", "y", ")", "fwhite", "/=", "np", ".", "nanmedian", "(", "fwhite", ")", "# Fold", "tfold", "=", "(", "self", ".", "time", "-", "t0", "-", "period", "/", "2.", ")", "%", "period", "-", "period", "/", "2.", "# Crop", "inds", "=", "np", ".", "where", "(", "np", ".", "abs", "(", "tfold", ")", "<", "2", "*", "dur", ")", "[", "0", "]", "x", "=", "tfold", "[", "inds", "]", "y", "=", "fwhite", "[", "inds", "]", "# Plot", "fig", ",", "ax", "=", "pl", ".", "subplots", "(", "1", ",", "figsize", "=", "(", "9", ",", "5", ")", ")", "fig", ".", "subplots_adjust", "(", "bottom", "=", "0.125", ")", "ax", ".", "plot", "(", "x", ",", "y", ",", "'k.'", ",", "alpha", "=", "0.5", ")", "# Get ylims", "yfin", "=", "np", ".", "delete", "(", "y", ",", "np", ".", "where", "(", "np", ".", "isnan", "(", "y", ")", ")", ")", "lo", ",", "hi", "=", "yfin", "[", "np", ".", "argsort", "(", "yfin", ")", "]", "[", "[", "3", ",", "-", "3", "]", "]", "pad", "=", "(", "hi", "-", "lo", ")", "*", "0.1", "ylim", "=", "(", "lo", "-", "pad", ",", "hi", "+", "pad", ")", "ax", ".", "set_ylim", "(", "*", "ylim", ")", "# Appearance", "ax", ".", "set_xlabel", "(", "r'Time (days)'", ",", "fontsize", "=", "18", ")", "ax", ".", "set_ylabel", "(", "r'Normalized Flux'", ",", "fontsize", "=", "18", ")", "fig", ".", "canvas", ".", "set_window_title", "(", "'%s %d'", "%", "(", "self", ".", "_mission", ".", "IDSTRING", ",", "self", ".", "ID", ")", ")", "pl", ".", "show", "(", ")" ]
Plot the light curve folded on a given `period` and centered at `t0`.
When plotting folded transits, please mask them using
:py:meth:`mask_planet` and re-compute the model using :py:meth:`compute`.

:param float t0: The time at which to center the plot \
       (same units as light curve)
:param float period: The period of the folding operation
:param float dur: The transit duration in days. Default 0.2
[ "Plot", "the", "light", "curve", "folded", "on", "a", "given", "period", "and", "centered", "at", "t0", ".", "When", "plotting", "folded", "transits", "please", "mask", "them", "using", ":", "py", ":", "meth", ":", "mask_planet", "and", "re", "-", "compute", "the", "model", "using", ":", "py", ":", "meth", ":", "compute", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L1332-L1383
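The fold in the record maps each timestamp into the half-open interval [-period/2, period/2) around `t0`, after which `star.plot_folded(t0, period, dur=0.3)` would display the phased data; a standalone sketch of that arithmetic (ephemeris values made up for illustration):

    import numpy as np

    t0, period = 1980.42, 10.05         # made-up ephemeris
    t = np.linspace(1975., 2055., 5)
    tfold = (t - t0 - period / 2.) % period - period / 2.
    print(tfold)  # each time mapped into [-period/2, period/2)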
rodluger/everest
everest/user.py
Everest.plot_transit_model
def plot_transit_model(self, show=True, fold=None, ax=None):
        '''
        Plot the light curve de-trended with a joint instrumental + transit
        model with the best fit transit model overlaid. The transit model
        should be specified using the :py:obj:`transit_model` attribute
        and should be an instance or list of instances of
        :py:class:`everest.transit.TransitModel`.

        :param bool show: Show the plot, or return the `fig, ax` \
               instances? Default `True`
        :param str fold: The name of the planet/transit model on which to \
               fold. If only one model is present, can be set to \
               :py:obj:`True`. Default :py:obj:`False` \
               (does not fold the data).
        :param ax: A `matplotlib` axis instance to use for plotting. \
               Default :py:obj:`None`

        '''

        if self.transit_model is None:
            raise ValueError("No transit model provided!")
        if self.transit_depth is None:
            self.compute()

        if fold is not None:
            if (fold is True and len(self.transit_model) > 1) or \
                    (type(fold) is not str):
                raise Exception(
                    "Kwarg `fold` should be the name of the transit " +
                    "model on which to fold the data.")
            if fold is True:
                # We are folding on the first index of `self.transit_model`
                fold = 0
            elif type(fold) is str:
                # Figure out the index of the transit model on which to fold
                fold = np.argmax(
                    [fold == tm.name for tm in self.transit_model])
            log.info('Plotting the transit model folded ' +
                     'on transit model index %d...' % fold)
        else:
            log.info('Plotting the transit model...')

        # Set up axes
        if ax is None:
            if fold is not None:
                fig, ax = pl.subplots(1, figsize=(8, 5))
            else:
                fig, ax = pl.subplots(1, figsize=(13, 6))
            fig.canvas.set_window_title('EVEREST Light curve')
        else:
            fig = pl.gcf()

        # Set up some stuff
        if self.cadence == 'sc':
            ms = 2
        else:
            ms = 4

        # Fold?
        if fold is not None:
            times = self.transit_model[fold].params.get('times', None)
            if times is not None:
                time = self.time - \
                    [times[np.argmin(np.abs(ti - times))]
                     for ti in self.time]
                t0 = times[0]
            else:
                t0 = self.transit_model[fold].params.get('t0', 0.)
                period = self.transit_model[fold].params.get('per', 10.)
                time = (self.time - t0 - period / 2.) % period - period / 2.
            dur = 0.01 * \
                len(np.where(self.transit_model[fold](
                    np.linspace(t0 - 0.5, t0 + 0.5, 100)) < 0)[0])
        else:
            time = self.time
            ax.plot(self.apply_mask(time), self.apply_mask(self.flux),
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            ax.plot(time[self.outmask], self.flux[self.outmask],
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            ax.plot(time[self.transitmask], self.flux[self.transitmask],
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)

        # Plot the transit + GP model
        med = np.nanmedian(self.apply_mask(self.flux))
        transit_model = \
            med * np.sum([depth * tm(self.time)
                          for tm, depth in zip(self.transit_model,
                                               self.transit_depth)], axis=0)
        gp = GP(self.kernel, self.kernel_params, white=False)
        gp.compute(self.apply_mask(self.time), self.apply_mask(self.fraw_err))
        y, _ = gp.predict(self.apply_mask(
            self.flux - transit_model) - med, self.time)
        if fold is not None:
            flux = (self.flux - y) / med
            ax.plot(self.apply_mask(time), self.apply_mask(flux),
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            ax.plot(time[self.outmask], flux[self.outmask],
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            ax.plot(time[self.transitmask], flux[self.transitmask],
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            hires_time = np.linspace(-5 * dur, 5 * dur, 1000)
            hires_transit_model = 1 + \
                self.transit_depth[fold] * \
                self.transit_model[fold](hires_time + t0)
            ax.plot(hires_time, hires_transit_model, 'r-', lw=1, alpha=1)
        else:
            flux = self.flux
            y += med
            y += transit_model
            ax.plot(time, y, 'r-', lw=1, alpha=1)

        # Plot the bad data points
        bnmask = np.array(
            list(set(np.concatenate([self.badmask, self.nanmask]))),
            dtype=int)
        bmask = [i for i in self.badmask if i not in self.nanmask]
        ax.plot(time[bmask], flux[bmask], 'r.', markersize=ms, alpha=0.25)

        # Appearance
        ax.set_ylabel('EVEREST Flux', fontsize=18)
        ax.margins(0.01, 0.1)
        if fold is not None:
            ax.set_xlabel('Time From Transit Center (days)', fontsize=18)
            ax.set_xlim(-3 * dur, 3 * dur)
        else:
            ax.set_xlabel('Time (%s)' % self._mission.TIMEUNITS, fontsize=18)
            for brkpt in self.breakpoints[:-1]:
                ax.axvline(time[brkpt], color='r', ls='--', alpha=0.25)
        ax.get_yaxis().set_major_formatter(Formatter.Flux)

        # Get y lims that bound most of the flux
        if fold is not None:
            lo = np.min(hires_transit_model)
            pad = 1.5 * (1 - lo)
            ylim = (lo - pad, 1 + pad)
        else:
            f = np.delete(flux, bnmask)
            N = int(0.995 * len(f))
            hi, lo = f[np.argsort(f)][[N, -N]]
            pad = (hi - lo) * 0.1
            ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)

        # Indicate off-axis outliers
        for i in np.where(flux < ylim[0])[0]:
            if i in bmask:
                color = "#ffcccc"
            else:
                color = "#ccccff"
            ax.annotate('', xy=(time[i], ylim[0]), xycoords='data',
                        xytext=(0, 15), textcoords='offset points',
                        arrowprops=dict(arrowstyle="-|>", color=color,
                                        alpha=0.5))
        for i in np.where(flux > ylim[1])[0]:
            if i in bmask:
                color = "#ffcccc"
            else:
                color = "#ccccff"
            ax.annotate('', xy=(time[i], ylim[1]), xycoords='data',
                        xytext=(0, -15), textcoords='offset points',
                        arrowprops=dict(arrowstyle="-|>", color=color,
                                        alpha=0.5))

        if show:
            pl.show()
            pl.close()
        else:
            return fig, ax
python
def plot_transit_model(self, show=True, fold=None, ax=None):
        '''
        Plot the light curve de-trended with a joint instrumental + transit
        model with the best fit transit model overlaid. The transit model
        should be specified using the :py:obj:`transit_model` attribute
        and should be an instance or list of instances of
        :py:class:`everest.transit.TransitModel`.

        :param bool show: Show the plot, or return the `fig, ax` \
               instances? Default `True`
        :param str fold: The name of the planet/transit model on which to \
               fold. If only one model is present, can be set to \
               :py:obj:`True`. Default :py:obj:`False` \
               (does not fold the data).
        :param ax: A `matplotlib` axis instance to use for plotting. \
               Default :py:obj:`None`

        '''

        if self.transit_model is None:
            raise ValueError("No transit model provided!")
        if self.transit_depth is None:
            self.compute()

        if fold is not None:
            if (fold is True and len(self.transit_model) > 1) or \
                    (type(fold) is not str):
                raise Exception(
                    "Kwarg `fold` should be the name of the transit " +
                    "model on which to fold the data.")
            if fold is True:
                # We are folding on the first index of `self.transit_model`
                fold = 0
            elif type(fold) is str:
                # Figure out the index of the transit model on which to fold
                fold = np.argmax(
                    [fold == tm.name for tm in self.transit_model])
            log.info('Plotting the transit model folded ' +
                     'on transit model index %d...' % fold)
        else:
            log.info('Plotting the transit model...')

        # Set up axes
        if ax is None:
            if fold is not None:
                fig, ax = pl.subplots(1, figsize=(8, 5))
            else:
                fig, ax = pl.subplots(1, figsize=(13, 6))
            fig.canvas.set_window_title('EVEREST Light curve')
        else:
            fig = pl.gcf()

        # Set up some stuff
        if self.cadence == 'sc':
            ms = 2
        else:
            ms = 4

        # Fold?
        if fold is not None:
            times = self.transit_model[fold].params.get('times', None)
            if times is not None:
                time = self.time - \
                    [times[np.argmin(np.abs(ti - times))]
                     for ti in self.time]
                t0 = times[0]
            else:
                t0 = self.transit_model[fold].params.get('t0', 0.)
                period = self.transit_model[fold].params.get('per', 10.)
                time = (self.time - t0 - period / 2.) % period - period / 2.
            dur = 0.01 * \
                len(np.where(self.transit_model[fold](
                    np.linspace(t0 - 0.5, t0 + 0.5, 100)) < 0)[0])
        else:
            time = self.time
            ax.plot(self.apply_mask(time), self.apply_mask(self.flux),
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            ax.plot(time[self.outmask], self.flux[self.outmask],
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            ax.plot(time[self.transitmask], self.flux[self.transitmask],
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)

        # Plot the transit + GP model
        med = np.nanmedian(self.apply_mask(self.flux))
        transit_model = \
            med * np.sum([depth * tm(self.time)
                          for tm, depth in zip(self.transit_model,
                                               self.transit_depth)], axis=0)
        gp = GP(self.kernel, self.kernel_params, white=False)
        gp.compute(self.apply_mask(self.time), self.apply_mask(self.fraw_err))
        y, _ = gp.predict(self.apply_mask(
            self.flux - transit_model) - med, self.time)
        if fold is not None:
            flux = (self.flux - y) / med
            ax.plot(self.apply_mask(time), self.apply_mask(flux),
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            ax.plot(time[self.outmask], flux[self.outmask],
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            ax.plot(time[self.transitmask], flux[self.transitmask],
                    ls='none', marker='.', color='k',
                    markersize=ms, alpha=0.5)
            hires_time = np.linspace(-5 * dur, 5 * dur, 1000)
            hires_transit_model = 1 + \
                self.transit_depth[fold] * \
                self.transit_model[fold](hires_time + t0)
            ax.plot(hires_time, hires_transit_model, 'r-', lw=1, alpha=1)
        else:
            flux = self.flux
            y += med
            y += transit_model
            ax.plot(time, y, 'r-', lw=1, alpha=1)

        # Plot the bad data points
        bnmask = np.array(
            list(set(np.concatenate([self.badmask, self.nanmask]))),
            dtype=int)
        bmask = [i for i in self.badmask if i not in self.nanmask]
        ax.plot(time[bmask], flux[bmask], 'r.', markersize=ms, alpha=0.25)

        # Appearance
        ax.set_ylabel('EVEREST Flux', fontsize=18)
        ax.margins(0.01, 0.1)
        if fold is not None:
            ax.set_xlabel('Time From Transit Center (days)', fontsize=18)
            ax.set_xlim(-3 * dur, 3 * dur)
        else:
            ax.set_xlabel('Time (%s)' % self._mission.TIMEUNITS, fontsize=18)
            for brkpt in self.breakpoints[:-1]:
                ax.axvline(time[brkpt], color='r', ls='--', alpha=0.25)
        ax.get_yaxis().set_major_formatter(Formatter.Flux)

        # Get y lims that bound most of the flux
        if fold is not None:
            lo = np.min(hires_transit_model)
            pad = 1.5 * (1 - lo)
            ylim = (lo - pad, 1 + pad)
        else:
            f = np.delete(flux, bnmask)
            N = int(0.995 * len(f))
            hi, lo = f[np.argsort(f)][[N, -N]]
            pad = (hi - lo) * 0.1
            ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)

        # Indicate off-axis outliers
        for i in np.where(flux < ylim[0])[0]:
            if i in bmask:
                color = "#ffcccc"
            else:
                color = "#ccccff"
            ax.annotate('', xy=(time[i], ylim[0]), xycoords='data',
                        xytext=(0, 15), textcoords='offset points',
                        arrowprops=dict(arrowstyle="-|>", color=color,
                                        alpha=0.5))
        for i in np.where(flux > ylim[1])[0]:
            if i in bmask:
                color = "#ffcccc"
            else:
                color = "#ccccff"
            ax.annotate('', xy=(time[i], ylim[1]), xycoords='data',
                        xytext=(0, -15), textcoords='offset points',
                        arrowprops=dict(arrowstyle="-|>", color=color,
                                        alpha=0.5))

        if show:
            pl.show()
            pl.close()
        else:
            return fig, ax
[ "def", "plot_transit_model", "(", "self", ",", "show", "=", "True", ",", "fold", "=", "None", ",", "ax", "=", "None", ")", ":", "if", "self", ".", "transit_model", "is", "None", ":", "raise", "ValueError", "(", "\"No transit model provided!\"", ")", "if", "self", ".", "transit_depth", "is", "None", ":", "self", ".", "compute", "(", ")", "if", "fold", "is", "not", "None", ":", "if", "(", "fold", "is", "True", "and", "len", "(", "self", ".", "transit_model", ")", ">", "1", ")", "or", "(", "type", "(", "fold", ")", "is", "not", "str", ")", ":", "raise", "Exception", "(", "\"Kwarg `fold` should be the name of the transit \"", "+", "\"model on which to fold the data.\"", ")", "if", "fold", "is", "True", ":", "# We are folding on the first index of `self.transit_model`", "fold", "=", "0", "elif", "type", "(", "fold", ")", "is", "str", ":", "# Figure out the index of the transit model on which to fold", "fold", "=", "np", ".", "argmax", "(", "[", "fold", "==", "tm", ".", "name", "for", "tm", "in", "self", ".", "transit_model", "]", ")", "log", ".", "info", "(", "'Plotting the transit model folded '", "+", "'on transit model index %d...'", "%", "fold", ")", "else", ":", "log", ".", "info", "(", "'Plotting the transit model...'", ")", "# Set up axes", "if", "ax", "is", "None", ":", "if", "fold", "is", "not", "None", ":", "fig", ",", "ax", "=", "pl", ".", "subplots", "(", "1", ",", "figsize", "=", "(", "8", ",", "5", ")", ")", "else", ":", "fig", ",", "ax", "=", "pl", ".", "subplots", "(", "1", ",", "figsize", "=", "(", "13", ",", "6", ")", ")", "fig", ".", "canvas", ".", "set_window_title", "(", "'EVEREST Light curve'", ")", "else", ":", "fig", "=", "pl", ".", "gcf", "(", ")", "# Set up some stuff", "if", "self", ".", "cadence", "==", "'sc'", ":", "ms", "=", "2", "else", ":", "ms", "=", "4", "# Fold?", "if", "fold", "is", "not", "None", ":", "times", "=", "self", ".", "transit_model", "[", "fold", "]", ".", "params", ".", "get", "(", "'times'", ",", "None", ")", "if", "times", "is", "not", "None", ":", "time", "=", "self", ".", "time", "-", "[", "times", "[", "np", ".", "argmin", "(", "np", ".", "abs", "(", "ti", "-", "times", ")", ")", "]", "for", "ti", "in", "self", ".", "time", "]", "t0", "=", "times", "[", "0", "]", "else", ":", "t0", "=", "self", ".", "transit_model", "[", "fold", "]", ".", "params", ".", "get", "(", "'t0'", ",", "0.", ")", "period", "=", "self", ".", "transit_model", "[", "fold", "]", ".", "params", ".", "get", "(", "'per'", ",", "10.", ")", "time", "=", "(", "self", ".", "time", "-", "t0", "-", "period", "/", "2.", ")", "%", "period", "-", "period", "/", "2.", "dur", "=", "0.01", "*", "len", "(", "np", ".", "where", "(", "self", ".", "transit_model", "[", "fold", "]", "(", "np", ".", "linspace", "(", "t0", "-", "0.5", ",", "t0", "+", "0.5", ",", "100", ")", ")", "<", "0", ")", "[", "0", "]", ")", "else", ":", "time", "=", "self", ".", "time", "ax", ".", "plot", "(", "self", ".", "apply_mask", "(", "time", ")", ",", "self", ".", "apply_mask", "(", "self", ".", "flux", ")", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "ms", ",", "alpha", "=", "0.5", ")", "ax", ".", "plot", "(", "time", "[", "self", ".", "outmask", "]", ",", "self", ".", "flux", "[", "self", ".", "outmask", "]", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "ms", ",", "alpha", "=", "0.5", ")", "ax", ".", "plot", "(", "time", "[", "self", ".", "transitmask", "]", ",", "self", ".", "flux", "[", 
"self", ".", "transitmask", "]", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "ms", ",", "alpha", "=", "0.5", ")", "# Plot the transit + GP model", "med", "=", "np", ".", "nanmedian", "(", "self", ".", "apply_mask", "(", "self", ".", "flux", ")", ")", "transit_model", "=", "med", "*", "np", ".", "sum", "(", "[", "depth", "*", "tm", "(", "self", ".", "time", ")", "for", "tm", ",", "depth", "in", "zip", "(", "self", ".", "transit_model", ",", "self", ".", "transit_depth", ")", "]", ",", "axis", "=", "0", ")", "gp", "=", "GP", "(", "self", ".", "kernel", ",", "self", ".", "kernel_params", ",", "white", "=", "False", ")", "gp", ".", "compute", "(", "self", ".", "apply_mask", "(", "self", ".", "time", ")", ",", "self", ".", "apply_mask", "(", "self", ".", "fraw_err", ")", ")", "y", ",", "_", "=", "gp", ".", "predict", "(", "self", ".", "apply_mask", "(", "self", ".", "flux", "-", "transit_model", ")", "-", "med", ",", "self", ".", "time", ")", "if", "fold", "is", "not", "None", ":", "flux", "=", "(", "self", ".", "flux", "-", "y", ")", "/", "med", "ax", ".", "plot", "(", "self", ".", "apply_mask", "(", "time", ")", ",", "self", ".", "apply_mask", "(", "flux", ")", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "ms", ",", "alpha", "=", "0.5", ")", "ax", ".", "plot", "(", "time", "[", "self", ".", "outmask", "]", ",", "flux", "[", "self", ".", "outmask", "]", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "ms", ",", "alpha", "=", "0.5", ")", "ax", ".", "plot", "(", "time", "[", "self", ".", "transitmask", "]", ",", "flux", "[", "self", ".", "transitmask", "]", ",", "ls", "=", "'none'", ",", "marker", "=", "'.'", ",", "color", "=", "'k'", ",", "markersize", "=", "ms", ",", "alpha", "=", "0.5", ")", "hires_time", "=", "np", ".", "linspace", "(", "-", "5", "*", "dur", ",", "5", "*", "dur", ",", "1000", ")", "hires_transit_model", "=", "1", "+", "self", ".", "transit_depth", "[", "fold", "]", "*", "self", ".", "transit_model", "[", "fold", "]", "(", "hires_time", "+", "t0", ")", "ax", ".", "plot", "(", "hires_time", ",", "hires_transit_model", ",", "'r-'", ",", "lw", "=", "1", ",", "alpha", "=", "1", ")", "else", ":", "flux", "=", "self", ".", "flux", "y", "+=", "med", "y", "+=", "transit_model", "ax", ".", "plot", "(", "time", ",", "y", ",", "'r-'", ",", "lw", "=", "1", ",", "alpha", "=", "1", ")", "# Plot the bad data points", "bnmask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "self", ".", "badmask", ",", "self", ".", "nanmask", "]", ")", ")", ")", ",", "dtype", "=", "int", ")", "bmask", "=", "[", "i", "for", "i", "in", "self", ".", "badmask", "if", "i", "not", "in", "self", ".", "nanmask", "]", "ax", ".", "plot", "(", "time", "[", "bmask", "]", ",", "flux", "[", "bmask", "]", ",", "'r.'", ",", "markersize", "=", "ms", ",", "alpha", "=", "0.25", ")", "# Appearance", "ax", ".", "set_ylabel", "(", "'EVEREST Flux'", ",", "fontsize", "=", "18", ")", "ax", ".", "margins", "(", "0.01", ",", "0.1", ")", "if", "fold", "is", "not", "None", ":", "ax", ".", "set_xlabel", "(", "'Time From Transit Center (days)'", ",", "fontsize", "=", "18", ")", "ax", ".", "set_xlim", "(", "-", "3", "*", "dur", ",", "3", "*", "dur", ")", "else", ":", "ax", ".", "set_xlabel", "(", "'Time (%s)'", "%", "self", ".", "_mission", ".", "TIMEUNITS", ",", "fontsize", "=", "18", ")", "for", "brkpt", "in", "self", ".", 
"breakpoints", "[", ":", "-", "1", "]", ":", "ax", ".", "axvline", "(", "time", "[", "brkpt", "]", ",", "color", "=", "'r'", ",", "ls", "=", "'--'", ",", "alpha", "=", "0.25", ")", "ax", ".", "get_yaxis", "(", ")", ".", "set_major_formatter", "(", "Formatter", ".", "Flux", ")", "# Get y lims that bound most of the flux", "if", "fold", "is", "not", "None", ":", "lo", "=", "np", ".", "min", "(", "hires_transit_model", ")", "pad", "=", "1.5", "*", "(", "1", "-", "lo", ")", "ylim", "=", "(", "lo", "-", "pad", ",", "1", "+", "pad", ")", "else", ":", "f", "=", "np", ".", "delete", "(", "flux", ",", "bnmask", ")", "N", "=", "int", "(", "0.995", "*", "len", "(", "f", ")", ")", "hi", ",", "lo", "=", "f", "[", "np", ".", "argsort", "(", "f", ")", "]", "[", "[", "N", ",", "-", "N", "]", "]", "pad", "=", "(", "hi", "-", "lo", ")", "*", "0.1", "ylim", "=", "(", "lo", "-", "pad", ",", "hi", "+", "pad", ")", "ax", ".", "set_ylim", "(", "ylim", ")", "# Indicate off-axis outliers", "for", "i", "in", "np", ".", "where", "(", "flux", "<", "ylim", "[", "0", "]", ")", "[", "0", "]", ":", "if", "i", "in", "bmask", ":", "color", "=", "\"#ffcccc\"", "else", ":", "color", "=", "\"#ccccff\"", "ax", ".", "annotate", "(", "''", ",", "xy", "=", "(", "time", "[", "i", "]", ",", "ylim", "[", "0", "]", ")", ",", "xycoords", "=", "'data'", ",", "xytext", "=", "(", "0", ",", "15", ")", ",", "textcoords", "=", "'offset points'", ",", "arrowprops", "=", "dict", "(", "arrowstyle", "=", "\"-|>\"", ",", "color", "=", "color", ",", "alpha", "=", "0.5", ")", ")", "for", "i", "in", "np", ".", "where", "(", "flux", ">", "ylim", "[", "1", "]", ")", "[", "0", "]", ":", "if", "i", "in", "bmask", ":", "color", "=", "\"#ffcccc\"", "else", ":", "color", "=", "\"#ccccff\"", "ax", ".", "annotate", "(", "''", ",", "xy", "=", "(", "time", "[", "i", "]", ",", "ylim", "[", "1", "]", ")", ",", "xycoords", "=", "'data'", ",", "xytext", "=", "(", "0", ",", "-", "15", ")", ",", "textcoords", "=", "'offset points'", ",", "arrowprops", "=", "dict", "(", "arrowstyle", "=", "\"-|>\"", ",", "color", "=", "color", ",", "alpha", "=", "0.5", ")", ")", "if", "show", ":", "pl", ".", "show", "(", ")", "pl", ".", "close", "(", ")", "else", ":", "return", "fig", ",", "ax" ]
Plot the light curve de-trended with a joint instrumental + transit
        model with the best fit transit model overlaid. The transit model
        should be specified using the :py:obj:`transit_model` attribute
        and should be an instance or list of instances of
        :py:class:`everest.transit.TransitModel`.

        :param bool show: Show the plot, or return the `fig, ax` instances? \
               Default `True`
        :param str fold: The name of the planet/transit model on which to \
               fold. If only one model is present, can be set to \
               :py:obj:`True`. Default :py:obj:`None` \
               (does not fold the data).
        :param ax: A `matplotlib` axis instance to use for plotting. \
               Default :py:obj:`None`
[ "Plot", "the", "light", "curve", "de", "-", "trended", "with", "a", "join", "instrumental", "+", "transit", "model", "with", "the", "best", "fit", "transit", "model", "overlaid", ".", "The", "transit", "model", "should", "be", "specified", "using", "the", ":", "py", ":", "obj", ":", "transit_model", "attribute", "and", "should", "be", "an", "instance", "or", "list", "of", "instances", "of", ":", "py", ":", "class", ":", "everest", ".", "transit", ".", "TransitModel", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/user.py#L1385-L1549
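A minimal usage sketch for `plot_transit_model` (the EPIC target ID and the `TransitModel` keyword arguments below are hypothetical; the `per` and `t0` names mirror the `params.get('per', ...)` / `params.get('t0', ...)` lookups in the code above, but the exact constructor signature is an assumption, not the verified API):

import everest
from everest.transit import TransitModel  # assumed import path, per the docstring

# Hypothetical K2 target hosting a transiting planet
star = everest.Everest(201635569)

# Attach a named transit model and re-run the joint fit
# (constructor arguments are assumptions)
star.transit_model = TransitModel('b', per=8.368, t0=1980.42)
star.compute()

star.plot_transit_model()          # full, unfolded light curve + model
star.plot_transit_model(fold='b')  # folded on the model named 'b'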
rodluger/everest
everest/mathutils.py
Interpolate
def Interpolate(time, mask, y):
    '''
    Masks certain elements in the array `y` and linearly
    interpolates over them, returning an array `y'` of the
    same length.

    :param array_like time: The time array
    :param array_like mask: The indices to be interpolated over
    :param array_like y: The dependent array

    '''

    # Ensure `y` doesn't get modified in place
    yy = np.array(y)
    t_ = np.delete(time, mask)
    y_ = np.delete(y, mask, axis=0)
    if len(yy.shape) == 1:
        yy[mask] = np.interp(time[mask], t_, y_)
    elif len(yy.shape) == 2:
        for n in range(yy.shape[1]):
            yy[mask, n] = np.interp(time[mask], t_, y_[:, n])
    else:
        raise Exception("Array ``y`` must be either 1- or 2-d.")
    return yy
python
def Interpolate(time, mask, y):
    '''
    Masks certain elements in the array `y` and linearly
    interpolates over them, returning an array `y'` of the
    same length.

    :param array_like time: The time array
    :param array_like mask: The indices to be interpolated over
    :param array_like y: The dependent array

    '''

    # Ensure `y` doesn't get modified in place
    yy = np.array(y)
    t_ = np.delete(time, mask)
    y_ = np.delete(y, mask, axis=0)
    if len(yy.shape) == 1:
        yy[mask] = np.interp(time[mask], t_, y_)
    elif len(yy.shape) == 2:
        for n in range(yy.shape[1]):
            yy[mask, n] = np.interp(time[mask], t_, y_[:, n])
    else:
        raise Exception("Array ``y`` must be either 1- or 2-d.")
    return yy
[ "def", "Interpolate", "(", "time", ",", "mask", ",", "y", ")", ":", "# Ensure `y` doesn't get modified in place", "yy", "=", "np", ".", "array", "(", "y", ")", "t_", "=", "np", ".", "delete", "(", "time", ",", "mask", ")", "y_", "=", "np", ".", "delete", "(", "y", ",", "mask", ",", "axis", "=", "0", ")", "if", "len", "(", "yy", ".", "shape", ")", "==", "1", ":", "yy", "[", "mask", "]", "=", "np", ".", "interp", "(", "time", "[", "mask", "]", ",", "t_", ",", "y_", ")", "elif", "len", "(", "yy", ".", "shape", ")", "==", "2", ":", "for", "n", "in", "range", "(", "yy", ".", "shape", "[", "1", "]", ")", ":", "yy", "[", "mask", ",", "n", "]", "=", "np", ".", "interp", "(", "time", "[", "mask", "]", ",", "t_", ",", "y_", "[", ":", ",", "n", "]", ")", "else", ":", "raise", "Exception", "(", "\"Array ``y`` must be either 1- or 2-d.\"", ")", "return", "yy" ]
Masks certain elements in the array `y` and linearly
    interpolates over them, returning an array `y'` of the
    same length.

    :param array_like time: The time array
    :param array_like mask: The indices to be interpolated over
    :param array_like y: The dependent array
[ "Masks", "certain", "elements", "in", "the", "array", "y", "and", "linearly", "interpolates", "over", "them", "returning", "an", "array", "y", "of", "the", "same", "length", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/mathutils.py#L21-L44
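A quick usage sketch for `Interpolate` (the arrays are hypothetical; assumes `numpy` imported as `np`, as in the module above):

import numpy as np

time = np.linspace(0., 9., 10)
flux = np.sin(time)
flux[3:5] = np.nan          # two bad cadences to patch over

clean = Interpolate(time, [3, 4], flux)
# `clean` matches `flux` everywhere except indices 3 and 4, which are
# replaced by linear interpolation between flux[2] and flux[5]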
rodluger/everest
everest/mathutils.py
Chunks
def Chunks(l, n, all=False):
    '''
    Returns a generator of consecutive `n`-sized chunks of list `l`.
    If `all` is `True`, returns **all** `n`-sized chunks in `l`
    by iterating over the starting point.

    '''

    if all:
        jarr = range(0, n - 1)
    else:
        jarr = [0]
    for j in jarr:
        for i in range(j, len(l), n):
            if i + 2 * n <= len(l):
                yield l[i:i + n]
            else:
                if not all:
                    yield l[i:]
                break
python
def Chunks(l, n, all=False):
    '''
    Returns a generator of consecutive `n`-sized chunks of list `l`.
    If `all` is `True`, returns **all** `n`-sized chunks in `l`
    by iterating over the starting point.

    '''

    if all:
        jarr = range(0, n - 1)
    else:
        jarr = [0]
    for j in jarr:
        for i in range(j, len(l), n):
            if i + 2 * n <= len(l):
                yield l[i:i + n]
            else:
                if not all:
                    yield l[i:]
                break
[ "def", "Chunks", "(", "l", ",", "n", ",", "all", "=", "False", ")", ":", "if", "all", ":", "jarr", "=", "range", "(", "0", ",", "n", "-", "1", ")", "else", ":", "jarr", "=", "[", "0", "]", "for", "j", "in", "jarr", ":", "for", "i", "in", "range", "(", "j", ",", "len", "(", "l", ")", ",", "n", ")", ":", "if", "i", "+", "2", "*", "n", "<=", "len", "(", "l", ")", ":", "yield", "l", "[", "i", ":", "i", "+", "n", "]", "else", ":", "if", "not", "all", ":", "yield", "l", "[", "i", ":", "]", "break" ]
Returns a generator of consecutive `n`-sized chunks of list `l`. If `all` is `True`, returns **all** `n`-sized chunks in `l` by iterating over the starting point.
[ "Returns", "a", "generator", "of", "consecutive", "n", "-", "sized", "chunks", "of", "list", "l", ".", "If", "all", "is", "True", "returns", "**", "all", "**", "n", "-", "sized", "chunks", "in", "l", "by", "iterating", "over", "the", "starting", "point", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/mathutils.py#L58-L78
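The generator's behavior is easiest to see on a small list; note how the short tail is absorbed into the last chunk when `all=False` and dropped when `all=True`:

data = list(range(10))

print(list(Chunks(data, 4)))
# -> [[0, 1, 2, 3], [4, 5, 6, 7, 8, 9]]   (tail merged into the last yield)

print(list(Chunks(data, 4, all=True)))
# -> [[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5]]   (offsets 0, 1, 2; partial tails dropped)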
rodluger/everest
everest/mathutils.py
Smooth
def Smooth(x, window_len=100, window='hanning'):
    '''
    Smooth data by convolving on a given timescale.

    :param ndarray x: The data array
    :param int window_len: The size of the smoothing window. Default `100`
    :param str window: The window type. Default `hanning`

    '''

    if window_len == 0:
        return np.zeros_like(x)
    s = np.r_[2 * x[0] - x[window_len - 1::-1], x,
              2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':
        w = np.ones(window_len, 'd')
    else:
        w = eval('np.' + window + '(window_len)')
    y = np.convolve(w / w.sum(), s, mode='same')
    return y[window_len:-window_len + 1]
python
def Smooth(x, window_len=100, window='hanning'):
    '''
    Smooth data by convolving on a given timescale.

    :param ndarray x: The data array
    :param int window_len: The size of the smoothing window. Default `100`
    :param str window: The window type. Default `hanning`

    '''

    if window_len == 0:
        return np.zeros_like(x)
    s = np.r_[2 * x[0] - x[window_len - 1::-1], x,
              2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':
        w = np.ones(window_len, 'd')
    else:
        w = eval('np.' + window + '(window_len)')
    y = np.convolve(w / w.sum(), s, mode='same')
    return y[window_len:-window_len + 1]
[ "def", "Smooth", "(", "x", ",", "window_len", "=", "100", ",", "window", "=", "'hanning'", ")", ":", "if", "window_len", "==", "0", ":", "return", "np", ".", "zeros_like", "(", "x", ")", "s", "=", "np", ".", "r_", "[", "2", "*", "x", "[", "0", "]", "-", "x", "[", "window_len", "-", "1", ":", ":", "-", "1", "]", ",", "x", ",", "2", "*", "x", "[", "-", "1", "]", "-", "x", "[", "-", "1", ":", "-", "window_len", ":", "-", "1", "]", "]", "if", "window", "==", "'flat'", ":", "w", "=", "np", ".", "ones", "(", "window_len", ",", "'d'", ")", "else", ":", "w", "=", "eval", "(", "'np.'", "+", "window", "+", "'(window_len)'", ")", "y", "=", "np", ".", "convolve", "(", "w", "/", "w", ".", "sum", "(", ")", ",", "s", ",", "mode", "=", "'same'", ")", "return", "y", "[", "window_len", ":", "-", "window_len", "+", "1", "]" ]
Smooth data by convolving on a given timescale.

    :param ndarray x: The data array
    :param int window_len: The size of the smoothing window. Default `100`
    :param str window: The window type. Default `hanning`
[ "Smooth", "data", "by", "convolving", "on", "a", "given", "timescale", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/mathutils.py#L81-L101
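A sketch of `Smooth` on synthetic data (hypothetical signal; assumes `numpy` as `np`). The input is reflected at both ends before convolving, so the output has the same length as the input:

import numpy as np

x = np.sin(np.linspace(0., 10., 500)) + 0.1 * np.random.randn(500)
xs = Smooth(x)              # 100-point Hanning window (the default)
assert xs.shape == x.shape  # edge padding preserves the length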
rodluger/everest
everest/mathutils.py
Scatter
def Scatter(y, win=13, remove_outliers=False):
    '''
    Return the scatter in ppm based on the median running standard
    deviation for a window size of :py:obj:`win` = 13 cadences
    (for K2, this is ~6.5 hours, as in VJ14).

    :param ndarray y: The array whose CDPP is to be computed
    :param int win: The window size in cadences. Default `13`
    :param bool remove_outliers: Clip outliers at 5 sigma before computing \
           the CDPP? Default `False`

    '''

    if remove_outliers:
        # Remove 5-sigma outliers from data
        # smoothed on a 1 day timescale
        if len(y) >= 50:
            ys = y - Smooth(y, 50)
        else:
            ys = y
        M = np.nanmedian(ys)
        MAD = 1.4826 * np.nanmedian(np.abs(ys - M))
        out = []
        for i, _ in enumerate(y):
            if (ys[i] > M + 5 * MAD) or (ys[i] < M - 5 * MAD):
                out.append(i)
        out = np.array(out, dtype=int)
        y = np.delete(y, out)
    if len(y):
        return 1.e6 * np.nanmedian([np.std(yi) / np.sqrt(win)
                                    for yi in Chunks(y, win, all=True)])
    else:
        return np.nan
python
def Scatter(y, win=13, remove_outliers=False):
    '''
    Return the scatter in ppm based on the median running standard
    deviation for a window size of :py:obj:`win` = 13 cadences
    (for K2, this is ~6.5 hours, as in VJ14).

    :param ndarray y: The array whose CDPP is to be computed
    :param int win: The window size in cadences. Default `13`
    :param bool remove_outliers: Clip outliers at 5 sigma before computing \
           the CDPP? Default `False`

    '''

    if remove_outliers:
        # Remove 5-sigma outliers from data
        # smoothed on a 1 day timescale
        if len(y) >= 50:
            ys = y - Smooth(y, 50)
        else:
            ys = y
        M = np.nanmedian(ys)
        MAD = 1.4826 * np.nanmedian(np.abs(ys - M))
        out = []
        for i, _ in enumerate(y):
            if (ys[i] > M + 5 * MAD) or (ys[i] < M - 5 * MAD):
                out.append(i)
        out = np.array(out, dtype=int)
        y = np.delete(y, out)
    if len(y):
        return 1.e6 * np.nanmedian([np.std(yi) / np.sqrt(win)
                                    for yi in Chunks(y, win, all=True)])
    else:
        return np.nan
[ "def", "Scatter", "(", "y", ",", "win", "=", "13", ",", "remove_outliers", "=", "False", ")", ":", "if", "remove_outliers", ":", "# Remove 5-sigma outliers from data", "# smoothed on a 1 day timescale", "if", "len", "(", "y", ")", ">=", "50", ":", "ys", "=", "y", "-", "Smooth", "(", "y", ",", "50", ")", "else", ":", "ys", "=", "y", "M", "=", "np", ".", "nanmedian", "(", "ys", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "ys", "-", "M", ")", ")", "out", "=", "[", "]", "for", "i", ",", "_", "in", "enumerate", "(", "y", ")", ":", "if", "(", "ys", "[", "i", "]", ">", "M", "+", "5", "*", "MAD", ")", "or", "(", "ys", "[", "i", "]", "<", "M", "-", "5", "*", "MAD", ")", ":", "out", ".", "append", "(", "i", ")", "out", "=", "np", ".", "array", "(", "out", ",", "dtype", "=", "int", ")", "y", "=", "np", ".", "delete", "(", "y", ",", "out", ")", "if", "len", "(", "y", ")", ":", "return", "1.e6", "*", "np", ".", "nanmedian", "(", "[", "np", ".", "std", "(", "yi", ")", "/", "np", ".", "sqrt", "(", "win", ")", "for", "yi", "in", "Chunks", "(", "y", ",", "win", ",", "all", "=", "True", ")", "]", ")", "else", ":", "return", "np", ".", "nan" ]
Return the scatter in ppm based on the median running standard
    deviation for a window size of :py:obj:`win` = 13 cadences
    (for K2, this is ~6.5 hours, as in VJ14).

    :param ndarray y: The array whose CDPP is to be computed
    :param int win: The window size in cadences. Default `13`
    :param bool remove_outliers: Clip outliers at 5 sigma before computing \
           the CDPP? Default `False`
[ "Return", "the", "scatter", "in", "ppm", "based", "on", "the", "median", "running", "standard", "deviation", "for", "a", "window", "size", "of", ":", "py", ":", "obj", ":", "win", "=", "13", "cadences", "(", "for", "K2", "this", "is", "~6", ".", "5", "hours", "as", "in", "VJ14", ")", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/mathutils.py#L104-L136
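A sanity check on white noise (hypothetical flux array; assumes `numpy` as `np`). Since each 13-cadence chunk's standard deviation is divided by sqrt(13), Gaussian noise at 100 ppm about unity should yield roughly 100/sqrt(13), i.e. about 28 ppm:

import numpy as np

y = 1. + 1.e-4 * np.random.randn(5000)   # 100 ppm white noise about unity
print(Scatter(y))                        # ~28 ppm
print(Scatter(y, remove_outliers=True))  # similar; 5-sigma clipping first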
rodluger/everest
everest/mathutils.py
SavGol
def SavGol(y, win=49):
    '''
    Subtracts a second order Savitzky-Golay filter with window size
    `win` and returns the result. This acts as a high pass filter.

    '''

    if len(y) >= win:
        return y - savgol_filter(y, win, 2) + np.nanmedian(y)
    else:
        return y
python
def SavGol(y, win=49):
    '''
    Subtracts a second order Savitzky-Golay filter with window size
    `win` and returns the result. This acts as a high pass filter.

    '''

    if len(y) >= win:
        return y - savgol_filter(y, win, 2) + np.nanmedian(y)
    else:
        return y
[ "def", "SavGol", "(", "y", ",", "win", "=", "49", ")", ":", "if", "len", "(", "y", ")", ">=", "win", ":", "return", "y", "-", "savgol_filter", "(", "y", ",", "win", ",", "2", ")", "+", "np", ".", "nanmedian", "(", "y", ")", "else", ":", "return", "y" ]
Subtracts a second order Savitzky-Golay filter with window size
    `win` and returns the result. This acts as a high pass filter.
[ "Subtracts", "a", "second", "order", "Savitsky", "-", "Golay", "filter", "with", "window", "size", "win", "and", "returns", "the", "result", ".", "This", "acts", "as", "a", "high", "pass", "filter", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/mathutils.py#L139-L149
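A sketch of `SavGol` as a high pass filter (hypothetical data; assumes `numpy` as `np` and `scipy.signal.savgol_filter` importable, as in the module above):

import numpy as np

t = np.linspace(0., 10., 500)
y = 1. + 0.05 * t + 0.01 * np.sin(50. * t)   # slow trend + fast signal

y_filt = SavGol(y)   # trend removed, re-centered on the input median
# Arrays shorter than `win` (49 points by default) are returned unchanged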
rodluger/everest
everest/mathutils.py
NumRegressors
def NumRegressors(npix, pld_order, cross_terms=True):
    '''
    Return the number of regressors for `npix` pixels
    and PLD order `pld_order`.

    :param bool cross_terms: Include pixel cross-terms? \
           Default :py:obj:`True`

    '''

    res = 0
    for k in range(1, pld_order + 1):
        if cross_terms:
            res += comb(npix + k - 1, k)
        else:
            res += npix
    return int(res)
python
def NumRegressors(npix, pld_order, cross_terms=True):
    '''
    Return the number of regressors for `npix` pixels
    and PLD order `pld_order`.

    :param bool cross_terms: Include pixel cross-terms? \
           Default :py:obj:`True`

    '''

    res = 0
    for k in range(1, pld_order + 1):
        if cross_terms:
            res += comb(npix + k - 1, k)
        else:
            res += npix
    return int(res)
[ "def", "NumRegressors", "(", "npix", ",", "pld_order", ",", "cross_terms", "=", "True", ")", ":", "res", "=", "0", "for", "k", "in", "range", "(", "1", ",", "pld_order", "+", "1", ")", ":", "if", "cross_terms", ":", "res", "+=", "comb", "(", "npix", "+", "k", "-", "1", ",", "k", ")", "else", ":", "res", "+=", "npix", "return", "int", "(", "res", ")" ]
Return the number of regressors for `npix` pixels
    and PLD order `pld_order`.

    :param bool cross_terms: Include pixel cross-terms? \
           Default :py:obj:`True`
[ "Return", "the", "number", "of", "regressors", "for", "npix", "pixels", "and", "PLD", "order", "pld_order", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/mathutils.py#L152-L167
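With cross-terms, the order-`k` regressors are all multisets of `k` pixels, of which there are C(npix + k - 1, k); summing over orders gives the total. A quick check for a hypothetical 9-pixel aperture:

print(NumRegressors(9, 3))
# -> 219  (C(9,1) + C(10,2) + C(11,3) = 9 + 45 + 165)

print(NumRegressors(9, 3, cross_terms=False))
# -> 27   (npix per order: 3 * 9)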
rodluger/everest
everest/mathutils.py
Downbin
def Downbin(x, newsize, axis=0, operation='mean'):
    '''
    Downbins an array to a smaller size.

    :param array_like x: The array to down-bin
    :param int newsize: The new size of the axis along which to down-bin
    :param int axis: The axis to operate on. Default 0
    :param str operation: The operation to perform when down-binning. \
           Default `mean`

    '''

    assert newsize < x.shape[axis], \
        "The new size of the array must be smaller than the current size."
    oldsize = x.shape[axis]
    newshape = list(x.shape)
    newshape[axis] = newsize
    newshape.insert(axis + 1, oldsize // newsize)
    trim = oldsize % newsize
    if trim:
        xtrim = x[:-trim]
    else:
        xtrim = x
    if operation == 'mean':
        xbin = np.nanmean(xtrim.reshape(newshape), axis=axis + 1)
    elif operation == 'sum':
        xbin = np.nansum(xtrim.reshape(newshape), axis=axis + 1)
    elif operation == 'quadsum':
        xbin = np.sqrt(np.nansum(xtrim.reshape(newshape) ** 2,
                                 axis=axis + 1))
    elif operation == 'median':
        xbin = np.nanmedian(xtrim.reshape(newshape), axis=axis + 1)
    else:
        raise ValueError("`operation` must be either `mean`, " +
                         "`sum`, `quadsum`, or `median`.")
    return xbin
python
def Downbin(x, newsize, axis=0, operation='mean'):
    '''
    Downbins an array to a smaller size.

    :param array_like x: The array to down-bin
    :param int newsize: The new size of the axis along which to down-bin
    :param int axis: The axis to operate on. Default 0
    :param str operation: The operation to perform when down-binning. \
           Default `mean`

    '''

    assert newsize < x.shape[axis], \
        "The new size of the array must be smaller than the current size."
    oldsize = x.shape[axis]
    newshape = list(x.shape)
    newshape[axis] = newsize
    newshape.insert(axis + 1, oldsize // newsize)
    trim = oldsize % newsize
    if trim:
        xtrim = x[:-trim]
    else:
        xtrim = x
    if operation == 'mean':
        xbin = np.nanmean(xtrim.reshape(newshape), axis=axis + 1)
    elif operation == 'sum':
        xbin = np.nansum(xtrim.reshape(newshape), axis=axis + 1)
    elif operation == 'quadsum':
        xbin = np.sqrt(np.nansum(xtrim.reshape(newshape) ** 2,
                                 axis=axis + 1))
    elif operation == 'median':
        xbin = np.nanmedian(xtrim.reshape(newshape), axis=axis + 1)
    else:
        raise ValueError("`operation` must be either `mean`, " +
                         "`sum`, `quadsum`, or `median`.")
    return xbin
[ "def", "Downbin", "(", "x", ",", "newsize", ",", "axis", "=", "0", ",", "operation", "=", "'mean'", ")", ":", "assert", "newsize", "<", "x", ".", "shape", "[", "axis", "]", ",", "\"The new size of the array must be smaller than the current size.\"", "oldsize", "=", "x", ".", "shape", "[", "axis", "]", "newshape", "=", "list", "(", "x", ".", "shape", ")", "newshape", "[", "axis", "]", "=", "newsize", "newshape", ".", "insert", "(", "axis", "+", "1", ",", "oldsize", "//", "newsize", ")", "trim", "=", "oldsize", "%", "newsize", "if", "trim", ":", "xtrim", "=", "x", "[", ":", "-", "trim", "]", "else", ":", "xtrim", "=", "x", "if", "operation", "==", "'mean'", ":", "xbin", "=", "np", ".", "nanmean", "(", "xtrim", ".", "reshape", "(", "newshape", ")", ",", "axis", "=", "axis", "+", "1", ")", "elif", "operation", "==", "'sum'", ":", "xbin", "=", "np", ".", "nansum", "(", "xtrim", ".", "reshape", "(", "newshape", ")", ",", "axis", "=", "axis", "+", "1", ")", "elif", "operation", "==", "'quadsum'", ":", "xbin", "=", "np", ".", "sqrt", "(", "np", ".", "nansum", "(", "xtrim", ".", "reshape", "(", "newshape", ")", "**", "2", ",", "axis", "=", "axis", "+", "1", ")", ")", "elif", "operation", "==", "'median'", ":", "xbin", "=", "np", ".", "nanmedian", "(", "xtrim", ".", "reshape", "(", "newshape", ")", ",", "axis", "=", "axis", "+", "1", ")", "else", ":", "raise", "ValueError", "(", "\"`operation` must be either `mean`, \"", "+", "\"`sum`, `quadsum`, or `median`.\"", ")", "return", "xbin" ]
Downbins an array to a smaller size.

    :param array_like x: The array to down-bin
    :param int newsize: The new size of the axis along which to down-bin
    :param int axis: The axis to operate on. Default 0
    :param str operation: The operation to perform when down-binning. \
           Default `mean`
[ "Downbins", "an", "array", "to", "a", "smaller", "size", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/mathutils.py#L170-L205
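A sketch of `Downbin` (hypothetical array; assumes `numpy` as `np`). When `oldsize` is not a multiple of `newsize`, the trailing remainder is trimmed before binning:

import numpy as np

x = np.arange(100, dtype=float)
print(Downbin(x, 5))                    # mean of each 20-sample block
print(Downbin(x, 5, operation='sum'))   # block sums instead of means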
lsbardel/python-stdnet
stdnet/odm/fields.py
Field.register_with_model
def register_with_model(self, name, model):
        '''Called during the creation of the :class:`StdModel` class
        when :class:`Metaclass` is initialised. It fills
        :attr:`Field.name` and :attr:`Field.model`.
        This is an internal function users should never call.'''
        if self.name:
            raise FieldError('Field %s is already registered\
 with a model' % self)
        self.name = name
        self.attname = self.get_attname()
        self.model = model
        meta = model._meta
        self.meta = meta
        meta.dfields[name] = self
        meta.fields.append(self)
        if not self.primary_key:
            self.add_to_fields()
        else:
            model._meta.pk = self
python
def register_with_model(self, name, model):
        '''Called during the creation of the :class:`StdModel` class
        when :class:`Metaclass` is initialised. It fills
        :attr:`Field.name` and :attr:`Field.model`.
        This is an internal function users should never call.'''
        if self.name:
            raise FieldError('Field %s is already registered\
 with a model' % self)
        self.name = name
        self.attname = self.get_attname()
        self.model = model
        meta = model._meta
        self.meta = meta
        meta.dfields[name] = self
        meta.fields.append(self)
        if not self.primary_key:
            self.add_to_fields()
        else:
            model._meta.pk = self
[ "def", "register_with_model", "(", "self", ",", "name", ",", "model", ")", ":", "if", "self", ".", "name", ":", "raise", "FieldError", "(", "'Field %s is already registered\\\n with a model'", "%", "self", ")", "self", ".", "name", "=", "name", "self", ".", "attname", "=", "self", ".", "get_attname", "(", ")", "self", ".", "model", "=", "model", "meta", "=", "model", ".", "_meta", "self", ".", "meta", "=", "meta", "meta", ".", "dfields", "[", "name", "]", "=", "self", "meta", ".", "fields", ".", "append", "(", "self", ")", "if", "not", "self", ".", "primary_key", ":", "self", ".", "add_to_fields", "(", ")", "else", ":", "model", ".", "_meta", ".", "pk", "=", "self" ]
Called during the creation of the :class:`StdModel` class
    when :class:`Metaclass` is initialised. It fills
    :attr:`Field.name` and :attr:`Field.model`.
    This is an internal function users should never call.
[ "Called", "during", "the", "creation", "of", "a", "the", ":", "class", ":", "StdModel", "class", "when", ":", "class", ":", "Metaclass", "is", "initialised", ".", "It", "fills", ":", "attr", ":", "Field", ".", "name", "and", ":", "attr", ":", "Field", ".", "model", ".", "This", "is", "an", "internal", "function", "users", "should", "never", "call", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/fields.py#L192-L210
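`register_with_model` is not meant to be called by users; it runs when the model class is assembled. A sketch of what triggers it (the model and field names here are hypothetical; `SymbolField` and `IntegerField` are assumed to be available in `stdnet.odm`):

from stdnet import odm

class Book(odm.StdModel):
    # During class creation the metaclass calls, e.g.,
    # title.register_with_model('title', Book)
    title = odm.SymbolField()
    pages = odm.IntegerField()

# After registration each field knows its name and model:
# Book._meta.dfields['title'].name == 'title'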
lsbardel/python-stdnet
stdnet/odm/fields.py
Field.add_to_fields
def add_to_fields(self):
        '''Add this :class:`Field` to the fields of :attr:`model`.'''
        meta = self.model._meta
        meta.scalarfields.append(self)
        if self.index:
            meta.indices.append(self)
python
def add_to_fields(self):
        '''Add this :class:`Field` to the fields of :attr:`model`.'''
        meta = self.model._meta
        meta.scalarfields.append(self)
        if self.index:
            meta.indices.append(self)
[ "def", "add_to_fields", "(", "self", ")", ":", "meta", "=", "self", ".", "model", ".", "_meta", "meta", ".", "scalarfields", ".", "append", "(", "self", ")", "if", "self", ".", "index", ":", "meta", ".", "indices", ".", "append", "(", "self", ")" ]
Add this :class:`Field` to the fields of :attr:`model`.
[ "Add", "this", ":", "class", ":", "Field", "to", "the", "fields", "of", ":", "attr", ":", "model", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/fields.py#L212-L217
lsbardel/python-stdnet
stdnet/odm/fields.py
Field.get_lookup
def get_lookup(self, remaining, errorClass=ValueError):
        '''called by the :class:`Query` method when it needs to build
        lookup on fields with additional nested fields. This is the
        case of :class:`ForeignKey` and :class:`JSONField`.

        :param remaining: the :ref:`double underscored` fields of this
            :class:`Field`
        :param errorClass: Optional exception class to use if the
            *remaining* field is not valid.'''
        if remaining:
            raise errorClass('Cannot use nested lookup on field %s' % self)
        return (self.attname, None)
python
def get_lookup(self, remaining, errorClass=ValueError):
        '''called by the :class:`Query` method when it needs to build
        lookup on fields with additional nested fields. This is the
        case of :class:`ForeignKey` and :class:`JSONField`.

        :param remaining: the :ref:`double underscored` fields of this
            :class:`Field`
        :param errorClass: Optional exception class to use if the
            *remaining* field is not valid.'''
        if remaining:
            raise errorClass('Cannot use nested lookup on field %s' % self)
        return (self.attname, None)
[ "def", "get_lookup", "(", "self", ",", "remaining", ",", "errorClass", "=", "ValueError", ")", ":", "if", "remaining", ":", "raise", "errorClass", "(", "'Cannot use nested lookup on field %s'", "%", "self", ")", "return", "(", "self", ".", "attname", ",", "None", ")" ]
called by the :class:`Query` method when it needs to build
    lookup on fields with additional nested fields. This is the
    case of :class:`ForeignKey` and :class:`JSONField`.

    :param remaining: the :ref:`double underscored` fields of this
        :class:`Field`
    :param errorClass: Optional exception class to use if the
        *remaining* field is not valid.
[ "called", "by", "the", ":", "class", ":", "Query", "method", "when", "it", "needs", "to", "build", "lookup", "on", "fields", "with", "additional", "nested", "fields", ".", "This", "is", "the", "case", "of", ":", "class", ":", "ForeignKey", "and", ":", "class", ":", "JSONField", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/fields.py#L254-L264
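A sketch of the lookup contract for a plain scalar field (reusing the hypothetical `Book` model above): with no remaining bits the field returns its attribute name, while nested bits raise, since only :class:`ForeignKey` and :class:`JSONField` override this behavior.

field = Book._meta.dfields['title']

field.get_lookup(None)    # -> ('title', None), assuming attname == 'title'
field.get_lookup('size')  # raises ValueError: no nested lookup on a scalar field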
lsbardel/python-stdnet
stdnet/odm/fields.py
Field.get_value
def get_value(self, instance, *bits):
        '''Retrieve the value of this :class:`Field` from a
        :class:`StdModel` ``instance``.

        :param instance: The :class:`StdModel` ``instance`` invoking
            this function.
        :param bits: Additional information for nested fields which
            derives from the
            :ref:`double underscore <tutorial-underscore>` notation.
        :return: the value of this :class:`Field` in the ``instance``.
            Can raise :class:`AttributeError`.

        This method is used by the :meth:`StdModel.get_attr_value`
        method when retrieving values from a :class:`StdModel` instance.
        '''
        if bits:
            raise AttributeError
        else:
            return getattr(instance, self.attname)
python
def get_value(self, instance, *bits):
        '''Retrieve the value of this :class:`Field` from a
        :class:`StdModel` ``instance``.

        :param instance: The :class:`StdModel` ``instance`` invoking
            this function.
        :param bits: Additional information for nested fields which
            derives from the
            :ref:`double underscore <tutorial-underscore>` notation.
        :return: the value of this :class:`Field` in the ``instance``.
            Can raise :class:`AttributeError`.

        This method is used by the :meth:`StdModel.get_attr_value`
        method when retrieving values from a :class:`StdModel` instance.
        '''
        if bits:
            raise AttributeError
        else:
            return getattr(instance, self.attname)
[ "def", "get_value", "(", "self", ",", "instance", ",", "*", "bits", ")", ":", "if", "bits", ":", "raise", "AttributeError", "else", ":", "return", "getattr", "(", "instance", ",", "self", ".", "attname", ")" ]
Retrieve the value of this :class:`Field` from a
    :class:`StdModel` ``instance``.

    :param instance: The :class:`StdModel` ``instance`` invoking
        this function.
    :param bits: Additional information for nested fields which
        derives from the
        :ref:`double underscore <tutorial-underscore>` notation.
    :return: the value of this :class:`Field` in the ``instance``.
        Can raise :class:`AttributeError`.

    This method is used by the :meth:`StdModel.get_attr_value`
    method when retrieving values from a :class:`StdModel` instance.
[ "Retrieve", "the", "value", ":", "class", ":", "Field", "from", "a", ":", "class", ":", "StdModel", "instance", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/fields.py#L272-L288
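A sketch of `get_value` on the hypothetical `Book` model above; scalar fields accept no nested bits:

book = Book(title='Dune', pages=412)
field = Book._meta.dfields['pages']

field.get_value(book)           # -> 412
field.get_value(book, 'extra')  # raises AttributeError: scalar fields take no bits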