Dataset schema (column name, type, min-max length):

  repository_name             string   7-55
  func_path_in_repository     string   4-223
  func_name                   string   1-134
  whole_func_string           string   75-104k
  language                    string   1 distinct value
  func_code_string            string   75-104k
  func_code_tokens            list     19-28.4k
  func_documentation_string   string   1-46.9k
  func_documentation_tokens   list     1-1.97k
  split_name                  string   1 distinct value
  func_code_url               string   87-315
repository_name: lsbardel/python-stdnet
func_path_in_repository: stdnet/odm/fields.py
func_name: Field.set_value
language: python
func_code_string:

def set_value(self, instance, value):
    '''Set the ``value`` for this :class:`Field` in an ``instance`` of
    a :class:`StdModel`.'''
    setattr(instance, self.attname, self.to_python(value))
[ "def", "set_value", "(", "self", ",", "instance", ",", "value", ")", ":", "setattr", "(", "instance", ",", "self", ".", "attname", ",", "self", ".", "to_python", "(", "value", ")", ")" ]
split_name: train
func_code_url: https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/fields.py#L290-L293
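The method simply coerces the value with the field's to_python() hook and assigns it onto the model instance. A minimal self-contained sketch of the same pattern (hypothetical stub classes, not the real stdnet API):

# Stand-ins for Field/StdModel, for illustration only.
class IntField:
    attname = 'score'

    def to_python(self, value):
        return int(value)

    def set_value(self, instance, value):
        # same pattern as Field.set_value above
        setattr(instance, self.attname, self.to_python(value))

class Model:
    pass

m = Model()
IntField().set_value(m, '42')
print(m.score)  # -> 42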
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: lookup
language: python
func_code_string:

def lookup(var_name, contexts=(), start=0):
    """lookup the value of the var_name on the stack of contexts

    :var_name: TODO
    :contexts: TODO
    :returns: None if not found
    """
    start = len(contexts) if start >= 0 else start
    for context in reversed(contexts[:start]):
        try:
            if var_name in context:
                return context[var_name]
        except TypeError as te:
            # we may put variable on the context, skip it
            continue
    return None
[ "def", "lookup", "(", "var_name", ",", "contexts", "=", "(", ")", ",", "start", "=", "0", ")", ":", "start", "=", "len", "(", "contexts", ")", "if", "start", ">=", "0", "else", "start", "for", "context", "in", "reversed", "(", "contexts", "[", ":", "start", "]", ")", ":", "try", ":", "if", "var_name", "in", "context", ":", "return", "context", "[", "var_name", "]", "except", "TypeError", "as", "te", ":", "# we may put variable on the context, skip it", "continue", "return", "None" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L34-L50
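Since lookup() is a pure function, its shadowing behavior is easy to check directly (assumes the definition above is in scope):

contexts = [{'name': 'outer', 'x': 1}, {'name': 'inner'}]
print(lookup('name', contexts))     # -> 'inner' (innermost context wins)
print(lookup('x', contexts))        # -> 1 (falls back to outer contexts)
print(lookup('missing', contexts))  # -> None (not found anywhere)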
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: delimiters_to_re
language: python
func_code_string:

def delimiters_to_re(delimiters):
    """convert delimiters to corresponding regular expressions"""
    # caching
    delimiters = tuple(delimiters)
    if delimiters in re_delimiters:
        re_tag = re_delimiters[delimiters]
    else:
        open_tag, close_tag = delimiters

        # escape
        open_tag = ''.join([c if c.isalnum() else '\\' + c
                            for c in open_tag])
        close_tag = ''.join([c if c.isalnum() else '\\' + c
                             for c in close_tag])

        re_tag = re.compile(open_tag + r'([#^>&{/!=]?)\s*(.*?)\s*([}=]?)'
                            + close_tag, re.DOTALL)
        re_delimiters[delimiters] = re_tag

    return re_tag
[ "def", "delimiters_to_re", "(", "delimiters", ")", ":", "# caching", "delimiters", "=", "tuple", "(", "delimiters", ")", "if", "delimiters", "in", "re_delimiters", ":", "re_tag", "=", "re_delimiters", "[", "delimiters", "]", "else", ":", "open_tag", ",", "close_tag", "=", "delimiters", "# escape", "open_tag", "=", "''", ".", "join", "(", "[", "c", "if", "c", ".", "isalnum", "(", ")", "else", "'\\\\'", "+", "c", "for", "c", "in", "open_tag", "]", ")", "close_tag", "=", "''", ".", "join", "(", "[", "c", "if", "c", ".", "isalnum", "(", ")", "else", "'\\\\'", "+", "c", "for", "c", "in", "close_tag", "]", ")", "re_tag", "=", "re", ".", "compile", "(", "open_tag", "+", "r'([#^>&{/!=]?)\\s*(.*?)\\s*([}=]?)'", "+", "close_tag", ",", "re", ".", "DOTALL", ")", "re_delimiters", "[", "delimiters", "]", "=", "re_tag", "return", "re_tag" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L69-L86
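A quick demonstration of the compiled tag regex; re_delimiters is a module-level cache dict in mustache.py, so this sketch supplies its own (and assumes the delimiters_to_re() definition above is in scope):

import re

re_delimiters = {}  # module-level cache assumed by delimiters_to_re

re_tag = delimiters_to_re(('{{', '}}'))
m = re_tag.search('Hello, {{ name }}!')
print(m.groups())  # -> ('', 'name', ''): sigil prefix, tag name, suffix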
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: is_standalone
language: python
func_code_string:

def is_standalone(text, start, end):
    """check if the string text[start:end] is standalone by checking
    forwards and backwards for blank spaces

    :text: TODO
    :(start, end): TODO
    :returns: the start of next index after text[start:end]
    """
    left = False
    start -= 1
    while start >= 0 and text[start] in spaces_not_newline:
        start -= 1

    if start < 0 or text[start] == '\n':
        left = True

    right = re_space.match(text, end)
    return (start+1, right.end()) if left and right else None
[ "def", "is_standalone", "(", "text", ",", "start", ",", "end", ")", ":", "left", "=", "False", "start", "-=", "1", "while", "start", ">=", "0", "and", "text", "[", "start", "]", "in", "spaces_not_newline", ":", "start", "-=", "1", "if", "start", "<", "0", "or", "text", "[", "start", "]", "==", "'\\n'", ":", "left", "=", "True", "right", "=", "re_space", ".", "match", "(", "text", ",", "end", ")", "return", "(", "start", "+", "1", ",", "right", ".", "end", "(", ")", ")", "if", "left", "and", "right", "else", "None" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L91-L108
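A sketch of how it behaves on a tag that sits alone on its line. The spaces_not_newline and re_space globals are defined elsewhere in mustache.py; the definitions below are assumptions, consistent only with how is_standalone uses them:

import re

spaces_not_newline = ' \t\r\b\f'              # assumed definition
re_space = re.compile(r'[ \t\r\b\f]*(\n|$)')  # assumed definition

# assumes is_standalone() from the record above is in scope
text = 'line1\n  {{! comment }}  \nline2'
start = text.index('{{')
end = text.index('}}') + 2
# whitespace-only on both sides, so the tag is standalone; the result
# is the span to strip: just after the preceding '\n' up to just past
# the trailing one
print(is_standalone(text, start, end))  # -> (6, 25)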
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: compiled
language: python
func_code_string:

def compiled(template, delimiters=DEFAULT_DELIMITERS):
    """Compile a template into token tree

    :template: TODO
    :delimiters: TODO
    :returns: the root token
    """
    re_tag = delimiters_to_re(delimiters)

    # variable to save states
    tokens = []
    index = 0
    sections = []
    tokens_stack = []

    # root token
    root = Root('root')
    root.filters = copy.copy(filters)

    m = re_tag.search(template, index)

    while m is not None:
        token = None
        last_literal = None
        strip_space = False

        if m.start() > index:
            last_literal = Literal('str', template[index:m.start()],
                                   root=root)
            tokens.append(last_literal)

        # parse token
        prefix, name, suffix = m.groups()

        if prefix == '=' and suffix == '=':
            # {{=| |=}} to change delimiters
            delimiters = re.split(r'\s+', name)
            if len(delimiters) != 2:
                raise SyntaxError('Invalid new delimiter definition: '
                                  + m.group())
            re_tag = delimiters_to_re(delimiters)
            strip_space = True
        elif prefix == '{' and suffix == '}':
            # {{{ variable }}}
            token = Variable(name, name, root=root)
        elif prefix == '' and suffix == '':
            # {{ name }}
            token = Variable(name, name, root=root)
            token.escape = True
        elif suffix != '' and suffix != None:
            raise SyntaxError('Invalid token: ' + m.group())
        elif prefix == '&':
            # {{& escaped variable }}
            token = Variable(name, name, root=root)
        elif prefix == '!':
            # {{! comment }}
            token = Comment(name, root=root)
            if len(sections) <= 0:
                # considered as standalone only outside sections
                strip_space = True
        elif prefix == '>':
            # {{> partial}}
            token = Partial(name, name, root=root)
            strip_space = True
            pos = is_standalone(template, m.start(), m.end())
            if pos:
                token.indent = len(template[pos[0]:m.start()])
        elif prefix == '#' or prefix == '^':
            # {{# section }} or # {{^ inverted }}
            # strip filter
            sec_name = name.split('|')[0].strip()
            token = Section(sec_name, name, root=root) \
                if prefix == '#' else Inverted(name, name, root=root)
            token.delimiter = delimiters
            tokens.append(token)

            # save the tokens onto stack
            token = None
            tokens_stack.append(tokens)
            tokens = []
            sections.append((sec_name, prefix, m.end()))
            strip_space = True
        elif prefix == '/':
            tag_name, sec_type, text_end = sections.pop()
            if tag_name != name:
                raise SyntaxError("unclosed tag: '" + tag_name
                                  + "' Got:" + m.group())
            children = tokens
            tokens = tokens_stack.pop()
            tokens[-1].text = template[text_end:m.start()]
            tokens[-1].children = children
            strip_space = True
        else:
            raise SyntaxError('Unknown tag: ' + m.group())

        if token is not None:
            tokens.append(token)

        index = m.end()
        if strip_space:
            pos = is_standalone(template, m.start(), m.end())
            if pos:
                index = pos[1]
                if last_literal:
                    last_literal.value = \
                        last_literal.value.rstrip(spaces_not_newline)

        m = re_tag.search(template, index)

    tokens.append(Literal('str', template[index:]))
    root.children = tokens
    return root
[ "def", "compiled", "(", "template", ",", "delimiters", "=", "DEFAULT_DELIMITERS", ")", ":", "re_tag", "=", "delimiters_to_re", "(", "delimiters", ")", "# variable to save states", "tokens", "=", "[", "]", "index", "=", "0", "sections", "=", "[", "]", "tokens_stack", "=", "[", "]", "# root token", "root", "=", "Root", "(", "'root'", ")", "root", ".", "filters", "=", "copy", ".", "copy", "(", "filters", ")", "m", "=", "re_tag", ".", "search", "(", "template", ",", "index", ")", "while", "m", "is", "not", "None", ":", "token", "=", "None", "last_literal", "=", "None", "strip_space", "=", "False", "if", "m", ".", "start", "(", ")", ">", "index", ":", "last_literal", "=", "Literal", "(", "'str'", ",", "template", "[", "index", ":", "m", ".", "start", "(", ")", "]", ",", "root", "=", "root", ")", "tokens", ".", "append", "(", "last_literal", ")", "# parse token", "prefix", ",", "name", ",", "suffix", "=", "m", ".", "groups", "(", ")", "if", "prefix", "==", "'='", "and", "suffix", "==", "'='", ":", "# {{=| |=}} to change delimiters", "delimiters", "=", "re", ".", "split", "(", "r'\\s+'", ",", "name", ")", "if", "len", "(", "delimiters", ")", "!=", "2", ":", "raise", "SyntaxError", "(", "'Invalid new delimiter definition: '", "+", "m", ".", "group", "(", ")", ")", "re_tag", "=", "delimiters_to_re", "(", "delimiters", ")", "strip_space", "=", "True", "elif", "prefix", "==", "'{'", "and", "suffix", "==", "'}'", ":", "# {{{ variable }}}", "token", "=", "Variable", "(", "name", ",", "name", ",", "root", "=", "root", ")", "elif", "prefix", "==", "''", "and", "suffix", "==", "''", ":", "# {{ name }}", "token", "=", "Variable", "(", "name", ",", "name", ",", "root", "=", "root", ")", "token", ".", "escape", "=", "True", "elif", "suffix", "!=", "''", "and", "suffix", "!=", "None", ":", "raise", "SyntaxError", "(", "'Invalid token: '", "+", "m", ".", "group", "(", ")", ")", "elif", "prefix", "==", "'&'", ":", "# {{& escaped variable }}", "token", "=", "Variable", "(", "name", ",", "name", ",", "root", "=", "root", ")", "elif", "prefix", "==", "'!'", ":", "# {{! 
comment }}", "token", "=", "Comment", "(", "name", ",", "root", "=", "root", ")", "if", "len", "(", "sections", ")", "<=", "0", ":", "# considered as standalone only outside sections", "strip_space", "=", "True", "elif", "prefix", "==", "'>'", ":", "# {{> partial}}", "token", "=", "Partial", "(", "name", ",", "name", ",", "root", "=", "root", ")", "strip_space", "=", "True", "pos", "=", "is_standalone", "(", "template", ",", "m", ".", "start", "(", ")", ",", "m", ".", "end", "(", ")", ")", "if", "pos", ":", "token", ".", "indent", "=", "len", "(", "template", "[", "pos", "[", "0", "]", ":", "m", ".", "start", "(", ")", "]", ")", "elif", "prefix", "==", "'#'", "or", "prefix", "==", "'^'", ":", "# {{# section }} or # {{^ inverted }}", "# strip filter", "sec_name", "=", "name", ".", "split", "(", "'|'", ")", "[", "0", "]", ".", "strip", "(", ")", "token", "=", "Section", "(", "sec_name", ",", "name", ",", "root", "=", "root", ")", "if", "prefix", "==", "'#'", "else", "Inverted", "(", "name", ",", "name", ",", "root", "=", "root", ")", "token", ".", "delimiter", "=", "delimiters", "tokens", ".", "append", "(", "token", ")", "# save the tokens onto stack", "token", "=", "None", "tokens_stack", ".", "append", "(", "tokens", ")", "tokens", "=", "[", "]", "sections", ".", "append", "(", "(", "sec_name", ",", "prefix", ",", "m", ".", "end", "(", ")", ")", ")", "strip_space", "=", "True", "elif", "prefix", "==", "'/'", ":", "tag_name", ",", "sec_type", ",", "text_end", "=", "sections", ".", "pop", "(", ")", "if", "tag_name", "!=", "name", ":", "raise", "SyntaxError", "(", "\"unclosed tag: '\"", "+", "tag_name", "+", "\"' Got:\"", "+", "m", ".", "group", "(", ")", ")", "children", "=", "tokens", "tokens", "=", "tokens_stack", ".", "pop", "(", ")", "tokens", "[", "-", "1", "]", ".", "text", "=", "template", "[", "text_end", ":", "m", ".", "start", "(", ")", "]", "tokens", "[", "-", "1", "]", ".", "children", "=", "children", "strip_space", "=", "True", "else", ":", "raise", "SyntaxError", "(", "'Unknown tag: '", "+", "m", ".", "group", "(", ")", ")", "if", "token", "is", "not", "None", ":", "tokens", ".", "append", "(", "token", ")", "index", "=", "m", ".", "end", "(", ")", "if", "strip_space", ":", "pos", "=", "is_standalone", "(", "template", ",", "m", ".", "start", "(", ")", ",", "m", ".", "end", "(", ")", ")", "if", "pos", ":", "index", "=", "pos", "[", "1", "]", "if", "last_literal", ":", "last_literal", ".", "value", "=", "last_literal", ".", "value", ".", "rstrip", "(", "spaces_not_newline", ")", "m", "=", "re_tag", ".", "search", "(", "template", ",", "index", ")", "tokens", ".", "append", "(", "Literal", "(", "'str'", ",", "template", "[", "index", ":", "]", ")", ")", "root", ".", "children", "=", "tokens", "return", "root" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L110-L229
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: Token._escape
language: python
func_code_string:

def _escape(self, text):
    """Escape text according to self.escape"""
    ret = EMPTYSTRING if text is None else str(text)
    if self.escape:
        return html_escape(ret)
    else:
        return ret
[ "def", "_escape", "(", "self", ",", "text", ")", ":", "ret", "=", "EMPTYSTRING", "if", "text", "is", "None", "else", "str", "(", "text", ")", "if", "self", ".", "escape", ":", "return", "html_escape", "(", "ret", ")", "else", ":", "return", "ret" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L263-L269
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: Token._lookup
language: python
func_code_string:

def _lookup(self, dot_name, contexts):
    """lookup value for names like 'a.b.c' and handle filters as well"""
    # process filters
    filters = [x for x in map(lambda x: x.strip(), dot_name.split('|'))]
    dot_name = filters[0]
    filters = filters[1:]

    # should support paths like '../../a.b.c/../d', etc.
    if not dot_name.startswith('.'):
        dot_name = './' + dot_name

    paths = dot_name.split('/')
    last_path = paths[-1]

    # path like '../..' or ./../. etc.
    refer_context = last_path == '' or last_path == '.' or last_path == '..'
    paths = paths if refer_context else paths[:-1]

    # count path level
    level = 0
    for path in paths:
        if path == '..':
            level -= 1
        elif path != '.':
            # ../a.b.c/.. in the middle
            level += len(path.strip('.').split('.'))

    names = last_path.split('.')

    # fetch the correct context
    if refer_context or names[0] == '':
        try:
            value = contexts[level-1]
        except:
            value = None
    else:
        # support {{a.b.c.d.e}} like lookup
        value = lookup(names[0], contexts, level)

    # lookup for variables
    if not refer_context:
        for name in names[1:]:
            try:
                # a.num (a.1, a.2) to access list
                index = parse_int(name)
                name = parse_int(name) \
                    if isinstance(value, (list, tuple)) else name
                value = value[name]
            except:
                # not found
                value = None
                break

    # apply filters
    for f in filters:
        try:
            func = self.root.filters[f]
            value = func(value)
        except:
            continue

    return value
[ "def", "_lookup", "(", "self", ",", "dot_name", ",", "contexts", ")", ":", "# process filters", "filters", "=", "[", "x", "for", "x", "in", "map", "(", "lambda", "x", ":", "x", ".", "strip", "(", ")", ",", "dot_name", ".", "split", "(", "'|'", ")", ")", "]", "dot_name", "=", "filters", "[", "0", "]", "filters", "=", "filters", "[", "1", ":", "]", "# should support paths like '../../a.b.c/../d', etc.", "if", "not", "dot_name", ".", "startswith", "(", "'.'", ")", ":", "dot_name", "=", "'./'", "+", "dot_name", "paths", "=", "dot_name", ".", "split", "(", "'/'", ")", "last_path", "=", "paths", "[", "-", "1", "]", "# path like '../..' or ./../. etc.", "refer_context", "=", "last_path", "==", "''", "or", "last_path", "==", "'.'", "or", "last_path", "==", "'..'", "paths", "=", "paths", "if", "refer_context", "else", "paths", "[", ":", "-", "1", "]", "# count path level", "level", "=", "0", "for", "path", "in", "paths", ":", "if", "path", "==", "'..'", ":", "level", "-=", "1", "elif", "path", "!=", "'.'", ":", "# ../a.b.c/.. in the middle", "level", "+=", "len", "(", "path", ".", "strip", "(", "'.'", ")", ".", "split", "(", "'.'", ")", ")", "names", "=", "last_path", ".", "split", "(", "'.'", ")", "# fetch the correct context", "if", "refer_context", "or", "names", "[", "0", "]", "==", "''", ":", "try", ":", "value", "=", "contexts", "[", "level", "-", "1", "]", "except", ":", "value", "=", "None", "else", ":", "# support {{a.b.c.d.e}} like lookup", "value", "=", "lookup", "(", "names", "[", "0", "]", ",", "contexts", ",", "level", ")", "# lookup for variables", "if", "not", "refer_context", ":", "for", "name", "in", "names", "[", "1", ":", "]", ":", "try", ":", "# a.num (a.1, a.2) to access list", "index", "=", "parse_int", "(", "name", ")", "name", "=", "parse_int", "(", "name", ")", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", "else", "name", "value", "=", "value", "[", "name", "]", "except", ":", "# not found", "value", "=", "None", "break", "# apply filters", "for", "f", "in", "filters", ":", "try", ":", "func", "=", "self", ".", "root", ".", "filters", "[", "f", "]", "value", "=", "func", "(", "value", ")", "except", ":", "continue", "return", "value" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L271-L332
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: Token._render_children
language: python
func_code_string:

def _render_children(self, contexts, partials):
    """Render the children tokens"""
    ret = []
    for child in self.children:
        ret.append(child._render(contexts, partials))
    return EMPTYSTRING.join(ret)
[ "def", "_render_children", "(", "self", ",", "contexts", ",", "partials", ")", ":", "ret", "=", "[", "]", "for", "child", "in", "self", ".", "children", ":", "ret", ".", "append", "(", "child", ".", "_render", "(", "contexts", ",", "partials", ")", ")", "return", "EMPTYSTRING", ".", "join", "(", "ret", ")" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L334-L339
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: Variable._render
language: python
func_code_string:

def _render(self, contexts, partials):
    """render variable"""
    value = self._lookup(self.value, contexts)

    # lambda
    if callable(value):
        value = inner_render(str(value()), contexts, partials)

    return self._escape(value)
[ "def", "_render", "(", "self", ",", "contexts", ",", "partials", ")", ":", "value", "=", "self", ".", "_lookup", "(", "self", ".", "value", ",", "contexts", ")", "# lambda", "if", "callable", "(", "value", ")", ":", "value", "=", "inner_render", "(", "str", "(", "value", "(", ")", ")", ",", "contexts", ",", "partials", ")", "return", "self", ".", "_escape", "(", "value", ")" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L385-L393
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: Section._render
language: python
func_code_string:

def _render(self, contexts, partials):
    """render section"""
    val = self._lookup(self.value, contexts)
    if not val:
        # false value
        return EMPTYSTRING

    # normally json has types: number/string/list/map
    # but python has more, so we decide that map and string should not
    # iterate by default; others do.
    if hasattr(val, "__iter__") and not isinstance(val, (str, dict)):
        # non-empty lists
        ret = []
        for item in val:
            contexts.append(item)
            ret.append(self._render_children(contexts, partials))
            contexts.pop()
        if len(ret) <= 0:
            # empty lists
            return EMPTYSTRING
        return self._escape(''.join(ret))
    elif callable(val):
        # lambdas
        new_template = val(self.text)
        value = inner_render(new_template, contexts, partials,
                             self.delimiter)
    else:
        # context
        contexts.append(val)
        value = self._render_children(contexts, partials)
        contexts.pop()

    return self._escape(value)
[ "def", "_render", "(", "self", ",", "contexts", ",", "partials", ")", ":", "val", "=", "self", ".", "_lookup", "(", "self", ".", "value", ",", "contexts", ")", "if", "not", "val", ":", "# false value", "return", "EMPTYSTRING", "# normally json has types: number/string/list/map", "# but python has more, so we decide that map and string should not iterate", "# by default, other do.", "if", "hasattr", "(", "val", ",", "\"__iter__\"", ")", "and", "not", "isinstance", "(", "val", ",", "(", "str", ",", "dict", ")", ")", ":", "# non-empty lists", "ret", "=", "[", "]", "for", "item", "in", "val", ":", "contexts", ".", "append", "(", "item", ")", "ret", ".", "append", "(", "self", ".", "_render_children", "(", "contexts", ",", "partials", ")", ")", "contexts", ".", "pop", "(", ")", "if", "len", "(", "ret", ")", "<=", "0", ":", "# empty lists", "return", "EMPTYSTRING", "return", "self", ".", "_escape", "(", "''", ".", "join", "(", "ret", ")", ")", "elif", "callable", "(", "val", ")", ":", "# lambdas", "new_template", "=", "val", "(", "self", ".", "text", ")", "value", "=", "inner_render", "(", "new_template", ",", "contexts", ",", "partials", ",", "self", ".", "delimiter", ")", "else", ":", "# context", "contexts", ".", "append", "(", "val", ")", "value", "=", "self", ".", "_render_children", "(", "contexts", ",", "partials", ")", "contexts", ".", "pop", "(", ")", "return", "self", ".", "_escape", "(", "value", ")" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L400-L434
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: Inverted._render
language: python
func_code_string:

def _render(self, contexts, partials):
    """render inverted section"""
    val = self._lookup(self.value, contexts)
    if val:
        return EMPTYSTRING
    return self._render_children(contexts, partials)
[ "def", "_render", "(", "self", ",", "contexts", ",", "partials", ")", ":", "val", "=", "self", ".", "_lookup", "(", "self", ".", "value", ",", "contexts", ")", "if", "val", ":", "return", "EMPTYSTRING", "return", "self", ".", "_render_children", "(", "contexts", ",", "partials", ")" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L442-L447
repository_name: lotabout/pymustache
func_path_in_repository: pymustache/mustache.py
func_name: Partial._render
language: python
func_code_string:

def _render(self, contexts, partials):
    """render partials"""
    try:
        partial = partials[self.value]
    except KeyError as e:
        return self._escape(EMPTYSTRING)

    partial = re_insert_indent.sub(r'\1' + ' '*self.indent, partial)

    return inner_render(partial, contexts, partials, self.delimiter)
[ "def", "_render", "(", "self", ",", "contexts", ",", "partials", ")", ":", "try", ":", "partial", "=", "partials", "[", "self", ".", "value", "]", "except", "KeyError", "as", "e", ":", "return", "self", ".", "_escape", "(", "EMPTYSTRING", ")", "partial", "=", "re_insert_indent", ".", "sub", "(", "r'\\1'", "+", "' '", "*", "self", ".", "indent", ",", "partial", ")", "return", "inner_render", "(", "partial", ",", "contexts", ",", "partials", ",", "self", ".", "delimiter", ")" ]
split_name: train
func_code_url: https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L465-L474
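Taken together, the Variable, Section, Inverted, and Partial tokens back the package's public rendering entry point. A usage sketch, assuming the top-level pymustache.render(template, context, partials) helper advertised in the project README (treat the exact signature as an assumption):

import pymustache

template = 'Hello, {{name}}! {{#items}}[{{.}}] {{/items}}{{> footer}}'
context = {'name': 'world', 'items': ['a', 'b'], 'user': {'id': 7}}
partials = {'footer': 'id={{user.id}}'}

# should print roughly: Hello, world! [a] [b] id=7
print(pymustache.render(template, context, partials))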
repository_name: rodluger/everest
func_path_in_repository: everest/missions/k2/k2.py
func_name: Setup
language: python
func_code_string:

def Setup():
    '''
    Called when the code is installed. Sets up directories and downloads
    the K2 catalog.

    '''
    if not os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'cbv')):
        os.makedirs(os.path.join(EVEREST_DAT, 'k2', 'cbv'))
    GetK2Stars(clobber=False)
[ "def", "Setup", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'cbv'", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'cbv'", ")", ")", "GetK2Stars", "(", "clobber", "=", "False", ")" ]
split_name: train
func_code_url: https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L50-L59
repository_name: rodluger/everest
func_path_in_repository: everest/missions/k2/k2.py
func_name: CDPP
language: python
func_code_string:

def CDPP(flux, mask=[], cadence='lc'):
    '''
    Compute the proxy 6-hr CDPP metric.

    :param array_like flux: The flux array to compute the CDPP for
    :param array_like mask: The indices to be masked
    :param str cadence: The light curve cadence. Default `lc`

    '''
    # 13 cadences is 6.5 hours
    rmswin = 13
    # Smooth the data on a 2 day timescale
    svgwin = 49

    # If short cadence, need to downbin
    if cadence == 'sc':
        newsize = len(flux) // 30
        flux = Downbin(flux, newsize, operation='mean')

    flux_savgol = SavGol(np.delete(flux, mask), win=svgwin)
    if len(flux_savgol):
        return Scatter(flux_savgol / np.nanmedian(flux_savgol),
                       remove_outliers=True, win=rmswin)
    else:
        return np.nan
[ "def", "CDPP", "(", "flux", ",", "mask", "=", "[", "]", ",", "cadence", "=", "'lc'", ")", ":", "# 13 cadences is 6.5 hours", "rmswin", "=", "13", "# Smooth the data on a 2 day timescale", "svgwin", "=", "49", "# If short cadence, need to downbin", "if", "cadence", "==", "'sc'", ":", "newsize", "=", "len", "(", "flux", ")", "//", "30", "flux", "=", "Downbin", "(", "flux", ",", "newsize", ",", "operation", "=", "'mean'", ")", "flux_savgol", "=", "SavGol", "(", "np", ".", "delete", "(", "flux", ",", "mask", ")", ",", "win", "=", "svgwin", ")", "if", "len", "(", "flux_savgol", ")", ":", "return", "Scatter", "(", "flux_savgol", "/", "np", ".", "nanmedian", "(", "flux_savgol", ")", ",", "remove_outliers", "=", "True", ",", "win", "=", "rmswin", ")", "else", ":", "return", "np", ".", "nan" ]
split_name: train
func_code_url: https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L169-L194
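Downbin, SavGol, and Scatter are everest internals, so an exact standalone run needs the package. A rough numpy/scipy approximation of the same proxy metric (an assumed stand-in, not the library's implementation):

import numpy as np
from scipy.signal import savgol_filter

def cdpp_proxy(flux, mask=(), cadence='lc'):
    '''Rough stand-in for everest's proxy 6-hr CDPP, in ppm.'''
    flux = np.delete(np.asarray(flux, dtype=float), mask)
    if cadence == 'sc':
        # downbin 1-minute short cadence to 30-minute long cadence
        n = len(flux) // 30
        flux = flux[:n * 30].reshape(n, 30).mean(axis=1)
    if len(flux) < 49:
        return np.nan
    # remove ~2-day trends (49-cadence Savitzky-Golay window) ...
    detrended = flux - savgol_filter(flux, 49, 2) + np.nanmedian(flux)
    normed = detrended / np.nanmedian(detrended)
    # ... then take the median standard error of the mean over
    # 13-cadence (~6.5 hr) windows
    win = 13
    sems = [np.std(normed[i:i + win]) / np.sqrt(win)
            for i in range(0, len(normed) - win + 1, win)]
    return 1e6 * np.nanmedian(sems)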
repository_name: rodluger/everest
func_path_in_repository: everest/missions/k2/k2.py
func_name: GetData
language: python
func_code_string:

def GetData(EPIC, season=None, cadence='lc', clobber=False, delete_raw=False,
            aperture_name='k2sff_15', saturated_aperture_name='k2sff_19',
            max_pixels=75, download_only=False, saturation_tolerance=-0.1,
            bad_bits=[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17],
            get_hires=True, get_nearby=True, **kwargs):
    '''
    Returns a :py:obj:`DataContainer` instance with the
    raw data for the target.

    :param int EPIC: The EPIC ID number
    :param int season: The observing season (campaign). Default :py:obj:`None`
    :param str cadence: The light curve cadence. Default `lc`
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param bool delete_raw: Delete the FITS TPF after processing it? \
           Default :py:obj:`False`
    :param str aperture_name: The name of the aperture to use. Select \
           `custom` to call :py:func:`GetCustomAperture`. Default `k2sff_15`
    :param str saturated_aperture_name: The name of the aperture to use if \
           the target is saturated. Default `k2sff_19`
    :param int max_pixels: Maximum number of pixels in the TPF. Default 75
    :param bool download_only: Download raw TPF and return? Default \
           :py:obj:`False`
    :param float saturation_tolerance: Target is considered saturated \
           if flux is within this fraction of the pixel well depth. \
           Default -0.1
    :param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider \
           outliers when computing the model. \
           Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
    :param bool get_hires: Download a high resolution image of the target? \
           Default :py:obj:`True`
    :param bool get_nearby: Retrieve location of nearby sources? \
           Default :py:obj:`True`

    '''
    # Campaign no.
    if season is None:
        campaign = Season(EPIC)
        if hasattr(campaign, '__len__'):
            raise AttributeError(
                "Please choose a campaign/season for this target: %s."
                % campaign)
    else:
        campaign = season

    # Is there short cadence data available for this target?
    short_cadence = HasShortCadence(EPIC, season=campaign)
    if cadence == 'sc' and not short_cadence:
        raise ValueError("Short cadence data not available for this target.")

    # Local file name
    filename = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
                            ('%09d' % EPIC)[:4] + '00000',
                            ('%09d' % EPIC)[4:], 'data.npz')

    # Download?
    if clobber or not os.path.exists(filename):

        # Get the TPF
        tpf = os.path.join(KPLR_ROOT, 'data', 'k2', 'target_pixel_files',
                           str(EPIC),
                           'ktwo%09d-c%02d_lpd-targ.fits.gz'
                           % (EPIC, campaign))
        sc_tpf = os.path.join(KPLR_ROOT, 'data', 'k2', 'target_pixel_files',
                              str(EPIC),
                              'ktwo%09d-c%02d_spd-targ.fits.gz'
                              % (EPIC, campaign))
        if clobber or not os.path.exists(tpf):
            kplr_client.k2_star(EPIC).get_target_pixel_files(fetch=True)

        with pyfits.open(tpf) as f:
            qdata = f[1].data

            # Get the TPF aperture
            tpf_aperture = (f[2].data & 2) // 2

            # Get the enlarged TPF aperture
            tpf_big_aperture = np.array(tpf_aperture)
            for i in range(tpf_big_aperture.shape[0]):
                for j in range(tpf_big_aperture.shape[1]):
                    if f[2].data[i][j] == 1:
                        for n in [(i - 1, j), (i + 1, j),
                                  (i, j - 1), (i, j + 1)]:
                            if n[0] >= 0 and n[0] < tpf_big_aperture.shape[0]:
                                if n[1] >= 0 and n[1] < \
                                        tpf_big_aperture.shape[1]:
                                    if tpf_aperture[n[0]][n[1]] == 1:
                                        tpf_big_aperture[i][j] = 1

        # Is there short cadence data?
        if short_cadence:
            with pyfits.open(sc_tpf) as f:
                sc_qdata = f[1].data

        # Get K2SFF apertures
        try:
            k2sff = kplr.K2SFF(EPIC, sci_campaign=campaign)
            k2sff_apertures = k2sff.apertures
            if delete_raw:
                os.remove(k2sff._file)
        except:
            k2sff_apertures = [None for i in range(20)]

        # Make a dict of all our apertures
        # We're not getting K2SFF apertures 0-9 any more
        apertures = {'tpf': tpf_aperture, 'tpf_big': tpf_big_aperture}
        for i in range(10, 20):
            apertures.update({'k2sff_%02d' % i: k2sff_apertures[i]})

        # Get the header info
        fitsheader = [pyfits.getheader(tpf, 0).cards,
                      pyfits.getheader(tpf, 1).cards,
                      pyfits.getheader(tpf, 2).cards]
        if short_cadence:
            sc_fitsheader = [pyfits.getheader(sc_tpf, 0).cards,
                             pyfits.getheader(sc_tpf, 1).cards,
                             pyfits.getheader(sc_tpf, 2).cards]
        else:
            sc_fitsheader = None

        # Get a hi res image of the target
        if get_hires:
            hires = GetHiResImage(EPIC)
        else:
            hires = None

        # Get nearby sources
        if get_nearby:
            nearby = GetSources(EPIC)
        else:
            nearby = []

        # Delete?
        if delete_raw:
            os.remove(tpf)
            if short_cadence:
                os.remove(sc_tpf)

        # Get the arrays
        cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
        time = np.array(qdata.field('TIME'), dtype='float64')
        fpix = np.array(qdata.field('FLUX'), dtype='float64')
        fpix_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
        qual = np.array(qdata.field('QUALITY'), dtype=int)

        # Get rid of NaNs in the time array by interpolating
        naninds = np.where(np.isnan(time))
        time = Interpolate(np.arange(0, len(time)), naninds, time)

        # Get the motion vectors (if available!)
        pc1 = np.array(qdata.field('POS_CORR1'), dtype='float64')
        pc2 = np.array(qdata.field('POS_CORR2'), dtype='float64')
        if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)):
            pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1)
            pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2)
        else:
            pc1 = None
            pc2 = None

        # Do the same for short cadence
        if short_cadence:
            sc_cadn = np.array(sc_qdata.field('CADENCENO'), dtype='int32')
            sc_time = np.array(sc_qdata.field('TIME'), dtype='float64')
            sc_fpix = np.array(sc_qdata.field('FLUX'), dtype='float64')
            sc_fpix_err = np.array(sc_qdata.field('FLUX_ERR'),
                                   dtype='float64')
            sc_qual = np.array(sc_qdata.field('QUALITY'), dtype=int)
            sc_naninds = np.where(np.isnan(sc_time))
            sc_time = Interpolate(
                np.arange(0, len(sc_time)), sc_naninds, sc_time)
            sc_pc1 = np.array(sc_qdata.field('POS_CORR1'), dtype='float64')
            sc_pc2 = np.array(sc_qdata.field('POS_CORR2'), dtype='float64')
            if not np.all(np.isnan(sc_pc1)) and not np.all(np.isnan(sc_pc2)):
                sc_pc1 = Interpolate(
                    sc_time, np.where(np.isnan(sc_pc1)), sc_pc1)
                sc_pc2 = Interpolate(
                    sc_time, np.where(np.isnan(sc_pc2)), sc_pc2)
            else:
                sc_pc1 = None
                sc_pc2 = None
        else:
            sc_cadn = None
            sc_time = None
            sc_fpix = None
            sc_fpix_err = None
            sc_qual = None
            sc_pc1 = None
            sc_pc2 = None

        # Static pixel images for plotting
        pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]]

        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/
        # atomic-writing-to-file-with-python
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        f = NamedTemporaryFile("wb", delete=False)
        np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix,
                            fpix_err=fpix_err, qual=qual,
                            apertures=apertures, pc1=pc1, pc2=pc2,
                            fitsheader=fitsheader,
                            pixel_images=pixel_images, nearby=nearby,
                            hires=hires, sc_cadn=sc_cadn, sc_time=sc_time,
                            sc_fpix=sc_fpix, sc_fpix_err=sc_fpix_err,
                            sc_qual=sc_qual, sc_pc1=sc_pc1, sc_pc2=sc_pc2,
                            sc_fitsheader=sc_fitsheader)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, filename)

        if download_only:
            return

    # Load
    data = np.load(filename)
    apertures = data['apertures'][()]
    pixel_images = data['pixel_images']
    nearby = data['nearby']
    hires = data['hires'][()]

    if cadence == 'lc':
        fitsheader = data['fitsheader']
        cadn = data['cadn']
        time = data['time']
        fpix = data['fpix']
        fpix_err = data['fpix_err']
        qual = data['qual']
        pc1 = data['pc1']
        pc2 = data['pc2']
    elif cadence == 'sc':
        fitsheader = data['sc_fitsheader']
        cadn = data['sc_cadn']
        time = data['sc_time']
        fpix = data['sc_fpix']
        fpix_err = data['sc_fpix_err']
        qual = data['sc_qual']
        pc1 = data['sc_pc1']
        pc2 = data['sc_pc2']
    else:
        raise ValueError("Invalid value for the cadence.")

    # Select the "saturated aperture" to check if the star is saturated
    # If it is, we will use this aperture instead
    if saturated_aperture_name == 'custom':
        saturated_aperture = GetCustomAperture(data)
    else:
        if saturated_aperture_name is None:
            saturated_aperture_name = 'k2sff_19'
        saturated_aperture = apertures[saturated_aperture_name]
        if saturated_aperture is None:
            log.error("Invalid aperture selected. Defaulting to `tpf_big`.")
            saturated_aperture_name = 'tpf_big'
            saturated_aperture = apertures[saturated_aperture_name]

    # HACK: Some C05 K2SFF apertures don't match the target pixel file
    # pixel grid size. This is likely because they're defined on the M67
    # superstamp. For now, let's ignore these stars.
    if saturated_aperture.shape != fpix.shape[1:]:
        log.error("Aperture size mismatch!")
        return None

    # Compute the saturation flux and the 97.5th percentile
    # flux in each pixel of the saturated aperture. We're going
    # to compare these to decide if the star is saturated.
    satflx = SaturationFlux(EPIC, campaign=campaign) * \
        (1. + saturation_tolerance)
    f97 = np.zeros((fpix.shape[1], fpix.shape[2]))
    for i in range(fpix.shape[1]):
        for j in range(fpix.shape[2]):
            if saturated_aperture[i, j]:
                # Let's remove NaNs...
                tmp = np.delete(fpix[:, i, j], np.where(
                    np.isnan(fpix[:, i, j])))
                # ... and really bad outliers...
                if len(tmp):
                    f = SavGol(tmp)
                    med = np.nanmedian(f)
                    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
                    bad = np.where((f > med + 10. * MAD) |
                                   (f < med - 10. * MAD))[0]
                    np.delete(tmp, bad)
                    # ... so we can compute the 97.5th percentile flux
                    i97 = int(0.975 * len(tmp))
                    tmp = tmp[np.argsort(tmp)[i97]]
                    f97[i, j] = tmp

    # Check if any of the pixels are actually saturated
    if np.nanmax(f97) <= satflx:
        log.info("No saturated columns detected.")
        saturated = False
    else:
        log.info("Saturated pixel(s) found. Switching to aperture `%s`."
                 % saturated_aperture_name)
        aperture_name = saturated_aperture_name
        saturated = True

    # Now grab the aperture we'll actually use
    if aperture_name == 'custom':
        aperture = GetCustomAperture(data)
    else:
        if aperture_name is None:
            aperture_name = 'k2sff_15'
        aperture = apertures[aperture_name]
        if aperture is None:
            log.error("Invalid aperture selected. Defaulting to `tpf_big`.")
            aperture_name = 'tpf_big'
            aperture = apertures[aperture_name]

    # HACK: Some C05 K2SFF apertures don't match the target pixel file
    # pixel grid size. This is likely because they're defined on the M67
    # superstamp. For now, let's ignore these stars.
    if aperture.shape != fpix.shape[1:]:
        log.error("Aperture size mismatch!")
        return None

    # Now we check if the aperture is too big. Can lead to memory errors...
    # Treat saturated and unsaturated stars differently.
    if saturated:

        # Need to check if we have too many pixels *after* collapsing
        # columns. Sort the apertures in decreasing order of pixels, but
        # keep the aperture chosen by the user first.
        aperture_names = np.array(list(apertures.keys()))
        npix_per_aperture = np.array(
            [np.sum(apertures[k]) for k in aperture_names])
        aperture_names = aperture_names[np.argsort(npix_per_aperture)[::-1]]
        aperture_names = np.append([aperture_name], np.delete(
            aperture_names, np.argmax(aperture_names == aperture_name)))

        # Loop through them. Pick the first one that satisfies
        # the `max_pixels` constraint
        for aperture_name in aperture_names:
            aperture = apertures[aperture_name]
            aperture[np.isnan(fpix[0])] = 0
            ncol = 0
            apcopy = np.array(aperture)
            for j in range(apcopy.shape[1]):
                if np.any(f97[:, j] > satflx):
                    apcopy[:, j] = 0
                    ncol += 1
            if np.sum(apcopy) + ncol <= max_pixels:
                break
        if np.sum(apcopy) + ncol > max_pixels:
            log.error(
                "No apertures available with fewer than %d pixels. Aborting."
                % max_pixels)
            return None

        # Now, finally, we collapse the saturated columns into single pixels
        # and make the pixel array 2D
        ncol = 0
        fpixnew = []
        ferrnew = []

        # HACK: K2SFF sometimes clips the heads/tails of saturated columns
        # That's really bad, since that's where all the information is. Let's
        # artificially extend the aperture by two pixels at the top and bottom
        # of each saturated column. This *could* increase contamination, but
        # it's unlikely since the saturated target is by definition really
        # bright
        ext = 0
        for j in range(aperture.shape[1]):
            if np.any(f97[:, j] > satflx):
                for i in range(aperture.shape[0]):
                    if (aperture[i, j] == 0) and \
                            (np.nanmedian(fpix[:, i, j]) > 0):
                        if (i + 2 < aperture.shape[0]) and \
                                aperture[i + 2, j] == 1:
                            aperture[i, j] = 2
                            ext += 1
                        elif (i + 1 < aperture.shape[0]) and \
                                aperture[i + 1, j] == 1:
                            aperture[i, j] = 2
                            ext += 1
                        elif (i - 1 >= 0) and aperture[i - 1, j] == 1:
                            aperture[i, j] = 2
                            ext += 1
                        elif (i - 2 >= 0) and aperture[i - 2, j] == 1:
                            aperture[i, j] = 2
                            ext += 1
        if ext:
            log.info("Extended saturated columns by %d pixel(s)." % ext)

        for j in range(aperture.shape[1]):
            if np.any(f97[:, j] > satflx):
                marked = False
                collapsed = np.zeros(len(fpix[:, 0, 0]))
                collapsed_err2 = np.zeros(len(fpix[:, 0, 0]))
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        if not marked:
                            aperture[i, j] = AP_COLLAPSED_PIXEL
                            marked = True
                        else:
                            aperture[i, j] = AP_SATURATED_PIXEL
                        collapsed += fpix[:, i, j]
                        collapsed_err2 += fpix_err[:, i, j] ** 2
                if np.any(collapsed):
                    fpixnew.append(collapsed)
                    ferrnew.append(np.sqrt(collapsed_err2))
                    ncol += 1
            else:
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        fpixnew.append(fpix[:, i, j])
                        ferrnew.append(fpix_err[:, i, j])
        fpix2D = np.array(fpixnew).T
        fpix_err2D = np.array(ferrnew).T
        log.info("Collapsed %d saturated column(s)." % ncol)

    else:

        # Check if there are too many pixels
        if np.sum(aperture) > max_pixels:

            # This case is simpler: we just pick the largest aperture
            # that's less than or equal to `max_pixels`
            keys = list(apertures.keys())
            npix = np.array([np.sum(apertures[k]) for k in keys])
            aperture_name = keys[np.argmax(npix * (npix <= max_pixels))]
            aperture = apertures[aperture_name]
            aperture[np.isnan(fpix[0])] = 0
            if np.sum(aperture) > max_pixels:
                log.error("No apertures available with fewer than " +
                          "%d pixels. Aborting." % max_pixels)
                return None
            log.warn(
                "Selected aperture is too big. Proceeding with aperture " +
                "`%s` instead." % aperture_name)

        # Make the pixel flux array 2D
        aperture[np.isnan(fpix[0])] = 0
        ap = np.where(aperture & 1)
        fpix2D = np.array([f[ap] for f in fpix], dtype='float64')
        fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64')

    # Compute the background
    binds = np.where(aperture ^ 1)
    if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0):
        bkg = np.nanmedian(np.array([f[binds] for f in fpix],
                                    dtype='float64'), axis=1)
        # Uncertainty of the median:
        # http://davidmlane.com/hyperstat/A106993.html
        bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err],
                                                dtype='float64'), axis=1) \
            / np.sqrt(len(binds[0]))
        bkg = bkg.reshape(-1, 1)
        bkg_err = bkg_err.reshape(-1, 1)
    else:
        bkg = 0.
        bkg_err = 0.

    # Make everything 2D and remove the background
    fpix = fpix2D - bkg
    fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2)
    flux = np.sum(fpix, axis=1)
    ferr = np.sqrt(np.sum(fpix_err ** 2, axis=1))

    # Get NaN data points
    nanmask = np.where(np.isnan(flux) | (flux == 0))[0]

    # Get flagged data points -- we won't train our model on them
    badmask = []
    for b in bad_bits:
        badmask += list(np.where(qual & 2 ** (b - 1))[0])

    # Flag >10 sigma outliers -- same thing.
    tmpmask = np.array(list(set(np.concatenate([badmask, nanmask]))))
    t = np.delete(time, tmpmask)
    f = np.delete(flux, tmpmask)
    f = SavGol(f)
    med = np.nanmedian(f)
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
    badmask.extend([np.argmax(time == t[i]) for i in bad])

    # Campaign 2 hack: the first day or two are screwed up
    if campaign == 2:
        badmask.extend(np.where(time < 2061.5)[0])

    # TODO: Fix time offsets in first half of
    # Campaign 0. See note in everest 1.0 code

    # Finalize the mask
    badmask = np.array(sorted(list(set(badmask))))

    # Interpolate the nans
    fpix = Interpolate(time, nanmask, fpix)
    fpix_err = Interpolate(time, nanmask, fpix_err)

    # Return
    data = DataContainer()
    data.ID = EPIC
    data.campaign = campaign
    data.cadn = cadn
    data.time = time
    data.fpix = fpix
    data.fpix_err = fpix_err
    data.nanmask = nanmask
    data.badmask = badmask
    data.aperture = aperture
    data.aperture_name = aperture_name
    data.apertures = apertures
    data.quality = qual
    data.Xpos = pc1
    data.Ypos = pc2
    data.meta = fitsheader
    data.mag = fitsheader[0]['KEPMAG'][1]
    data.pixel_images = pixel_images
    data.nearby = nearby
    data.hires = hires
    data.saturated = saturated
    data.bkg = bkg
    return data
% ncol) else: # Check if there are too many pixels if np.sum(aperture) > max_pixels: # This case is simpler: we just pick the largest aperture # that's less than or equal to `max_pixels` keys = list(apertures.keys()) npix = np.array([np.sum(apertures[k]) for k in keys]) aperture_name = keys[np.argmax(npix * (npix <= max_pixels))] aperture = apertures[aperture_name] aperture[np.isnan(fpix[0])] = 0 if np.sum(aperture) > max_pixels: log.error("No apertures available with fewer than " + "%d pixels. Aborting." % max_pixels) return None log.warn( "Selected aperture is too big. Proceeding with aperture " + "`%s` instead." % aperture_name) # Make the pixel flux array 2D aperture[np.isnan(fpix[0])] = 0 ap = np.where(aperture & 1) fpix2D = np.array([f[ap] for f in fpix], dtype='float64') fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64') # Compute the background binds = np.where(aperture ^ 1) if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0): bkg = np.nanmedian(np.array([f[binds] for f in fpix], dtype='float64'), axis=1) # Uncertainty of the median: # http://davidmlane.com/hyperstat/A106993.html bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err], dtype='float64'), axis=1) \ / np.sqrt(len(binds[0])) bkg = bkg.reshape(-1, 1) bkg_err = bkg_err.reshape(-1, 1) else: bkg = 0. bkg_err = 0. # Make everything 2D and remove the background fpix = fpix2D - bkg fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2) flux = np.sum(fpix, axis=1) ferr = np.sqrt(np.sum(fpix_err ** 2, axis=1)) # Get NaN data points nanmask = np.where(np.isnan(flux) | (flux == 0))[0] # Get flagged data points -- we won't train our model on them badmask = [] for b in bad_bits: badmask += list(np.where(qual & 2 ** (b - 1))[0]) # Flag >10 sigma outliers -- same thing. tmpmask = np.array(list(set(np.concatenate([badmask, nanmask])))) t = np.delete(time, tmpmask) f = np.delete(flux, tmpmask) f = SavGol(f) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0] badmask.extend([np.argmax(time == t[i]) for i in bad]) # Campaign 2 hack: the first day or two are screwed up if campaign == 2: badmask.extend(np.where(time < 2061.5)[0]) # TODO: Fix time offsets in first half of # Campaign 0. See note in everest 1.0 code # Finalize the mask badmask = np.array(sorted(list(set(badmask)))) # Interpolate the nans fpix = Interpolate(time, nanmask, fpix) fpix_err = Interpolate(time, nanmask, fpix_err) # Return data = DataContainer() data.ID = EPIC data.campaign = campaign data.cadn = cadn data.time = time data.fpix = fpix data.fpix_err = fpix_err data.nanmask = nanmask data.badmask = badmask data.aperture = aperture data.aperture_name = aperture_name data.apertures = apertures data.quality = qual data.Xpos = pc1 data.Ypos = pc2 data.meta = fitsheader data.mag = fitsheader[0]['KEPMAG'][1] data.pixel_images = pixel_images data.nearby = nearby data.hires = hires data.saturated = saturated data.bkg = bkg return data
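The download branch of GetData above persists its arrays with a write-then-rename idiom (temporary file, flush, fsync, move) so that an interrupted run never leaves a half-written data.npz behind. A minimal sketch of the same pattern, with placeholder array names rather than the full GetData payload; staging the temporary file next to the target via dir=d is an extra safeguard assumed here (GetData uses the default temp location), since the final rename is only guaranteed atomic when both paths share a filesystem:

import os
import shutil
from tempfile import NamedTemporaryFile

import numpy as np

def atomic_savez(filename, **arrays):
    # Stage the write in a temp file in the target's directory, so the
    # final rename stays on one filesystem and is atomic on POSIX.
    d = os.path.dirname(filename) or "."
    os.makedirs(d, exist_ok=True)
    f = NamedTemporaryFile("wb", delete=False, dir=d)
    try:
        np.savez_compressed(f, **arrays)
        f.flush()
        os.fsync(f.fileno())  # push the bytes to disk before renaming
    finally:
        f.close()
    shutil.move(f.name, filename)

atomic_savez("data.npz", time=np.arange(10.), flux=np.ones(10))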
[ "def", "GetData", "(", "EPIC", ",", "season", "=", "None", ",", "cadence", "=", "'lc'", ",", "clobber", "=", "False", ",", "delete_raw", "=", "False", ",", "aperture_name", "=", "'k2sff_15'", ",", "saturated_aperture_name", "=", "'k2sff_19'", ",", "max_pixels", "=", "75", ",", "download_only", "=", "False", ",", "saturation_tolerance", "=", "-", "0.1", ",", "bad_bits", "=", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "8", ",", "9", ",", "11", ",", "12", ",", "13", ",", "14", ",", "16", ",", "17", "]", ",", "get_hires", "=", "True", ",", "get_nearby", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# Campaign no.", "if", "season", "is", "None", ":", "campaign", "=", "Season", "(", "EPIC", ")", "if", "hasattr", "(", "campaign", ",", "'__len__'", ")", ":", "raise", "AttributeError", "(", "\"Please choose a campaign/season for this target: %s.\"", "%", "campaign", ")", "else", ":", "campaign", "=", "season", "# Is there short cadence data available for this target?", "short_cadence", "=", "HasShortCadence", "(", "EPIC", ",", "season", "=", "campaign", ")", "if", "cadence", "==", "'sc'", "and", "not", "short_cadence", ":", "raise", "ValueError", "(", "\"Short cadence data not available for this target.\"", ")", "# Local file name", "filename", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "campaign", ",", "(", "'%09d'", "%", "EPIC", ")", "[", ":", "4", "]", "+", "'00000'", ",", "(", "'%09d'", "%", "EPIC", ")", "[", "4", ":", "]", ",", "'data.npz'", ")", "# Download?", "if", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "# Get the TPF", "tpf", "=", "os", ".", "path", ".", "join", "(", "KPLR_ROOT", ",", "'data'", ",", "'k2'", ",", "'target_pixel_files'", ",", "str", "(", "EPIC", ")", ",", "'ktwo%09d-c%02d_lpd-targ.fits.gz'", "%", "(", "EPIC", ",", "campaign", ")", ")", "sc_tpf", "=", "os", ".", "path", ".", "join", "(", "KPLR_ROOT", ",", "'data'", ",", "'k2'", ",", "'target_pixel_files'", ",", "str", "(", "EPIC", ")", ",", "'ktwo%09d-c%02d_spd-targ.fits.gz'", "%", "(", "EPIC", ",", "campaign", ")", ")", "if", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "tpf", ")", ":", "kplr_client", ".", "k2_star", "(", "EPIC", ")", ".", "get_target_pixel_files", "(", "fetch", "=", "True", ")", "with", "pyfits", ".", "open", "(", "tpf", ")", "as", "f", ":", "qdata", "=", "f", "[", "1", "]", ".", "data", "# Get the TPF aperture", "tpf_aperture", "=", "(", "f", "[", "2", "]", ".", "data", "&", "2", ")", "//", "2", "# Get the enlarged TPF aperture", "tpf_big_aperture", "=", "np", ".", "array", "(", "tpf_aperture", ")", "for", "i", "in", "range", "(", "tpf_big_aperture", ".", "shape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "tpf_big_aperture", ".", "shape", "[", "1", "]", ")", ":", "if", "f", "[", "2", "]", ".", "data", "[", "i", "]", "[", "j", "]", "==", "1", ":", "for", "n", "in", "[", "(", "i", "-", "1", ",", "j", ")", ",", "(", "i", "+", "1", ",", "j", ")", ",", "(", "i", ",", "j", "-", "1", ")", ",", "(", "i", ",", "j", "+", "1", ")", "]", ":", "if", "n", "[", "0", "]", ">=", "0", "and", "n", "[", "0", "]", "<", "tpf_big_aperture", ".", "shape", "[", "0", "]", ":", "if", "n", "[", "1", "]", ">=", "0", "and", "n", "[", "1", "]", "<", "tpf_big_aperture", ".", "shape", "[", "1", "]", ":", "if", "tpf_aperture", "[", "n", "[", "0", "]", "]", "[", "n", "[", "1", "]", "]", "==", "1", ":", "tpf_big_aperture", "[", "i", "]", "[", "j", "]", "=", "1", "# Is there short 
cadence data?", "if", "short_cadence", ":", "with", "pyfits", ".", "open", "(", "sc_tpf", ")", "as", "f", ":", "sc_qdata", "=", "f", "[", "1", "]", ".", "data", "# Get K2SFF apertures", "try", ":", "k2sff", "=", "kplr", ".", "K2SFF", "(", "EPIC", ",", "sci_campaign", "=", "campaign", ")", "k2sff_apertures", "=", "k2sff", ".", "apertures", "if", "delete_raw", ":", "os", ".", "remove", "(", "k2sff", ".", "_file", ")", "except", ":", "k2sff_apertures", "=", "[", "None", "for", "i", "in", "range", "(", "20", ")", "]", "# Make a dict of all our apertures", "# We're not getting K2SFF apertures 0-9 any more", "apertures", "=", "{", "'tpf'", ":", "tpf_aperture", ",", "'tpf_big'", ":", "tpf_big_aperture", "}", "for", "i", "in", "range", "(", "10", ",", "20", ")", ":", "apertures", ".", "update", "(", "{", "'k2sff_%02d'", "%", "i", ":", "k2sff_apertures", "[", "i", "]", "}", ")", "# Get the header info", "fitsheader", "=", "[", "pyfits", ".", "getheader", "(", "tpf", ",", "0", ")", ".", "cards", ",", "pyfits", ".", "getheader", "(", "tpf", ",", "1", ")", ".", "cards", ",", "pyfits", ".", "getheader", "(", "tpf", ",", "2", ")", ".", "cards", "]", "if", "short_cadence", ":", "sc_fitsheader", "=", "[", "pyfits", ".", "getheader", "(", "sc_tpf", ",", "0", ")", ".", "cards", ",", "pyfits", ".", "getheader", "(", "sc_tpf", ",", "1", ")", ".", "cards", ",", "pyfits", ".", "getheader", "(", "sc_tpf", ",", "2", ")", ".", "cards", "]", "else", ":", "sc_fitsheader", "=", "None", "# Get a hi res image of the target", "if", "get_hires", ":", "hires", "=", "GetHiResImage", "(", "EPIC", ")", "else", ":", "hires", "=", "None", "# Get nearby sources", "if", "get_nearby", ":", "nearby", "=", "GetSources", "(", "EPIC", ")", "else", ":", "nearby", "=", "[", "]", "# Delete?", "if", "delete_raw", ":", "os", ".", "remove", "(", "tpf", ")", "if", "short_cadence", ":", "os", ".", "remove", "(", "sc_tpf", ")", "# Get the arrays", "cadn", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'CADENCENO'", ")", ",", "dtype", "=", "'int32'", ")", "time", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'TIME'", ")", ",", "dtype", "=", "'float64'", ")", "fpix", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'FLUX'", ")", ",", "dtype", "=", "'float64'", ")", "fpix_err", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'FLUX_ERR'", ")", ",", "dtype", "=", "'float64'", ")", "qual", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'QUALITY'", ")", ",", "dtype", "=", "int", ")", "# Get rid of NaNs in the time array by interpolating", "naninds", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "time", ")", ")", "time", "=", "Interpolate", "(", "np", ".", "arange", "(", "0", ",", "len", "(", "time", ")", ")", ",", "naninds", ",", "time", ")", "# Get the motion vectors (if available!)", "pc1", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'POS_CORR1'", ")", ",", "dtype", "=", "'float64'", ")", "pc2", "=", "np", ".", "array", "(", "qdata", ".", "field", "(", "'POS_CORR2'", ")", ",", "dtype", "=", "'float64'", ")", "if", "not", "np", ".", "all", "(", "np", ".", "isnan", "(", "pc1", ")", ")", "and", "not", "np", ".", "all", "(", "np", ".", "isnan", "(", "pc2", ")", ")", ":", "pc1", "=", "Interpolate", "(", "time", ",", "np", ".", "where", "(", "np", ".", "isnan", "(", "pc1", ")", ")", ",", "pc1", ")", "pc2", "=", "Interpolate", "(", "time", ",", "np", ".", "where", "(", "np", ".", "isnan", "(", "pc2", ")", ")", ",", "pc2", ")", "else", ":", "pc1", "=", "None", "pc2", "=", 
"None", "# Do the same for short cadence", "if", "short_cadence", ":", "sc_cadn", "=", "np", ".", "array", "(", "sc_qdata", ".", "field", "(", "'CADENCENO'", ")", ",", "dtype", "=", "'int32'", ")", "sc_time", "=", "np", ".", "array", "(", "sc_qdata", ".", "field", "(", "'TIME'", ")", ",", "dtype", "=", "'float64'", ")", "sc_fpix", "=", "np", ".", "array", "(", "sc_qdata", ".", "field", "(", "'FLUX'", ")", ",", "dtype", "=", "'float64'", ")", "sc_fpix_err", "=", "np", ".", "array", "(", "sc_qdata", ".", "field", "(", "'FLUX_ERR'", ")", ",", "dtype", "=", "'float64'", ")", "sc_qual", "=", "np", ".", "array", "(", "sc_qdata", ".", "field", "(", "'QUALITY'", ")", ",", "dtype", "=", "int", ")", "sc_naninds", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "sc_time", ")", ")", "sc_time", "=", "Interpolate", "(", "np", ".", "arange", "(", "0", ",", "len", "(", "sc_time", ")", ")", ",", "sc_naninds", ",", "sc_time", ")", "sc_pc1", "=", "np", ".", "array", "(", "sc_qdata", ".", "field", "(", "'POS_CORR1'", ")", ",", "dtype", "=", "'float64'", ")", "sc_pc2", "=", "np", ".", "array", "(", "sc_qdata", ".", "field", "(", "'POS_CORR2'", ")", ",", "dtype", "=", "'float64'", ")", "if", "not", "np", ".", "all", "(", "np", ".", "isnan", "(", "sc_pc1", ")", ")", "and", "not", "np", ".", "all", "(", "np", ".", "isnan", "(", "sc_pc2", ")", ")", ":", "sc_pc1", "=", "Interpolate", "(", "sc_time", ",", "np", ".", "where", "(", "np", ".", "isnan", "(", "sc_pc1", ")", ")", ",", "sc_pc1", ")", "sc_pc2", "=", "Interpolate", "(", "sc_time", ",", "np", ".", "where", "(", "np", ".", "isnan", "(", "sc_pc2", ")", ")", ",", "sc_pc2", ")", "else", ":", "sc_pc1", "=", "None", "sc_pc2", "=", "None", "else", ":", "sc_cadn", "=", "None", "sc_time", "=", "None", "sc_fpix", "=", "None", "sc_fpix_err", "=", "None", "sc_qual", "=", "None", "sc_pc1", "=", "None", "sc_pc2", "=", "None", "# Static pixel images for plotting", "pixel_images", "=", "[", "fpix", "[", "0", "]", ",", "fpix", "[", "len", "(", "fpix", ")", "//", "2", "]", ",", "fpix", "[", "len", "(", "fpix", ")", "-", "1", "]", "]", "# Atomically write to disk.", "# http://stackoverflow.com/questions/2333872/", "# atomic-writing-to-file-with-python", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", "f", "=", "NamedTemporaryFile", "(", "\"wb\"", ",", "delete", "=", "False", ")", "np", ".", "savez_compressed", "(", "f", ",", "cadn", "=", "cadn", ",", "time", "=", "time", ",", "fpix", "=", "fpix", ",", "fpix_err", "=", "fpix_err", ",", "qual", "=", "qual", ",", "apertures", "=", "apertures", ",", "pc1", "=", "pc1", ",", "pc2", "=", "pc2", ",", "fitsheader", "=", "fitsheader", ",", "pixel_images", "=", "pixel_images", ",", "nearby", "=", "nearby", ",", "hires", "=", "hires", ",", "sc_cadn", "=", "sc_cadn", ",", "sc_time", "=", "sc_time", ",", "sc_fpix", "=", "sc_fpix", ",", "sc_fpix_err", "=", "sc_fpix_err", ",", "sc_qual", "=", "sc_qual", ",", "sc_pc1", "=", "sc_pc1", ",", "sc_pc2", "=", "sc_pc2", ",", "sc_fitsheader", "=", "sc_fitsheader", ")", "f", ".", "flush", "(", ")", "os", ".", "fsync", "(", "f", ".", "fileno", "(", ")", ")", "f", ".", "close", "(", ")", "shutil", ".", "move", "(", "f", ".", "name", ",", "filename", ")", "if", "download_only", ":", "return", "# Load", "data", "=", "np", ".", "load", "(", "filename", ")", "apertures", "=", "data", "[", "'apertures'", "]", "[", "(", ")", "]", "pixel_images", 
"=", "data", "[", "'pixel_images'", "]", "nearby", "=", "data", "[", "'nearby'", "]", "hires", "=", "data", "[", "'hires'", "]", "[", "(", ")", "]", "if", "cadence", "==", "'lc'", ":", "fitsheader", "=", "data", "[", "'fitsheader'", "]", "cadn", "=", "data", "[", "'cadn'", "]", "time", "=", "data", "[", "'time'", "]", "fpix", "=", "data", "[", "'fpix'", "]", "fpix_err", "=", "data", "[", "'fpix_err'", "]", "qual", "=", "data", "[", "'qual'", "]", "pc1", "=", "data", "[", "'pc1'", "]", "pc2", "=", "data", "[", "'pc2'", "]", "elif", "cadence", "==", "'sc'", ":", "fitsheader", "=", "data", "[", "'sc_fitsheader'", "]", "cadn", "=", "data", "[", "'sc_cadn'", "]", "time", "=", "data", "[", "'sc_time'", "]", "fpix", "=", "data", "[", "'sc_fpix'", "]", "fpix_err", "=", "data", "[", "'sc_fpix_err'", "]", "qual", "=", "data", "[", "'sc_qual'", "]", "pc1", "=", "data", "[", "'sc_pc1'", "]", "pc2", "=", "data", "[", "'sc_pc2'", "]", "else", ":", "raise", "ValueError", "(", "\"Invalid value for the cadence.\"", ")", "# Select the \"saturated aperture\" to check if the star is saturated", "# If it is, we will use this aperture instead", "if", "saturated_aperture_name", "==", "'custom'", ":", "saturated_aperture", "=", "GetCustomAperture", "(", "data", ")", "else", ":", "if", "saturated_aperture_name", "is", "None", ":", "saturated_aperture_name", "=", "'k2sff_19'", "saturated_aperture", "=", "apertures", "[", "saturated_aperture_name", "]", "if", "saturated_aperture", "is", "None", ":", "log", ".", "error", "(", "\"Invalid aperture selected. Defaulting to `tpf_big`.\"", ")", "saturated_aperture_name", "=", "'tpf_big'", "saturated_aperture", "=", "apertures", "[", "saturated_aperture_name", "]", "# HACK: Some C05 K2SFF apertures don't match the target pixel file", "# pixel grid size. This is likely because they're defined on the M67", "# superstamp. For now, let's ignore these stars.", "if", "saturated_aperture", ".", "shape", "!=", "fpix", ".", "shape", "[", "1", ":", "]", ":", "log", ".", "error", "(", "\"Aperture size mismatch!\"", ")", "return", "None", "# Compute the saturation flux and the 97.5th percentile", "# flux in each pixel of the saturated aperture. We're going", "# to compare these to decide if the star is saturated.", "satflx", "=", "SaturationFlux", "(", "EPIC", ",", "campaign", "=", "campaign", ")", "*", "(", "1.", "+", "saturation_tolerance", ")", "f97", "=", "np", ".", "zeros", "(", "(", "fpix", ".", "shape", "[", "1", "]", ",", "fpix", ".", "shape", "[", "2", "]", ")", ")", "for", "i", "in", "range", "(", "fpix", ".", "shape", "[", "1", "]", ")", ":", "for", "j", "in", "range", "(", "fpix", ".", "shape", "[", "2", "]", ")", ":", "if", "saturated_aperture", "[", "i", ",", "j", "]", ":", "# Let's remove NaNs...", "tmp", "=", "np", ".", "delete", "(", "fpix", "[", ":", ",", "i", ",", "j", "]", ",", "np", ".", "where", "(", "np", ".", "isnan", "(", "fpix", "[", ":", ",", "i", ",", "j", "]", ")", ")", ")", "# ... and really bad outliers...", "if", "len", "(", "tmp", ")", ":", "f", "=", "SavGol", "(", "tmp", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "bad", "=", "np", ".", "where", "(", "(", "f", ">", "med", "+", "10.", "*", "MAD", ")", "|", "(", "f", "<", "med", "-", "10.", "*", "MAD", ")", ")", "[", "0", "]", "np", ".", "delete", "(", "tmp", ",", "bad", ")", "# ... 
so we can compute the 97.5th percentile flux", "i97", "=", "int", "(", "0.975", "*", "len", "(", "tmp", ")", ")", "tmp", "=", "tmp", "[", "np", ".", "argsort", "(", "tmp", ")", "[", "i97", "]", "]", "f97", "[", "i", ",", "j", "]", "=", "tmp", "# Check if any of the pixels are actually saturated", "if", "np", ".", "nanmax", "(", "f97", ")", "<=", "satflx", ":", "log", ".", "info", "(", "\"No saturated columns detected.\"", ")", "saturated", "=", "False", "else", ":", "log", ".", "info", "(", "\"Saturated pixel(s) found. Switching to aperture `%s`.\"", "%", "saturated_aperture_name", ")", "aperture_name", "=", "saturated_aperture_name", "saturated", "=", "True", "# Now grab the aperture we'll actually use", "if", "aperture_name", "==", "'custom'", ":", "aperture", "=", "GetCustomAperture", "(", "data", ")", "else", ":", "if", "aperture_name", "is", "None", ":", "aperture_name", "=", "'k2sff_15'", "aperture", "=", "apertures", "[", "aperture_name", "]", "if", "aperture", "is", "None", ":", "log", ".", "error", "(", "\"Invalid aperture selected. Defaulting to `tpf_big`.\"", ")", "aperture_name", "=", "'tpf_big'", "aperture", "=", "apertures", "[", "aperture_name", "]", "# HACK: Some C05 K2SFF apertures don't match the target pixel file", "# pixel grid size. This is likely because they're defined on the M67", "# superstamp. For now, let's ignore these stars.", "if", "aperture", ".", "shape", "!=", "fpix", ".", "shape", "[", "1", ":", "]", ":", "log", ".", "error", "(", "\"Aperture size mismatch!\"", ")", "return", "None", "# Now we check if the aperture is too big. Can lead to memory errors...", "# Treat saturated and unsaturated stars differently.", "if", "saturated", ":", "# Need to check if we have too many pixels *after* collapsing columns.", "# Sort the apertures in decreasing order of pixels, but keep the apert.", "# chosen by the user first.", "aperture_names", "=", "np", ".", "array", "(", "list", "(", "apertures", ".", "keys", "(", ")", ")", ")", "npix_per_aperture", "=", "np", ".", "array", "(", "[", "np", ".", "sum", "(", "apertures", "[", "k", "]", ")", "for", "k", "in", "aperture_names", "]", ")", "aperture_names", "=", "aperture_names", "[", "np", ".", "argsort", "(", "npix_per_aperture", ")", "[", ":", ":", "-", "1", "]", "]", "aperture_names", "=", "np", ".", "append", "(", "[", "aperture_name", "]", ",", "np", ".", "delete", "(", "aperture_names", ",", "np", ".", "argmax", "(", "aperture_names", "==", "aperture_name", ")", ")", ")", "# Loop through them. Pick the first one that satisfies", "# the `max_pixels` constraint", "for", "aperture_name", "in", "aperture_names", ":", "aperture", "=", "apertures", "[", "aperture_name", "]", "aperture", "[", "np", ".", "isnan", "(", "fpix", "[", "0", "]", ")", "]", "=", "0", "ncol", "=", "0", "apcopy", "=", "np", ".", "array", "(", "aperture", ")", "for", "j", "in", "range", "(", "apcopy", ".", "shape", "[", "1", "]", ")", ":", "if", "np", ".", "any", "(", "f97", "[", ":", ",", "j", "]", ">", "satflx", ")", ":", "apcopy", "[", ":", ",", "j", "]", "=", "0", "ncol", "+=", "1", "if", "np", ".", "sum", "(", "apcopy", ")", "+", "ncol", "<=", "max_pixels", ":", "break", "if", "np", ".", "sum", "(", "apcopy", ")", "+", "ncol", ">", "max_pixels", ":", "log", ".", "error", "(", "\"No apertures available with fewer than %d pixels. 
Aborting.\"", "%", "max_pixels", ")", "return", "None", "# Now, finally, we collapse the saturated columns into single pixels", "# and make the pixel array 2D", "ncol", "=", "0", "fpixnew", "=", "[", "]", "ferrnew", "=", "[", "]", "# HACK: K2SFF sometimes clips the heads/tails of saturated columns", "# That's really bad, since that's where all the information is. Let's", "# artificially extend the aperture by two pixels at the top and bottom", "# of each saturated column. This *could* increase contamination, but", "# it's unlikely since the saturated target is by definition really", "# bright", "ext", "=", "0", "for", "j", "in", "range", "(", "aperture", ".", "shape", "[", "1", "]", ")", ":", "if", "np", ".", "any", "(", "f97", "[", ":", ",", "j", "]", ">", "satflx", ")", ":", "for", "i", "in", "range", "(", "aperture", ".", "shape", "[", "0", "]", ")", ":", "if", "(", "aperture", "[", "i", ",", "j", "]", "==", "0", ")", "and", "(", "np", ".", "nanmedian", "(", "fpix", "[", ":", ",", "i", ",", "j", "]", ")", ">", "0", ")", ":", "if", "(", "i", "+", "2", "<", "aperture", ".", "shape", "[", "0", "]", ")", "and", "aperture", "[", "i", "+", "2", ",", "j", "]", "==", "1", ":", "aperture", "[", "i", ",", "j", "]", "=", "2", "ext", "+=", "1", "elif", "(", "i", "+", "1", "<", "aperture", ".", "shape", "[", "0", "]", ")", "and", "aperture", "[", "i", "+", "1", ",", "j", "]", "==", "1", ":", "aperture", "[", "i", ",", "j", "]", "=", "2", "ext", "+=", "1", "elif", "(", "i", "-", "1", ">=", "0", ")", "and", "aperture", "[", "i", "-", "1", ",", "j", "]", "==", "1", ":", "aperture", "[", "i", ",", "j", "]", "=", "2", "ext", "+=", "1", "elif", "(", "i", "-", "2", ">=", "0", ")", "and", "aperture", "[", "i", "-", "2", ",", "j", "]", "==", "1", ":", "aperture", "[", "i", ",", "j", "]", "=", "2", "ext", "+=", "1", "if", "ext", ":", "log", ".", "info", "(", "\"Extended saturated columns by %d pixel(s).\"", "%", "ext", ")", "for", "j", "in", "range", "(", "aperture", ".", "shape", "[", "1", "]", ")", ":", "if", "np", ".", "any", "(", "f97", "[", ":", ",", "j", "]", ">", "satflx", ")", ":", "marked", "=", "False", "collapsed", "=", "np", ".", "zeros", "(", "len", "(", "fpix", "[", ":", ",", "0", ",", "0", "]", ")", ")", "collapsed_err2", "=", "np", ".", "zeros", "(", "len", "(", "fpix", "[", ":", ",", "0", ",", "0", "]", ")", ")", "for", "i", "in", "range", "(", "aperture", ".", "shape", "[", "0", "]", ")", ":", "if", "aperture", "[", "i", ",", "j", "]", ":", "if", "not", "marked", ":", "aperture", "[", "i", ",", "j", "]", "=", "AP_COLLAPSED_PIXEL", "marked", "=", "True", "else", ":", "aperture", "[", "i", ",", "j", "]", "=", "AP_SATURATED_PIXEL", "collapsed", "+=", "fpix", "[", ":", ",", "i", ",", "j", "]", "collapsed_err2", "+=", "fpix_err", "[", ":", ",", "i", ",", "j", "]", "**", "2", "if", "np", ".", "any", "(", "collapsed", ")", ":", "fpixnew", ".", "append", "(", "collapsed", ")", "ferrnew", ".", "append", "(", "np", ".", "sqrt", "(", "collapsed_err2", ")", ")", "ncol", "+=", "1", "else", ":", "for", "i", "in", "range", "(", "aperture", ".", "shape", "[", "0", "]", ")", ":", "if", "aperture", "[", "i", ",", "j", "]", ":", "fpixnew", ".", "append", "(", "fpix", "[", ":", ",", "i", ",", "j", "]", ")", "ferrnew", ".", "append", "(", "fpix_err", "[", ":", ",", "i", ",", "j", "]", ")", "fpix2D", "=", "np", ".", "array", "(", "fpixnew", ")", ".", "T", "fpix_err2D", "=", "np", ".", "array", "(", "ferrnew", ")", ".", "T", "log", ".", "info", "(", "\"Collapsed %d saturated column(s).\"", "%", "ncol", ")", 
"else", ":", "# Check if there are too many pixels", "if", "np", ".", "sum", "(", "aperture", ")", ">", "max_pixels", ":", "# This case is simpler: we just pick the largest aperture", "# that's less than or equal to `max_pixels`", "keys", "=", "list", "(", "apertures", ".", "keys", "(", ")", ")", "npix", "=", "np", ".", "array", "(", "[", "np", ".", "sum", "(", "apertures", "[", "k", "]", ")", "for", "k", "in", "keys", "]", ")", "aperture_name", "=", "keys", "[", "np", ".", "argmax", "(", "npix", "*", "(", "npix", "<=", "max_pixels", ")", ")", "]", "aperture", "=", "apertures", "[", "aperture_name", "]", "aperture", "[", "np", ".", "isnan", "(", "fpix", "[", "0", "]", ")", "]", "=", "0", "if", "np", ".", "sum", "(", "aperture", ")", ">", "max_pixels", ":", "log", ".", "error", "(", "\"No apertures available with fewer than \"", "+", "\"%d pixels. Aborting.\"", "%", "max_pixels", ")", "return", "None", "log", ".", "warn", "(", "\"Selected aperture is too big. Proceeding with aperture \"", "+", "\"`%s` instead.\"", "%", "aperture_name", ")", "# Make the pixel flux array 2D", "aperture", "[", "np", ".", "isnan", "(", "fpix", "[", "0", "]", ")", "]", "=", "0", "ap", "=", "np", ".", "where", "(", "aperture", "&", "1", ")", "fpix2D", "=", "np", ".", "array", "(", "[", "f", "[", "ap", "]", "for", "f", "in", "fpix", "]", ",", "dtype", "=", "'float64'", ")", "fpix_err2D", "=", "np", ".", "array", "(", "[", "p", "[", "ap", "]", "for", "p", "in", "fpix_err", "]", ",", "dtype", "=", "'float64'", ")", "# Compute the background", "binds", "=", "np", ".", "where", "(", "aperture", "^", "1", ")", "if", "RemoveBackground", "(", "EPIC", ",", "campaign", "=", "campaign", ")", "and", "(", "len", "(", "binds", "[", "0", "]", ")", ">", "0", ")", ":", "bkg", "=", "np", ".", "nanmedian", "(", "np", ".", "array", "(", "[", "f", "[", "binds", "]", "for", "f", "in", "fpix", "]", ",", "dtype", "=", "'float64'", ")", ",", "axis", "=", "1", ")", "# Uncertainty of the median:", "# http://davidmlane.com/hyperstat/A106993.html", "bkg_err", "=", "1.253", "*", "np", ".", "nanmedian", "(", "np", ".", "array", "(", "[", "e", "[", "binds", "]", "for", "e", "in", "fpix_err", "]", ",", "dtype", "=", "'float64'", ")", ",", "axis", "=", "1", ")", "/", "np", ".", "sqrt", "(", "len", "(", "binds", "[", "0", "]", ")", ")", "bkg", "=", "bkg", ".", "reshape", "(", "-", "1", ",", "1", ")", "bkg_err", "=", "bkg_err", ".", "reshape", "(", "-", "1", ",", "1", ")", "else", ":", "bkg", "=", "0.", "bkg_err", "=", "0.", "# Make everything 2D and remove the background", "fpix", "=", "fpix2D", "-", "bkg", "fpix_err", "=", "np", ".", "sqrt", "(", "fpix_err2D", "**", "2", "+", "bkg_err", "**", "2", ")", "flux", "=", "np", ".", "sum", "(", "fpix", ",", "axis", "=", "1", ")", "ferr", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "fpix_err", "**", "2", ",", "axis", "=", "1", ")", ")", "# Get NaN data points", "nanmask", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "flux", ")", "|", "(", "flux", "==", "0", ")", ")", "[", "0", "]", "# Get flagged data points -- we won't train our model on them", "badmask", "=", "[", "]", "for", "b", "in", "bad_bits", ":", "badmask", "+=", "list", "(", "np", ".", "where", "(", "qual", "&", "2", "**", "(", "b", "-", "1", ")", ")", "[", "0", "]", ")", "# Flag >10 sigma outliers -- same thing.", "tmpmask", "=", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "badmask", ",", "nanmask", "]", ")", ")", ")", ")", "t", "=", "np", ".", "delete", "(", "time", ",", "tmpmask", 
")", "f", "=", "np", ".", "delete", "(", "flux", ",", "tmpmask", ")", "f", "=", "SavGol", "(", "f", ")", "med", "=", "np", ".", "nanmedian", "(", "f", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "f", "-", "med", ")", ")", "bad", "=", "np", ".", "where", "(", "(", "f", ">", "med", "+", "10.", "*", "MAD", ")", "|", "(", "f", "<", "med", "-", "10.", "*", "MAD", ")", ")", "[", "0", "]", "badmask", ".", "extend", "(", "[", "np", ".", "argmax", "(", "time", "==", "t", "[", "i", "]", ")", "for", "i", "in", "bad", "]", ")", "# Campaign 2 hack: the first day or two are screwed up", "if", "campaign", "==", "2", ":", "badmask", ".", "extend", "(", "np", ".", "where", "(", "time", "<", "2061.5", ")", "[", "0", "]", ")", "# TODO: Fix time offsets in first half of", "# Campaign 0. See note in everest 1.0 code", "# Finalize the mask", "badmask", "=", "np", ".", "array", "(", "sorted", "(", "list", "(", "set", "(", "badmask", ")", ")", ")", ")", "# Interpolate the nans", "fpix", "=", "Interpolate", "(", "time", ",", "nanmask", ",", "fpix", ")", "fpix_err", "=", "Interpolate", "(", "time", ",", "nanmask", ",", "fpix_err", ")", "# Return", "data", "=", "DataContainer", "(", ")", "data", ".", "ID", "=", "EPIC", "data", ".", "campaign", "=", "campaign", "data", ".", "cadn", "=", "cadn", "data", ".", "time", "=", "time", "data", ".", "fpix", "=", "fpix", "data", ".", "fpix_err", "=", "fpix_err", "data", ".", "nanmask", "=", "nanmask", "data", ".", "badmask", "=", "badmask", "data", ".", "aperture", "=", "aperture", "data", ".", "aperture_name", "=", "aperture_name", "data", ".", "apertures", "=", "apertures", "data", ".", "quality", "=", "qual", "data", ".", "Xpos", "=", "pc1", "data", ".", "Ypos", "=", "pc2", "data", ".", "meta", "=", "fitsheader", "data", ".", "mag", "=", "fitsheader", "[", "0", "]", "[", "'KEPMAG'", "]", "[", "1", "]", "data", ".", "pixel_images", "=", "pixel_images", "data", ".", "nearby", "=", "nearby", "data", ".", "hires", "=", "hires", "data", ".", "saturated", "=", "saturated", "data", ".", "bkg", "=", "bkg", "return", "data" ]
Returns a :py:obj:`DataContainer` instance with the raw data for the target. :param int EPIC: The EPIC ID number :param int season: The observing season (campaign). Default :py:obj:`None` :param str cadence: The light curve cadence. Default `lc` :param bool clobber: Overwrite existing files? Default :py:obj:`False` :param bool delete_raw: Delete the FITS TPF after processing it? \ Default :py:obj:`False` :param str aperture_name: The name of the aperture to use. Select \ `custom` to call :py:func:`GetCustomAperture`. Default `k2sff_15` :param str saturated_aperture_name: The name of the aperture to use if \ the target is saturated. Default `k2sff_19` :param int max_pixels: Maximum number of pixels in the TPF. Default 75 :param bool download_only: Download raw TPF and return? Default \ :py:obj:`False` :param float saturation_tolerance: Target is considered saturated \ if flux is within this fraction of the pixel well depth. \ Default -0.1 :param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider \ outliers when computing the model. \ Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]` :param bool get_hires: Download a high resolution image of the target? \ Default :py:obj:`True` :param bool get_nearby: Retrieve location of nearby sources? \ Default :py:obj:`True`
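A hedged usage sketch for the call signature documented above. It assumes everest and its data dependencies are installed and network access is available; the import path is inferred from the record URL, the EPIC ID is purely illustrative, and the attributes printed are the DataContainer fields assigned at the end of GetData:

from everest.missions.k2.k2 import GetData

data = GetData(201367065, season=1, cadence='lc',
               aperture_name='k2sff_15', max_pixels=75)
if data is not None:          # None is returned on aperture-size mismatches
    print(data.campaign, data.mag)
    print(data.fpix.shape)    # (ncadences, npixels) after flattening
    print(len(data.badmask), "flagged cadences")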
[ "Returns", "a", ":", "py", ":", "obj", ":", "DataContainer", "instance", "with", "the", "raw", "data", "for", "the", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L197-L712
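The outlier flagging inside GetData detrends the summed flux with a Savitzky-Golay filter and clips points more than 10 sigma from the median, with sigma estimated robustly as 1.4826 times the median absolute deviation. A self-contained sketch of that step, substituting scipy's savgol_filter for everest's internal SavGol helper (an assumption of roughly equivalent behavior):

import numpy as np
from scipy.signal import savgol_filter

def flag_outliers(flux, win=49, sigma=10.):
    # Remove the slow trend so variability is not mistaken for outliers
    resid = flux - savgol_filter(flux, win, polyorder=2)
    med = np.nanmedian(resid)
    mad = 1.4826 * np.nanmedian(np.abs(resid - med))  # MAD -> Gaussian sigma
    return np.where(np.abs(resid - med) > sigma * mad)[0]

flux = 100. + np.random.randn(1000)
flux[[17, 500]] += 50.           # inject two spikes
print(flag_outliers(flux))       # -> indices at/near 17 and 500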
rodluger/everest
everest/missions/k2/k2.py
GetNeighbors
def GetNeighbors(EPIC, season=None, model=None, neighbors=10, mag_range=(11., 13.), cdpp_range=None, aperture_name='k2sff_15', cadence='lc', **kwargs): ''' Return `neighbors` random bright stars on the same module as `EPIC`. :param int EPIC: The EPIC ID number :param str model: The :py:obj:`everest` model name. Only used when \ imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default 10 :param str aperture_name: The name of the aperture to use. Select \ `custom` to call \ :py:func:`GetCustomAperture`. Default `k2sff_15` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. \ Default (11, 13) :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. \ Default :py:obj:`None` ''' # Zero neighbors? if neighbors == 0: return [] # Get the IDs # Campaign no. if season is None: campaign = Season(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) else: campaign = season epics, kepmags, channels, short_cadence = np.array(GetK2Stars()[ campaign]).T short_cadence = np.array(short_cadence, dtype=bool) epics = np.array(epics, dtype=int) c = GetNeighboringChannels(Channel(EPIC, campaign=season)) # Manage kwargs if aperture_name is None: aperture_name = 'k2sff_15' if mag_range is None: mag_lo = -np.inf mag_hi = np.inf else: mag_lo = mag_range[0] mag_hi = mag_range[1] # K2-specific tweak. The short cadence stars are preferentially # really bright ones, so we won't get many neighbors if we # stick to the default magnitude range! I'm # therefore enforcing a lower magnitude cut-off of 8. if cadence == 'sc': mag_lo = 8. if cdpp_range is None: cdpp_lo = -np.inf cdpp_hi = np.inf else: cdpp_lo = cdpp_range[0] cdpp_hi = cdpp_range[1] targets = [] # First look for nearby targets, then relax the constraint # If still no targets, widen magnitude range for n in range(3): if n == 0: nearby = True elif n == 1: nearby = False elif n == 2: mag_lo -= 1 mag_hi += 1 # Loop over all stars for star, kp, channel, sc in zip(epics, kepmags, channels, short_cadence): # Preliminary vetting if not (((channel in c) if nearby else True) and (kp < mag_hi) \ and (kp > mag_lo) and (sc if cadence == 'sc' else True)): continue # Reject if self or if already in list if (star == EPIC) or (star in targets): continue # Ensure raw light curve file exists if not os.path.exists( os.path.join(TargetDirectory(star, campaign), 'data.npz')): continue # Ensure crowding is OK. This is quite conservative, as we # need to prevent potential astrophysical false positive # contamination from crowded planet-hosting neighbors when # doing neighboring PLD. contam = False data = np.load(os.path.join( TargetDirectory(star, campaign), 'data.npz')) aperture = data['apertures'][()][aperture_name] # Check that the aperture exists! if aperture is None: continue fpix = data['fpix'] for source in data['nearby'][()]: # Ignore self if source['ID'] == star: continue # Ignore really dim stars if source['mag'] < kp - 5: continue # Compute source position x = int(np.round(source['x'] - source['x0'])) y = int(np.round(source['y'] - source['y0'])) # If the source is within two pixels of the edge # of the target aperture, reject the target for j in [x - 2, x - 1, x, x + 1, x + 2]: if j < 0: # Outside the postage stamp continue for i in [y - 2, y - 1, y, y + 1, y + 2]: if i < 0: # Outside the postage stamp continue try: if aperture[i][j]: # Oh-oh! 
contam = True except IndexError: # Out of bounds... carry on! pass if contam: continue # HACK: This happens for K2SFF M67 targets in C05. # Let's skip them if aperture.shape != fpix.shape[1:]: continue # Reject if the model is not present if model is not None: if not os.path.exists(os.path.join( TargetDirectory(star, campaign), model + '.npz')): continue # Reject if CDPP out of range if cdpp_range is not None: cdpp = np.load(os.path.join(TargetDirectory( star, campaign), model + '.npz'))['cdpp'] if (cdpp > cdpp_hi) or (cdpp < cdpp_lo): continue # Passed all the tests! targets.append(star) # Do we have enough? If so, return if len(targets) == neighbors: random.shuffle(targets) return targets # If we get to this point, we didn't find enough neighbors... # Return what we have anyway. return targets
python
def GetNeighbors(EPIC, season=None, model=None, neighbors=10, mag_range=(11., 13.), cdpp_range=None, aperture_name='k2sff_15', cadence='lc', **kwargs): ''' Return `neighbors` random bright stars on the same module as `EPIC`. :param int EPIC: The EPIC ID number :param str model: The :py:obj:`everest` model name. Only used when \ imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default 10 :param str aperture_name: The name of the aperture to use. Select \ `custom` to call \ :py:func:`GetCustomAperture`. Default `k2sff_15` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. \ Default (11, 13) :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. \ Default :py:obj:`None` ''' # Zero neighbors? if neighbors == 0: return [] # Get the IDs # Campaign no. if season is None: campaign = Season(EPIC) if hasattr(campaign, '__len__'): raise AttributeError( "Please choose a campaign/season for this target: %s." % campaign) else: campaign = season epics, kepmags, channels, short_cadence = np.array(GetK2Stars()[ campaign]).T short_cadence = np.array(short_cadence, dtype=bool) epics = np.array(epics, dtype=int) c = GetNeighboringChannels(Channel(EPIC, campaign=season)) # Manage kwargs if aperture_name is None: aperture_name = 'k2sff_15' if mag_range is None: mag_lo = -np.inf mag_hi = np.inf else: mag_lo = mag_range[0] mag_hi = mag_range[1] # K2-specific tweak. The short cadence stars are preferentially # really bright ones, so we won't get many neighbors if we # stick to the default magnitude range! I'm # therefore enforcing a lower magnitude cut-off of 8. if cadence == 'sc': mag_lo = 8. if cdpp_range is None: cdpp_lo = -np.inf cdpp_hi = np.inf else: cdpp_lo = cdpp_range[0] cdpp_hi = cdpp_range[1] targets = [] # First look for nearby targets, then relax the constraint # If still no targets, widen magnitude range for n in range(3): if n == 0: nearby = True elif n == 1: nearby = False elif n == 2: mag_lo -= 1 mag_hi += 1 # Loop over all stars for star, kp, channel, sc in zip(epics, kepmags, channels, short_cadence): # Preliminary vetting if not (((channel in c) if nearby else True) and (kp < mag_hi) \ and (kp > mag_lo) and (sc if cadence == 'sc' else True)): continue # Reject if self or if already in list if (star == EPIC) or (star in targets): continue # Ensure raw light curve file exists if not os.path.exists( os.path.join(TargetDirectory(star, campaign), 'data.npz')): continue # Ensure crowding is OK. This is quite conservative, as we # need to prevent potential astrophysical false positive # contamination from crowded planet-hosting neighbors when # doing neighboring PLD. contam = False data = np.load(os.path.join( TargetDirectory(star, campaign), 'data.npz')) aperture = data['apertures'][()][aperture_name] # Check that the aperture exists! if aperture is None: continue fpix = data['fpix'] for source in data['nearby'][()]: # Ignore self if source['ID'] == star: continue # Ignore really dim stars if source['mag'] < kp - 5: continue # Compute source position x = int(np.round(source['x'] - source['x0'])) y = int(np.round(source['y'] - source['y0'])) # If the source is within two pixels of the edge # of the target aperture, reject the target for j in [x - 2, x - 1, x, x + 1, x + 2]: if j < 0: # Outside the postage stamp continue for i in [y - 2, y - 1, y, y + 1, y + 2]: if i < 0: # Outside the postage stamp continue try: if aperture[i][j]: # Oh-oh! 
contam = True except IndexError: # Out of bounds... carry on! pass if contam: continue # HACK: This happens for K2SFF M67 targets in C05. # Let's skip them if aperture.shape != fpix.shape[1:]: continue # Reject if the model is not present if model is not None: if not os.path.exists(os.path.join( TargetDirectory(star, campaign), model + '.npz')): continue # Reject if CDPP out of range if cdpp_range is not None: cdpp = np.load(os.path.join(TargetDirectory( star, campaign), model + '.npz'))['cdpp'] if (cdpp > cdpp_hi) or (cdpp < cdpp_lo): continue # Passed all the tests! targets.append(star) # Do we have enough? If so, return if len(targets) == neighbors: random.shuffle(targets) return targets # If we get to this point, we didn't find enough neighbors... # Return what we have anyway. return targets
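The crowding test in GetNeighbors above scans a 5x5 pixel patch around each nearby source and rejects the candidate if any target-aperture pixel falls inside it. A minimal sketch with a hypothetical helper (contaminates is not a function in the module, and it returns early where the original just sets a flag):

import numpy as np

def contaminates(aperture, x, y, pad=2):
    # True if any aperture pixel lies within `pad` pixels of column x, row y
    for j in range(x - pad, x + pad + 1):
        if j < 0:
            continue                  # off the postage stamp
        for i in range(y - pad, y + pad + 1):
            if i < 0:
                continue
            try:
                if aperture[i][j]:
                    return True
            except IndexError:
                pass                  # off the far edge: carry on
    return False

ap = np.zeros((10, 10), dtype=int)
ap[4:7, 4:7] = 1                      # a 3x3 aperture in the middle
print(contaminates(ap, 8, 8))         # True: source within 2 px of it
print(contaminates(ap, 0, 0))         # False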
[ "def", "GetNeighbors", "(", "EPIC", ",", "season", "=", "None", ",", "model", "=", "None", ",", "neighbors", "=", "10", ",", "mag_range", "=", "(", "11.", ",", "13.", ")", ",", "cdpp_range", "=", "None", ",", "aperture_name", "=", "'k2sff_15'", ",", "cadence", "=", "'lc'", ",", "*", "*", "kwargs", ")", ":", "# Zero neighbors?", "if", "neighbors", "==", "0", ":", "return", "[", "]", "# Get the IDs", "# Campaign no.", "if", "season", "is", "None", ":", "campaign", "=", "Season", "(", "EPIC", ")", "if", "hasattr", "(", "campaign", ",", "'__len__'", ")", ":", "raise", "AttributeError", "(", "\"Please choose a campaign/season for this target: %s.\"", "%", "campaign", ")", "else", ":", "campaign", "=", "season", "epics", ",", "kepmags", ",", "channels", ",", "short_cadence", "=", "np", ".", "array", "(", "GetK2Stars", "(", ")", "[", "campaign", "]", ")", ".", "T", "short_cadence", "=", "np", ".", "array", "(", "short_cadence", ",", "dtype", "=", "bool", ")", "epics", "=", "np", ".", "array", "(", "epics", ",", "dtype", "=", "int", ")", "c", "=", "GetNeighboringChannels", "(", "Channel", "(", "EPIC", ",", "campaign", "=", "season", ")", ")", "# Manage kwargs", "if", "aperture_name", "is", "None", ":", "aperture_name", "=", "'k2sff_15'", "if", "mag_range", "is", "None", ":", "mag_lo", "=", "-", "np", ".", "inf", "mag_hi", "=", "np", ".", "inf", "else", ":", "mag_lo", "=", "mag_range", "[", "0", "]", "mag_hi", "=", "mag_range", "[", "1", "]", "# K2-specific tweak. The short cadence stars are preferentially", "# really bright ones, so we won't get many neighbors if we", "# stick to the default magnitude range! I'm", "# therefore enforcing a lower magnitude cut-off of 8.", "if", "cadence", "==", "'sc'", ":", "mag_lo", "=", "8.", "if", "cdpp_range", "is", "None", ":", "cdpp_lo", "=", "-", "np", ".", "inf", "cdpp_hi", "=", "np", ".", "inf", "else", ":", "cdpp_lo", "=", "cdpp_range", "[", "0", "]", "cdpp_hi", "=", "cdpp_range", "[", "1", "]", "targets", "=", "[", "]", "# First look for nearby targets, then relax the constraint", "# If still no targets, widen magnitude range", "for", "n", "in", "range", "(", "3", ")", ":", "if", "n", "==", "0", ":", "nearby", "=", "True", "elif", "n", "==", "1", ":", "nearby", "=", "False", "elif", "n", "==", "2", ":", "mag_lo", "-=", "1", "mag_hi", "+=", "1", "# Loop over all stars", "for", "star", ",", "kp", ",", "channel", ",", "sc", "in", "zip", "(", "epics", ",", "kepmags", ",", "channels", ",", "short_cadence", ")", ":", "# Preliminary vetting", "if", "not", "(", "(", "(", "channel", "in", "c", ")", "if", "nearby", "else", "True", ")", "and", "(", "kp", "<", "mag_hi", ")", "and", "(", "kp", ">", "mag_lo", ")", "and", "(", "sc", "if", "cadence", "==", "'sc'", "else", "True", ")", ")", ":", "continue", "# Reject if self or if already in list", "if", "(", "star", "==", "EPIC", ")", "or", "(", "star", "in", "targets", ")", ":", "continue", "# Ensure raw light curve file exists", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "TargetDirectory", "(", "star", ",", "campaign", ")", ",", "'data.npz'", ")", ")", ":", "continue", "# Ensure crowding is OK. 
This is quite conservative, as we", "# need to prevent potential astrophysical false positive", "# contamination from crowded planet-hosting neighbors when", "# doing neighboring PLD.", "contam", "=", "False", "data", "=", "np", ".", "load", "(", "os", ".", "path", ".", "join", "(", "TargetDirectory", "(", "star", ",", "campaign", ")", ",", "'data.npz'", ")", ")", "aperture", "=", "data", "[", "'apertures'", "]", "[", "(", ")", "]", "[", "aperture_name", "]", "# Check that the aperture exists!", "if", "aperture", "is", "None", ":", "continue", "fpix", "=", "data", "[", "'fpix'", "]", "for", "source", "in", "data", "[", "'nearby'", "]", "[", "(", ")", "]", ":", "# Ignore self", "if", "source", "[", "'ID'", "]", "==", "star", ":", "continue", "# Ignore really dim stars", "if", "source", "[", "'mag'", "]", "<", "kp", "-", "5", ":", "continue", "# Compute source position", "x", "=", "int", "(", "np", ".", "round", "(", "source", "[", "'x'", "]", "-", "source", "[", "'x0'", "]", ")", ")", "y", "=", "int", "(", "np", ".", "round", "(", "source", "[", "'y'", "]", "-", "source", "[", "'y0'", "]", ")", ")", "# If the source is within two pixels of the edge", "# of the target aperture, reject the target", "for", "j", "in", "[", "x", "-", "2", ",", "x", "-", "1", ",", "x", ",", "x", "+", "1", ",", "x", "+", "2", "]", ":", "if", "j", "<", "0", ":", "# Outside the postage stamp", "continue", "for", "i", "in", "[", "y", "-", "2", ",", "y", "-", "1", ",", "y", ",", "y", "+", "1", ",", "y", "+", "2", "]", ":", "if", "i", "<", "0", ":", "# Outside the postage stamp", "continue", "try", ":", "if", "aperture", "[", "i", "]", "[", "j", "]", ":", "# Oh-oh!", "contam", "=", "True", "except", "IndexError", ":", "# Out of bounds... carry on!", "pass", "if", "contam", ":", "continue", "# HACK: This happens for K2SFF M67 targets in C05.", "# Let's skip them", "if", "aperture", ".", "shape", "!=", "fpix", ".", "shape", "[", "1", ":", "]", ":", "continue", "# Reject if the model is not present", "if", "model", "is", "not", "None", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "TargetDirectory", "(", "star", ",", "campaign", ")", ",", "model", "+", "'.npz'", ")", ")", ":", "continue", "# Reject if CDPP out of range", "if", "cdpp_range", "is", "not", "None", ":", "cdpp", "=", "np", ".", "load", "(", "os", ".", "path", ".", "join", "(", "TargetDirectory", "(", "star", ",", "campaign", ")", ",", "model", "+", "'.npz'", ")", ")", "[", "'cdpp'", "]", "if", "(", "cdpp", ">", "cdpp_hi", ")", "or", "(", "cdpp", "<", "cdpp_lo", ")", ":", "continue", "# Passed all the tests!", "targets", ".", "append", "(", "star", ")", "# Do we have enough? If so, return", "if", "len", "(", "targets", ")", "==", "neighbors", ":", "random", ".", "shuffle", "(", "targets", ")", "return", "targets", "# If we get to this point, we didn't find enough neighbors...", "# Return what we have anyway.", "return", "targets" ]
Return `neighbors` random bright stars on the same module as `EPIC`. :param int EPIC: The EPIC ID number :param int season: The observing season (campaign). Default :py:obj:`None` :param str model: The :py:obj:`everest` model name. Only used when \ imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default 10 :param str aperture_name: The name of the aperture to use. Select \ `custom` to call \ :py:func:`GetCustomAperture`. Default `k2sff_15` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. \ Default (11, 13) :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. \ Default :py:obj:`None`
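A usage sketch for the search documented above. The import path is inferred from the record URL, the EPIC ID is illustrative, and the call assumes the candidates' raw data.npz files are already cached on disk, since stars without one are skipped:

from everest.missions.k2.k2 import GetNeighbors

# Up to 5 bright (Kp 11-13) long-cadence neighbors of this target.
# Candidates without a cached data.npz are silently skipped, so fewer
# IDs may come back than requested.
nearby_ids = GetNeighbors(201367065, season=1, neighbors=5,
                          mag_range=(11., 13.), cadence='lc')
print(nearby_ids)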
[ "Return", "neighbors", "random", "bright", "stars", "on", "the", "same", "module", "as", "EPIC", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L715-L881
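The selection loop in GetNeighbors relaxes its constraints over three passes: neighboring channels only, then any channel, then a magnitude window widened by one mag on each side. A generic sketch of that pattern with placeholder candidates and predicate (nothing here is the K2 star catalog):

import random

def relaxed_search(candidates, want, mag_lo, mag_hi, is_nearby):
    # candidates: iterable of (star_id, magnitude); is_nearby: predicate
    found = []
    for n in range(3):
        nearby = n == 0        # pass 0: neighboring channels only
        if n == 2:             # pass 2: widen the magnitude window
            mag_lo -= 1
            mag_hi += 1
        for star, mag in candidates:
            if nearby and not is_nearby(star):
                continue
            if not (mag_lo < mag < mag_hi) or star in found:
                continue
            found.append(star)
            if len(found) == want:
                random.shuffle(found)
                return found
    return found               # may fall short of `want`

cands = [(i, 10.5 + 0.5 * i) for i in range(8)]
print(relaxed_search(cands, 3, 11., 13., lambda s: s % 2 == 0))
# -> [2, 4, 3] in some order: pass 0 finds 2 and 4, pass 1 adds 3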
rodluger/everest
everest/missions/k2/k2.py
PlanetStatistics
def PlanetStatistics(model='nPLD', compare_to='k2sff', **kwargs): ''' Computes and plots the CDPP statistics comparison between `model` and `compare_to` for all known K2 planets. :param str model: The :py:obj:`everest` model name :param str compare_to: The :py:obj:`everest` model name or \ other K2 pipeline name ''' # Load all planet hosts f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'planets.tsv') epic, campaign, kp, _, _, _, _, _, _ = np.loadtxt( f, unpack=True, skiprows=2) epic = np.array(epic, dtype=int) campaign = np.array(campaign, dtype=int) cdpp = np.zeros(len(epic)) saturated = np.zeros(len(epic), dtype=int) cdpp_1 = np.zeros(len(epic)) # Get the stats for c in set(campaign): # Everest model f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(c), model)) e0, _, _, c0, _, _, _, _, s0 = np.loadtxt(f, unpack=True, skiprows=2) for i, e in enumerate(epic): if e in e0: j = np.argmax(e0 == e) cdpp[i] = c0[j] saturated[i] = s0[j] # Comparison model f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(c), compare_to.lower())) if not os.path.exists(f): continue if compare_to.lower() in ['everest1', 'k2sff', 'k2sc']: e1, c1 = np.loadtxt(f, unpack=True, skiprows=2) else: e1, _, _, c1, _, _, _, _, _ = np.loadtxt( f, unpack=True, skiprows=2) for i, e in enumerate(epic): if e in e1: j = np.argmax(e1 == e) cdpp_1[i] = c1[j] sat = np.where(saturated == 1) unsat = np.where(saturated == 0) # Plot the equivalent of the Aigrain+16 figure fig, ax = pl.subplots(1) fig.canvas.set_window_title( 'K2 Planet Hosts: %s versus %s' % (model, compare_to)) x = kp y = (cdpp - cdpp_1) / cdpp_1 ax.scatter(x[unsat], y[unsat], color='b', marker='.', alpha=0.5, zorder=-1, picker=True) ax.scatter(x[sat], y[sat], color='r', marker='.', alpha=0.5, zorder=-1, picker=True) ax.set_ylim(-1, 1) ax.set_xlim(8, 18) ax.axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5) ax.axhline(0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5) ax.axhline(-0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5) ax.set_title(r'K2 Planet Hosts', fontsize=18) ax.set_ylabel(r'Relative CDPP', fontsize=18) ax.set_xlabel('Kepler Magnitude', fontsize=18) # Pickable points Picker = StatsPicker([ax], [kp], [y], epic, model=model, compare_to=compare_to) fig.canvas.mpl_connect('pick_event', Picker) # Show pl.show()
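The quantity plotted on the y-axis above is the fractional CDPP difference, (cdpp - cdpp_1) / cdpp_1, so -0.5 means the everest model halves the comparison pipeline's noise and +1.0 means it doubles it. A tiny worked example with made-up numbers:

import numpy as np

cdpp_model = np.array([30., 45., 80.])   # ppm, made-up
cdpp_other = np.array([60., 45., 40.])   # ppm, made-up
rel = (cdpp_model - cdpp_other) / cdpp_other
print(rel)   # [-0.5  0.   1. ] : half the noise, equal, twice as noisy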
python
def PlanetStatistics(model='nPLD', compare_to='k2sff', **kwargs): ''' Computes and plots the CDPP statistics comparison between `model` and `compare_to` for all known K2 planets. :param str model: The :py:obj:`everest` model name :param str compare_to: The :py:obj:`everest` model name or \ other K2 pipeline name ''' # Load all planet hosts f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'planets.tsv') epic, campaign, kp, _, _, _, _, _, _ = np.loadtxt( f, unpack=True, skiprows=2) epic = np.array(epic, dtype=int) campaign = np.array(campaign, dtype=int) cdpp = np.zeros(len(epic)) saturated = np.zeros(len(epic), dtype=int) cdpp_1 = np.zeros(len(epic)) # Get the stats for c in set(campaign): # Everest model f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(c), model)) e0, _, _, c0, _, _, _, _, s0 = np.loadtxt(f, unpack=True, skiprows=2) for i, e in enumerate(epic): if e in e0: j = np.argmax(e0 == e) cdpp[i] = c0[j] saturated[i] = s0[j] # Comparison model f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(c), compare_to.lower())) if not os.path.exists(f): continue if compare_to.lower() in ['everest1', 'k2sff', 'k2sc']: e1, c1 = np.loadtxt(f, unpack=True, skiprows=2) else: e1, _, _, c1, _, _, _, _, _ = np.loadtxt( f, unpack=True, skiprows=2) for i, e in enumerate(epic): if e in e1: j = np.argmax(e1 == e) cdpp_1[i] = c1[j] sat = np.where(saturated == 1) unsat = np.where(saturated == 0) # Plot the equivalent of the Aigrain+16 figure fig, ax = pl.subplots(1) fig.canvas.set_window_title( 'K2 Planet Hosts: %s versus %s' % (model, compare_to)) x = kp y = (cdpp - cdpp_1) / cdpp_1 ax.scatter(x[unsat], y[unsat], color='b', marker='.', alpha=0.5, zorder=-1, picker=True) ax.scatter(x[sat], y[sat], color='r', marker='.', alpha=0.5, zorder=-1, picker=True) ax.set_ylim(-1, 1) ax.set_xlim(8, 18) ax.axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5) ax.axhline(0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5) ax.axhline(-0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5) ax.set_title(r'K2 Planet Hosts', fontsize=18) ax.set_ylabel(r'Relative CDPP', fontsize=18) ax.set_xlabel('Kepler Magnitude', fontsize=18) # Pickable points Picker = StatsPicker([ax], [kp], [y], epic, model=model, compare_to=compare_to) fig.canvas.mpl_connect('pick_event', Picker) # Show pl.show()
[ "def", "PlanetStatistics", "(", "model", "=", "'nPLD'", ",", "compare_to", "=", "'k2sff'", ",", "*", "*", "kwargs", ")", ":", "# Load all planet hosts", "f", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'planets.tsv'", ")", "epic", ",", "campaign", ",", "kp", ",", "_", ",", "_", ",", "_", ",", "_", ",", "_", ",", "_", "=", "np", ".", "loadtxt", "(", "f", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "epic", "=", "np", ".", "array", "(", "epic", ",", "dtype", "=", "int", ")", "campaign", "=", "np", ".", "array", "(", "campaign", ",", "dtype", "=", "int", ")", "cdpp", "=", "np", ".", "zeros", "(", "len", "(", "epic", ")", ")", "saturated", "=", "np", ".", "zeros", "(", "len", "(", "epic", ")", ",", "dtype", "=", "int", ")", "cdpp_1", "=", "np", ".", "zeros", "(", "len", "(", "epic", ")", ")", "# Get the stats", "for", "c", "in", "set", "(", "campaign", ")", ":", "# Everest model", "f", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "int", "(", "c", ")", ",", "model", ")", ")", "e0", ",", "_", ",", "_", ",", "c0", ",", "_", ",", "_", ",", "_", ",", "_", ",", "s0", "=", "np", ".", "loadtxt", "(", "f", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "for", "i", ",", "e", "in", "enumerate", "(", "epic", ")", ":", "if", "e", "in", "e0", ":", "j", "=", "np", ".", "argmax", "(", "e0", "==", "e", ")", "cdpp", "[", "i", "]", "=", "c0", "[", "j", "]", "saturated", "[", "i", "]", "=", "s0", "[", "j", "]", "# Comparison model", "f", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "int", "(", "c", ")", ",", "compare_to", ".", "lower", "(", ")", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "continue", "if", "compare_to", ".", "lower", "(", ")", "in", "[", "'everest1'", ",", "'k2sff'", ",", "'k2sc'", "]", ":", "e1", ",", "c1", "=", "np", ".", "loadtxt", "(", "f", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "else", ":", "e1", ",", "_", ",", "_", ",", "c1", ",", "_", ",", "_", ",", "_", ",", "_", ",", "_", "=", "np", ".", "loadtxt", "(", "f", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "for", "i", ",", "e", "in", "enumerate", "(", "epic", ")", ":", "if", "e", "in", "e1", ":", "j", "=", "np", ".", "argmax", "(", "e1", "==", "e", ")", "cdpp_1", "[", "i", "]", "=", "c1", "[", "j", "]", "sat", "=", "np", ".", "where", "(", "saturated", "==", "1", ")", "unsat", "=", "np", ".", "where", "(", "saturated", "==", "0", ")", "# Plot the equivalent of the Aigrain+16 figure", "fig", ",", "ax", "=", "pl", ".", "subplots", "(", "1", ")", "fig", ".", "canvas", ".", "set_window_title", "(", "'K2 Planet Hosts: %s versus %s'", "%", "(", "model", ",", "compare_to", ")", ")", "x", "=", "kp", "y", "=", "(", "cdpp", "-", "cdpp_1", ")", "/", "cdpp_1", "ax", ".", "scatter", "(", "x", "[", "unsat", "]", ",", "y", "[", "unsat", "]", ",", "color", "=", "'b'", ",", "marker", "=", "'.'", ",", "alpha", "=", "0.5", ",", "zorder", "=", "-", "1", ",", "picker", "=", "True", ")", "ax", ".", "scatter", "(", "x", "[", "sat", "]", ",", "y", "[", "sat", "]", ",", "color", "=", "'r'", ",", "marker", "=", "'.'", ",", "alpha", "=", "0.5", ",", "zorder", "=", "-", "1", ",", "picker", "=", "True", ")", "ax", ".", "set_ylim", "(", "-", "1", ",", "1", ")", "ax", ".", "set_xlim", "(", "8", ",", "18", ")", "ax", ".", 
"axhline", "(", "0", ",", "color", "=", "'gray'", ",", "lw", "=", "2", ",", "zorder", "=", "-", "99", ",", "alpha", "=", "0.5", ")", "ax", ".", "axhline", "(", "0.5", ",", "color", "=", "'gray'", ",", "ls", "=", "'--'", ",", "lw", "=", "2", ",", "zorder", "=", "-", "99", ",", "alpha", "=", "0.5", ")", "ax", ".", "axhline", "(", "-", "0.5", ",", "color", "=", "'gray'", ",", "ls", "=", "'--'", ",", "lw", "=", "2", ",", "zorder", "=", "-", "99", ",", "alpha", "=", "0.5", ")", "ax", ".", "set_title", "(", "r'K2 Planet Hosts'", ",", "fontsize", "=", "18", ")", "ax", ".", "set_ylabel", "(", "r'Relative CDPP'", ",", "fontsize", "=", "18", ")", "ax", ".", "set_xlabel", "(", "'Kepler Magnitude'", ",", "fontsize", "=", "18", ")", "# Pickable points", "Picker", "=", "StatsPicker", "(", "[", "ax", "]", ",", "[", "kp", "]", ",", "[", "y", "]", ",", "epic", ",", "model", "=", "model", ",", "compare_to", "=", "compare_to", ")", "fig", ".", "canvas", ".", "mpl_connect", "(", "'pick_event'", ",", "Picker", ")", "# Show", "pl", ".", "show", "(", ")" ]
Computes and plots the CDPP statistics comparison between
`model` and `compare_to` for all known K2 planets.

:param str model: The :py:obj:`everest` model name
:param str compare_to: The :py:obj:`everest` model name or \
       other K2 pipeline name
[ "Computes", "and", "plots", "the", "CDPP", "statistics", "comparison", "between", "model", "and", "compare_to", "for", "all", "known", "K2", "planets", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L884-L961
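A minimal usage sketch for the record above, not part of the dataset: it assumes the everest package is installed and that PlanetStatistics is importable from everest.missions.k2, as the record's func_path_in_repository suggests; if the package does not re-export the function at that level, importing from everest.missions.k2.k2 would be the fallback.

# Hypothetical invocation; the import path is inferred from the record's
# func_path_in_repository and may need adjusting for a given install.
from everest.missions.k2 import PlanetStatistics

# Compare nPLD de-trended CDPP against the K2SFF pipeline for all known
# K2 planet hosts. An interactive matplotlib window opens; clicking a
# point fires the StatsPicker callback for that EPIC target.
PlanetStatistics(model='nPLD', compare_to='k2sff')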
rodluger/everest
everest/missions/k2/k2.py
ShortCadenceStatistics
def ShortCadenceStatistics(campaign=None, clobber=False, model='nPLD',
                           plot=True, **kwargs):
    '''
    Computes and plots the CDPP statistics comparison between
    short cadence and long cadence de-trended light curves

    :param campaign: The campaign number or list of campaign numbers. \
           Default is to plot all campaigns
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param str model: The :py:obj:`everest` model name
    :param bool plot: Default :py:obj:`True`

    '''

    # Check campaign
    if campaign is None:
        campaign = np.arange(9)
    else:
        campaign = np.atleast_1d(campaign)

    # Update model name
    model = '%s.sc' % model

    # Compute the statistics
    for camp in campaign:
        sub = np.array(GetK2Campaign(
            camp, cadence='sc', epics_only=True), dtype=int)
        outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                               'c%02d_%s.cdpp' % (int(camp), model))
        if clobber or not os.path.exists(outfile):
            with open(outfile, 'w') as f:
                print("EPIC Kp Raw CDPP " +
                      "Everest CDPP Saturated", file=f)
                print("--------- ------ --------- " +
                      "------------ ---------", file=f)
                all = GetK2Campaign(int(camp), cadence='sc')
                stars = np.array([s[0] for s in all], dtype=int)
                kpmgs = np.array([s[1] for s in all], dtype=float)
                for i, _ in enumerate(stars):
                    sys.stdout.write(
                        '\rProcessing target %d/%d...' % (i + 1, len(stars)))
                    sys.stdout.flush()
                    nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % camp,
                                      ('%09d' % stars[i])[:4] + '00000',
                                      ('%09d' % stars[i])[4:],
                                      model + '.npz')
                    try:
                        data = np.load(nf)
                        print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d}".format(
                            stars[i], kpmgs[i], data['cdppr'][()],
                            data['cdpp'][()], int(data['saturated'])), file=f)
                    except:
                        print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d}".format(
                            stars[i], kpmgs[i], np.nan, np.nan, 0), file=f)
            print("")

    if not plot:
        return

    # Running lists
    xsat = []
    ysat = []
    xunsat = []
    yunsat = []
    xall = []
    yall = []
    epics = []

    # Plot
    for camp in campaign:

        # Load all stars
        sub = np.array(GetK2Campaign(
            camp, cadence='sc', epics_only=True), dtype=int)
        outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                               'c%02d_%s.cdpp' % (int(camp), model))
        epic, kp, cdpp6r, cdpp6, saturated = np.loadtxt(
            outfile, unpack=True, skiprows=2)
        epic = np.array(epic, dtype=int)
        saturated = np.array(saturated, dtype=int)

        # Get only stars in this subcamp
        inds = np.array([e in sub for e in epic])
        epic = epic[inds]
        kp = kp[inds]

        # HACK: camp 0 magnitudes are reported only to the nearest tenth,
        # so let's add a little noise to spread them out for nicer plotting
        kp = kp + 0.1 * (0.5 - np.random.random(len(kp)))
        cdpp6r = cdpp6r[inds]
        cdpp6 = cdpp6[inds]
        saturated = saturated[inds]
        sat = np.where(saturated == 1)
        unsat = np.where(saturated == 0)
        if not np.any([not np.isnan(x) for x in cdpp6]):
            continue

        # Get the long cadence stats
        compfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                                'c%02d_%s.cdpp' % (int(camp), model[:-3]))
        epic_1, _, _, cdpp6_1, _, _, _, _, saturated = np.loadtxt(
            compfile, unpack=True, skiprows=2)
        epic_1 = np.array(epic_1, dtype=int)
        inds = np.array([e in sub for e in epic_1])
        epic_1 = epic_1[inds]
        cdpp6_1 = cdpp6_1[inds]
        cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)
        x = kp
        y = (cdpp6 - cdpp6_1) / cdpp6_1

        # Append to running lists
        xsat.extend(x[sat])
        ysat.extend(y[sat])
        xunsat.extend(x[unsat])
        yunsat.extend(y[unsat])
        xall.extend(x)
        yall.extend(y)
        epics.extend(epic)

    # Plot the equivalent of the Aigrain+16 figure
    fig, ax = pl.subplots(1)
    fig.canvas.set_window_title('K2 Short Cadence')
    ax.scatter(xunsat, yunsat, color='b', marker='.',
               alpha=0.35, zorder=-1, picker=True)
    ax.scatter(xsat, ysat, color='r', marker='.',
               alpha=0.35, zorder=-1, picker=True)
    ax.set_ylim(-1, 1)
    ax.set_xlim(8, 18)
    ax.axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
    ax.axhline(0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
    ax.axhline(-0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
    ax.set_title(r'Short Versus Long Cadence', fontsize=18)
    ax.set_ylabel(r'Relative CDPP', fontsize=18)
    ax.set_xlabel('Kepler Magnitude', fontsize=18)

    # Bin the CDPP
    yall = np.array(yall)
    xall = np.array(xall)
    bins = np.arange(7.5, 18.5, 0.5)
    by = np.zeros_like(bins) * np.nan
    for b, bin in enumerate(bins):
        i = np.where((yall > -np.inf) & (yall < np.inf) &
                     (xall >= bin - 0.5) & (xall < bin + 0.5))[0]
        if len(i) > 10:
            by[b] = np.median(yall[i])
    ax.plot(bins[:9], by[:9], 'r--', lw=2)
    ax.plot(bins[8:], by[8:], 'k-', lw=2)

    # Pickable points
    Picker = StatsPicker([ax], [xall], [yall], epics, model=model[:-3],
                         compare_to=model[:-3], cadence='sc',
                         campaign=campaign)
    fig.canvas.mpl_connect('pick_event', Picker)

    # Show
    pl.show()
python
def ShortCadenceStatistics(campaign=None, clobber=False, model='nPLD',
                           plot=True, **kwargs):
    '''
    Computes and plots the CDPP statistics comparison between
    short cadence and long cadence de-trended light curves

    :param campaign: The campaign number or list of campaign numbers. \
           Default is to plot all campaigns
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param str model: The :py:obj:`everest` model name
    :param bool plot: Default :py:obj:`True`

    '''

    # Check campaign
    if campaign is None:
        campaign = np.arange(9)
    else:
        campaign = np.atleast_1d(campaign)

    # Update model name
    model = '%s.sc' % model

    # Compute the statistics
    for camp in campaign:
        sub = np.array(GetK2Campaign(
            camp, cadence='sc', epics_only=True), dtype=int)
        outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                               'c%02d_%s.cdpp' % (int(camp), model))
        if clobber or not os.path.exists(outfile):
            with open(outfile, 'w') as f:
                print("EPIC Kp Raw CDPP " +
                      "Everest CDPP Saturated", file=f)
                print("--------- ------ --------- " +
                      "------------ ---------", file=f)
                all = GetK2Campaign(int(camp), cadence='sc')
                stars = np.array([s[0] for s in all], dtype=int)
                kpmgs = np.array([s[1] for s in all], dtype=float)
                for i, _ in enumerate(stars):
                    sys.stdout.write(
                        '\rProcessing target %d/%d...' % (i + 1, len(stars)))
                    sys.stdout.flush()
                    nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % camp,
                                      ('%09d' % stars[i])[:4] + '00000',
                                      ('%09d' % stars[i])[4:],
                                      model + '.npz')
                    try:
                        data = np.load(nf)
                        print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d}".format(
                            stars[i], kpmgs[i], data['cdppr'][()],
                            data['cdpp'][()], int(data['saturated'])), file=f)
                    except:
                        print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d}".format(
                            stars[i], kpmgs[i], np.nan, np.nan, 0), file=f)
            print("")

    if not plot:
        return

    # Running lists
    xsat = []
    ysat = []
    xunsat = []
    yunsat = []
    xall = []
    yall = []
    epics = []

    # Plot
    for camp in campaign:

        # Load all stars
        sub = np.array(GetK2Campaign(
            camp, cadence='sc', epics_only=True), dtype=int)
        outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                               'c%02d_%s.cdpp' % (int(camp), model))
        epic, kp, cdpp6r, cdpp6, saturated = np.loadtxt(
            outfile, unpack=True, skiprows=2)
        epic = np.array(epic, dtype=int)
        saturated = np.array(saturated, dtype=int)

        # Get only stars in this subcamp
        inds = np.array([e in sub for e in epic])
        epic = epic[inds]
        kp = kp[inds]

        # HACK: camp 0 magnitudes are reported only to the nearest tenth,
        # so let's add a little noise to spread them out for nicer plotting
        kp = kp + 0.1 * (0.5 - np.random.random(len(kp)))
        cdpp6r = cdpp6r[inds]
        cdpp6 = cdpp6[inds]
        saturated = saturated[inds]
        sat = np.where(saturated == 1)
        unsat = np.where(saturated == 0)
        if not np.any([not np.isnan(x) for x in cdpp6]):
            continue

        # Get the long cadence stats
        compfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                                'c%02d_%s.cdpp' % (int(camp), model[:-3]))
        epic_1, _, _, cdpp6_1, _, _, _, _, saturated = np.loadtxt(
            compfile, unpack=True, skiprows=2)
        epic_1 = np.array(epic_1, dtype=int)
        inds = np.array([e in sub for e in epic_1])
        epic_1 = epic_1[inds]
        cdpp6_1 = cdpp6_1[inds]
        cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)
        x = kp
        y = (cdpp6 - cdpp6_1) / cdpp6_1

        # Append to running lists
        xsat.extend(x[sat])
        ysat.extend(y[sat])
        xunsat.extend(x[unsat])
        yunsat.extend(y[unsat])
        xall.extend(x)
        yall.extend(y)
        epics.extend(epic)

    # Plot the equivalent of the Aigrain+16 figure
    fig, ax = pl.subplots(1)
    fig.canvas.set_window_title('K2 Short Cadence')
    ax.scatter(xunsat, yunsat, color='b', marker='.',
               alpha=0.35, zorder=-1, picker=True)
    ax.scatter(xsat, ysat, color='r', marker='.',
               alpha=0.35, zorder=-1, picker=True)
    ax.set_ylim(-1, 1)
    ax.set_xlim(8, 18)
    ax.axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
    ax.axhline(0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
    ax.axhline(-0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
    ax.set_title(r'Short Versus Long Cadence', fontsize=18)
    ax.set_ylabel(r'Relative CDPP', fontsize=18)
    ax.set_xlabel('Kepler Magnitude', fontsize=18)

    # Bin the CDPP
    yall = np.array(yall)
    xall = np.array(xall)
    bins = np.arange(7.5, 18.5, 0.5)
    by = np.zeros_like(bins) * np.nan
    for b, bin in enumerate(bins):
        i = np.where((yall > -np.inf) & (yall < np.inf) &
                     (xall >= bin - 0.5) & (xall < bin + 0.5))[0]
        if len(i) > 10:
            by[b] = np.median(yall[i])
    ax.plot(bins[:9], by[:9], 'r--', lw=2)
    ax.plot(bins[8:], by[8:], 'k-', lw=2)

    # Pickable points
    Picker = StatsPicker([ax], [xall], [yall], epics, model=model[:-3],
                         compare_to=model[:-3], cadence='sc',
                         campaign=campaign)
    fig.canvas.mpl_connect('pick_event', Picker)

    # Show
    pl.show()
[ "def", "ShortCadenceStatistics", "(", "campaign", "=", "None", ",", "clobber", "=", "False", ",", "model", "=", "'nPLD'", ",", "plot", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# Check campaign", "if", "campaign", "is", "None", ":", "campaign", "=", "np", ".", "arange", "(", "9", ")", "else", ":", "campaign", "=", "np", ".", "atleast_1d", "(", "campaign", ")", "# Update model name", "model", "=", "'%s.sc'", "%", "model", "# Compute the statistics", "for", "camp", "in", "campaign", ":", "sub", "=", "np", ".", "array", "(", "GetK2Campaign", "(", "camp", ",", "cadence", "=", "'sc'", ",", "epics_only", "=", "True", ")", ",", "dtype", "=", "int", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "int", "(", "camp", ")", ",", "model", ")", ")", "if", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "with", "open", "(", "outfile", ",", "'w'", ")", "as", "f", ":", "print", "(", "\"EPIC Kp Raw CDPP \"", "+", "\"Everest CDPP Saturated\"", ",", "file", "=", "f", ")", "print", "(", "\"--------- ------ --------- \"", "+", "\"------------ ---------\"", ",", "file", "=", "f", ")", "all", "=", "GetK2Campaign", "(", "int", "(", "camp", ")", ",", "cadence", "=", "'sc'", ")", "stars", "=", "np", ".", "array", "(", "[", "s", "[", "0", "]", "for", "s", "in", "all", "]", ",", "dtype", "=", "int", ")", "kpmgs", "=", "np", ".", "array", "(", "[", "s", "[", "1", "]", "for", "s", "in", "all", "]", ",", "dtype", "=", "float", ")", "for", "i", ",", "_", "in", "enumerate", "(", "stars", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'\\rProcessing target %d/%d...'", "%", "(", "i", "+", "1", ",", "len", "(", "stars", ")", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "nf", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "camp", ",", "(", "'%09d'", "%", "stars", "[", "i", "]", ")", "[", ":", "4", "]", "+", "'00000'", ",", "(", "'%09d'", "%", "stars", "[", "i", "]", ")", "[", "4", ":", "]", ",", "model", "+", "'.npz'", ")", "try", ":", "data", "=", "np", ".", "load", "(", "nf", ")", "print", "(", "\"{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d}\"", ".", "format", "(", "stars", "[", "i", "]", ",", "kpmgs", "[", "i", "]", ",", "data", "[", "'cdppr'", "]", "[", "(", ")", "]", ",", "data", "[", "'cdpp'", "]", "[", "(", ")", "]", ",", "int", "(", "data", "[", "'saturated'", "]", ")", ")", ",", "file", "=", "f", ")", "except", ":", "print", "(", "\"{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d}\"", ".", "format", "(", "stars", "[", "i", "]", ",", "kpmgs", "[", "i", "]", ",", "np", ".", "nan", ",", "np", ".", "nan", ",", "0", ")", ",", "file", "=", "f", ")", "print", "(", "\"\"", ")", "if", "not", "plot", ":", "return", "# Running lists", "xsat", "=", "[", "]", "ysat", "=", "[", "]", "xunsat", "=", "[", "]", "yunsat", "=", "[", "]", "xall", "=", "[", "]", "yall", "=", "[", "]", "epics", "=", "[", "]", "# Plot", "for", "camp", "in", "campaign", ":", "# Load all stars", "sub", "=", "np", ".", "array", "(", "GetK2Campaign", "(", "camp", ",", "cadence", "=", "'sc'", ",", "epics_only", "=", "True", ")", ",", "dtype", "=", "int", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "int", "(", "camp", ")", ",", "model", ")", ")", "epic", ",", "kp", ",", "cdpp6r", ",", "cdpp6", ",", "saturated", "=", "np", ".", "loadtxt", "(", 
"outfile", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "epic", "=", "np", ".", "array", "(", "epic", ",", "dtype", "=", "int", ")", "saturated", "=", "np", ".", "array", "(", "saturated", ",", "dtype", "=", "int", ")", "# Get only stars in this subcamp", "inds", "=", "np", ".", "array", "(", "[", "e", "in", "sub", "for", "e", "in", "epic", "]", ")", "epic", "=", "epic", "[", "inds", "]", "kp", "=", "kp", "[", "inds", "]", "# HACK: camp 0 magnitudes are reported only to the nearest tenth,", "# so let's add a little noise to spread them out for nicer plotting", "kp", "=", "kp", "+", "0.1", "*", "(", "0.5", "-", "np", ".", "random", ".", "random", "(", "len", "(", "kp", ")", ")", ")", "cdpp6r", "=", "cdpp6r", "[", "inds", "]", "cdpp6", "=", "cdpp6", "[", "inds", "]", "saturated", "=", "saturated", "[", "inds", "]", "sat", "=", "np", ".", "where", "(", "saturated", "==", "1", ")", "unsat", "=", "np", ".", "where", "(", "saturated", "==", "0", ")", "if", "not", "np", ".", "any", "(", "[", "not", "np", ".", "isnan", "(", "x", ")", "for", "x", "in", "cdpp6", "]", ")", ":", "continue", "# Get the long cadence stats", "compfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "int", "(", "camp", ")", ",", "model", "[", ":", "-", "3", "]", ")", ")", "epic_1", ",", "_", ",", "_", ",", "cdpp6_1", ",", "_", ",", "_", ",", "_", ",", "_", ",", "saturated", "=", "np", ".", "loadtxt", "(", "compfile", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "epic_1", "=", "np", ".", "array", "(", "epic_1", ",", "dtype", "=", "int", ")", "inds", "=", "np", ".", "array", "(", "[", "e", "in", "sub", "for", "e", "in", "epic_1", "]", ")", "epic_1", "=", "epic_1", "[", "inds", "]", "cdpp6_1", "=", "cdpp6_1", "[", "inds", "]", "cdpp6_1", "=", "sort_like", "(", "cdpp6_1", ",", "epic", ",", "epic_1", ")", "x", "=", "kp", "y", "=", "(", "cdpp6", "-", "cdpp6_1", ")", "/", "cdpp6_1", "# Append to running lists", "xsat", ".", "extend", "(", "x", "[", "sat", "]", ")", "ysat", ".", "extend", "(", "y", "[", "sat", "]", ")", "xunsat", ".", "extend", "(", "x", "[", "unsat", "]", ")", "yunsat", ".", "extend", "(", "y", "[", "unsat", "]", ")", "xall", ".", "extend", "(", "x", ")", "yall", ".", "extend", "(", "y", ")", "epics", ".", "extend", "(", "epic", ")", "# Plot the equivalent of the Aigrain+16 figure", "fig", ",", "ax", "=", "pl", ".", "subplots", "(", "1", ")", "fig", ".", "canvas", ".", "set_window_title", "(", "'K2 Short Cadence'", ")", "ax", ".", "scatter", "(", "xunsat", ",", "yunsat", ",", "color", "=", "'b'", ",", "marker", "=", "'.'", ",", "alpha", "=", "0.35", ",", "zorder", "=", "-", "1", ",", "picker", "=", "True", ")", "ax", ".", "scatter", "(", "xsat", ",", "ysat", ",", "color", "=", "'r'", ",", "marker", "=", "'.'", ",", "alpha", "=", "0.35", ",", "zorder", "=", "-", "1", ",", "picker", "=", "True", ")", "ax", ".", "set_ylim", "(", "-", "1", ",", "1", ")", "ax", ".", "set_xlim", "(", "8", ",", "18", ")", "ax", ".", "axhline", "(", "0", ",", "color", "=", "'gray'", ",", "lw", "=", "2", ",", "zorder", "=", "-", "99", ",", "alpha", "=", "0.5", ")", "ax", ".", "axhline", "(", "0.5", ",", "color", "=", "'gray'", ",", "ls", "=", "'--'", ",", "lw", "=", "2", ",", "zorder", "=", "-", "99", ",", "alpha", "=", "0.5", ")", "ax", ".", "axhline", "(", "-", "0.5", ",", "color", "=", "'gray'", ",", "ls", "=", "'--'", ",", "lw", "=", "2", ",", "zorder", "=", "-", "99", ",", "alpha", "=", "0.5", ")", "ax", ".", 
"set_title", "(", "r'Short Versus Long Cadence'", ",", "fontsize", "=", "18", ")", "ax", ".", "set_ylabel", "(", "r'Relative CDPP'", ",", "fontsize", "=", "18", ")", "ax", ".", "set_xlabel", "(", "'Kepler Magnitude'", ",", "fontsize", "=", "18", ")", "# Bin the CDPP", "yall", "=", "np", ".", "array", "(", "yall", ")", "xall", "=", "np", ".", "array", "(", "xall", ")", "bins", "=", "np", ".", "arange", "(", "7.5", ",", "18.5", ",", "0.5", ")", "by", "=", "np", ".", "zeros_like", "(", "bins", ")", "*", "np", ".", "nan", "for", "b", ",", "bin", "in", "enumerate", "(", "bins", ")", ":", "i", "=", "np", ".", "where", "(", "(", "yall", ">", "-", "np", ".", "inf", ")", "&", "(", "yall", "<", "np", ".", "inf", ")", "&", "(", "xall", ">=", "bin", "-", "0.5", ")", "&", "(", "xall", "<", "bin", "+", "0.5", ")", ")", "[", "0", "]", "if", "len", "(", "i", ")", ">", "10", ":", "by", "[", "b", "]", "=", "np", ".", "median", "(", "yall", "[", "i", "]", ")", "ax", ".", "plot", "(", "bins", "[", ":", "9", "]", ",", "by", "[", ":", "9", "]", ",", "'r--'", ",", "lw", "=", "2", ")", "ax", ".", "plot", "(", "bins", "[", "8", ":", "]", ",", "by", "[", "8", ":", "]", ",", "'k-'", ",", "lw", "=", "2", ")", "# Pickable points", "Picker", "=", "StatsPicker", "(", "[", "ax", "]", ",", "[", "xall", "]", ",", "[", "yall", "]", ",", "epics", ",", "model", "=", "model", "[", ":", "-", "3", "]", ",", "compare_to", "=", "model", "[", ":", "-", "3", "]", ",", "cadence", "=", "'sc'", ",", "campaign", "=", "campaign", ")", "fig", ".", "canvas", ".", "mpl_connect", "(", "'pick_event'", ",", "Picker", ")", "# Show", "pl", ".", "show", "(", ")" ]
Computes and plots the CDPP statistics comparison between
short cadence and long cadence de-trended light curves

:param campaign: The campaign number or list of campaign numbers. \
       Default is to plot all campaigns
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param str model: The :py:obj:`everest` model name
:param bool plot: Default :py:obj:`True`
[ "Computes", "and", "plots", "the", "CDPP", "statistics", "comparison", "between", "short", "cadence", "and", "long", "cadence", "de", "-", "trended", "light", "curves" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L964-L1119
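A short aside, not part of the dataset: every function in this file draws its trend lines with the same magnitude-binned median pattern, the median of the finite y values in 1-magnitude-wide windows centered on a half-magnitude grid, skipping sparse bins. Below is a self-contained sketch of that pattern; binned_median is a hypothetical helper name and the synthetic data is illustrative only. The source's `(y > -np.inf) & (y < np.inf)` mask is just a finiteness test, written here as np.isfinite.

import numpy as np

def binned_median(x, y, bins=np.arange(7.5, 18.5, 0.5), min_points=10):
    """Median of y in 1-mag windows centered on `bins`; NaN where sparse."""
    by = np.full_like(bins, np.nan, dtype=float)
    for b, center in enumerate(bins):
        i = np.where(np.isfinite(y) &
                     (x >= center - 0.5) & (x < center + 0.5))[0]
        if len(i) > min_points:  # matches the source's len(i) > 10 cut
            by[b] = np.median(y[i])
    return by

# Synthetic example: relative CDPP vs. Kepler magnitude
rng = np.random.default_rng(0)
kp = rng.uniform(8, 18, 5000)
y = 0.05 * (kp - 13) + rng.normal(0, 0.2, kp.size)
print(binned_median(kp, y))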
rodluger/everest
everest/missions/k2/k2.py
Statistics
def Statistics(season=None, clobber=False, model='nPLD', injection=False,
               compare_to='kepler', plot=True, cadence='lc', planets=False,
               **kwargs):
    '''
    Computes and plots the CDPP statistics comparison between `model`
    and `compare_to` for all long cadence light curves in a given campaign

    :param season: The campaign number or list of campaign numbers. \
           Default is to plot all campaigns
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param str model: The :py:obj:`everest` model name
    :param str compare_to: The :py:obj:`everest` model name or other \
           K2 pipeline name
    :param bool plot: Default :py:obj:`True`
    :param bool injection: Statistics for injection tests? Default \
           :py:obj:`False`
    :param bool planets: Statistics for known K2 planets? \
           Default :py:obj:`False`

    '''

    # Multi-mission compatibility
    campaign = season

    # Is this short cadence?
    if cadence == 'sc':
        return ShortCadenceStatistics(campaign=campaign, clobber=clobber,
                                      model=model, plot=plot, **kwargs)

    # Check the campaign
    if campaign is None:
        campaign = 0

    # Planet hosts only?
    if planets:
        return PlanetStatistics(model=model, compare_to=compare_to, **kwargs)

    # Is this an injection run?
    if injection:
        return InjectionStatistics(campaign=campaign, clobber=clobber,
                                   model=model, plot=plot, **kwargs)

    # Compute the statistics
    sub = np.array([s[0] for s in GetK2Campaign(campaign)], dtype=int)
    outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                           'c%02d_%s.cdpp' % (int(campaign), model))
    if clobber or not os.path.exists(outfile):
        with open(outfile, 'w') as f:
            print("EPIC Kp Raw CDPP Everest CDPP" +
                  " Validation Outliers[1] Outliers[2] " +
                  "Datapoints Saturated", file=f)
            print("--------- ------ --------- ------------" +
                  " ---------- ----------- ----------- " +
                  "---------- ---------", file=f)
            all = GetK2Campaign(int(campaign))
            stars = np.array([s[0] for s in all], dtype=int)
            kpmgs = np.array([s[1] for s in all], dtype=float)
            for i, _ in enumerate(stars):
                sys.stdout.write('\rProcessing target %d/%d...' %
                                 (i + 1, len(stars)))
                sys.stdout.flush()
                nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
                                  ('%09d' % stars[i])[:4] + '00000',
                                  ('%09d' % stars[i])[4:],
                                  model + '.npz')
                try:
                    data = np.load(nf)

                    # Remove NaNs and flagged cadences
                    flux = np.delete(data['fraw'] - data['model'], np.array(
                        list(set(np.concatenate([data['nanmask'],
                                                 data['badmask']])))))

                    # Iterative sigma clipping to get 5 sigma outliers
                    inds = np.array([], dtype=int)
                    m = 1
                    while len(inds) < m:
                        m = len(inds)
                        ff = SavGol(np.delete(flux, inds))
                        med = np.nanmedian(ff)
                        MAD = 1.4826 * np.nanmedian(np.abs(ff - med))
                        inds = np.append(inds, np.where(
                            (ff > med + 5. * MAD) |
                            (ff < med - 5. * MAD))[0])
                    nout = len(inds)
                    ntot = len(flux)

                    # HACK: Backwards compatibility fix
                    try:
                        cdpp = data['cdpp'][()]
                    except KeyError:
                        cdpp = data['cdpp6'][()]
                    print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d} {:>15d} {:>15d} {:>15d}".format(
                        stars[i], kpmgs[i], data['cdppr'][()], cdpp,
                        data['cdppv'][()], len(data['outmask']),
                        nout, ntot, int(data['saturated'])), file=f)
                except:
                    print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d} {:>15d} {:>15d} {:>15d}".format(
                        stars[i], kpmgs[i], np.nan, np.nan, np.nan,
                        0, 0, 0, 0), file=f)
        print("")

    if plot:

        # Load all stars
        epic, kp, cdpp6r, cdpp6, cdpp6v, _, out, tot, saturated = np.loadtxt(
            outfile, unpack=True, skiprows=2)
        epic = np.array(epic, dtype=int)
        out = np.array(out, dtype=int)
        tot = np.array(tot, dtype=int)
        saturated = np.array(saturated, dtype=int)

        # Get only stars in this subcampaign
        inds = np.array([e in sub for e in epic])
        epic = epic[inds]
        kp = kp[inds]

        # HACK: Campaign 0 magnitudes are reported only to the nearest tenth,
        # so let's add a little noise to spread them out for nicer plotting
        kp = kp + 0.1 * (0.5 - np.random.random(len(kp)))
        cdpp6r = cdpp6r[inds]
        cdpp6 = cdpp6[inds]
        cdpp6v = cdpp6v[inds]
        out = out[inds]
        tot = tot[inds]
        saturated = saturated[inds]
        sat = np.where(saturated == 1)
        unsat = np.where(saturated == 0)
        if not np.any([not np.isnan(x) for x in cdpp6]):
            raise Exception("No targets to plot.")

        # Control transparency
        alpha_kepler = 0.03
        alpha_unsat = min(0.1, 2000. / (1 + len(unsat[0])))
        alpha_sat = min(1., 180. / (1 + len(sat[0])))

        # Get the comparison model stats
        if compare_to.lower() == 'everest1':
            epic_1, cdpp6_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_everest1.cdpp' % int(campaign)),
                unpack=True)
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)

            # Outliers
            epic_1, out_1, tot_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_everest1.out' % int(campaign)),
                unpack=True)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)
        elif compare_to.lower() == 'k2sc':
            epic_1, cdpp6_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_k2sc.cdpp' % int(campaign)),
                unpack=True)
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)

            # Outliers
            epic_1, out_1, tot_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_k2sc.out' % int(campaign)),
                unpack=True)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)
        elif compare_to.lower() == 'k2sff':
            epic_1, cdpp6_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_k2sff.cdpp' % int(campaign)),
                unpack=True)
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)

            # Outliers
            epic_1, out_1, tot_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_k2sff.out' % int(campaign)),
                unpack=True)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)
        elif compare_to.lower() == 'kepler':
            kic, kepler_kp, kepler_cdpp6 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'kepler.cdpp'), unpack=True)
        else:
            compfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                                    'c%02d_%s.cdpp' % (int(campaign),
                                                       compare_to))
            epic_1, _, _, cdpp6_1, _, _, out_1, tot_1, saturated = np.loadtxt(
                compfile, unpack=True, skiprows=2)
            epic_1 = np.array(epic_1, dtype=int)
            inds = np.array([e in sub for e in epic_1])
            epic_1 = epic_1[inds]
            cdpp6_1 = cdpp6_1[inds]
            out_1 = out_1[inds]
            tot_1 = tot_1[inds]
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)

        # ------ 1. Plot cdpp vs. mag
        if compare_to.lower() != 'kepler':
            fig = pl.figure(figsize=(16, 5))
            ax = [pl.subplot2grid((120, 120), (0, 0),
                                  colspan=35, rowspan=120),
                  pl.subplot2grid((120, 120), (0, 40),
                                  colspan=35, rowspan=120),
                  pl.subplot2grid((120, 120), (0, 80),
                                  colspan=35, rowspan=55),
                  pl.subplot2grid((120, 120), (65, 80),
                                  colspan=35, rowspan=55)]
        else:
            fig = pl.figure(figsize=(12, 5))
            ax = [pl.subplot2grid((120, 75), (0, 0),
                                  colspan=35, rowspan=120),
                  None,
                  pl.subplot2grid((120, 75), (0, 40),
                                  colspan=35, rowspan=55),
                  pl.subplot2grid((120, 75), (65, 40),
                                  colspan=35, rowspan=55)]
        fig.canvas.set_window_title(
            'K2 Campaign %s: %s versus %s' % (campaign, model, compare_to))
        fig.subplots_adjust(left=0.05, right=0.95, bottom=0.125, top=0.9)
        bins = np.arange(7.5, 18.5, 0.5)
        if compare_to.lower() != 'kepler':
            ax[0].scatter(kp[unsat], cdpp6_1[unsat], color='y',
                          marker='.', alpha=alpha_unsat)
            ax[0].scatter(kp[sat], cdpp6_1[sat], color='y',
                          marker='s', alpha=alpha_sat, s=5)
            ax[0].scatter(kp[unsat], cdpp6[unsat], color='b',
                          marker='.', alpha=alpha_unsat, picker=True)
            ax[0].scatter(kp[sat], cdpp6[sat], color='b',
                          marker='s', alpha=alpha_sat, s=5, picker=True)
            for y, style in zip([cdpp6_1, cdpp6], ['yo', 'bo']):
                by = np.zeros_like(bins) * np.nan
                for b, bin in enumerate(bins):
                    i = np.where((y > -np.inf) & (y < np.inf) &
                                 (kp >= bin - 0.5) & (kp < bin + 0.5))[0]
                    if len(i) > 10:
                        by[b] = np.median(y[i])
                ax[0].plot(bins, by, style, markeredgecolor='w')
        else:
            ax[0].scatter(kepler_kp, kepler_cdpp6, color='y',
                          marker='.', alpha=alpha_kepler)
            ax[0].scatter(kp, cdpp6, color='b', marker='.',
                          alpha=alpha_unsat, picker=True)
            for x, y, style in zip([kepler_kp, kp], [kepler_cdpp6, cdpp6],
                                   ['yo', 'bo']):
                by = np.zeros_like(bins) * np.nan
                for b, bin in enumerate(bins):
                    i = np.where((y > -np.inf) & (y < np.inf) &
                                 (x >= bin - 0.5) & (x < bin + 0.5))[0]
                    if len(i) > 10:
                        by[b] = np.median(y[i])
                ax[0].plot(bins, by, style, markeredgecolor='w')
        ax[0].set_ylim(-10, 500)
        ax[0].set_xlim(8, 18)
        ax[0].set_xlabel('Kepler Magnitude', fontsize=18)
        ax[0].set_title('CDPP6 (ppm)', fontsize=18)

        # ------ 2. Plot the equivalent of the Aigrain+16 figure
        if compare_to.lower() != 'kepler':
            x = kp
            y = (cdpp6 - cdpp6_1) / cdpp6_1
            yv = (cdpp6v - cdpp6_1) / cdpp6_1
            ax[1].scatter(x[unsat], y[unsat], color='b', marker='.',
                          alpha=alpha_unsat, zorder=-1, picker=True)
            ax[1].scatter(x[sat], y[sat], color='r', marker='.',
                          alpha=alpha_sat, zorder=-1, picker=True)
            ax[1].set_ylim(-1, 1)
            ax[1].set_xlim(8, 18)
            ax[1].axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
            ax[1].axhline(0.5, color='gray', ls='--', lw=2,
                          zorder=-99, alpha=0.5)
            ax[1].axhline(-0.5, color='gray', ls='--', lw=2,
                          zorder=-99, alpha=0.5)
            bins = np.arange(7.5, 18.5, 0.5)

            # Bin the CDPP
            by = np.zeros_like(bins) * np.nan
            for b, bin in enumerate(bins):
                i = np.where((y > -np.inf) & (y < np.inf) &
                             (x >= bin - 0.5) & (x < bin + 0.5))[0]
                if len(i) > 10:
                    by[b] = np.median(y[i])
            ax[1].plot(bins[:9], by[:9], 'k--', lw=2)
            ax[1].plot(bins[8:], by[8:], 'k-', lw=2)
            ax[1].set_title(r'Relative CDPP', fontsize=18)
            ax[1].set_xlabel('Kepler Magnitude', fontsize=18)

        # ------ 3. Plot the outliers
        i = np.argsort(out)
        a = int(0.95 * len(out))
        omax = out[i][a]
        if compare_to.lower() != 'kepler':
            j = np.argsort(out_1)
            b = int(0.95 * len(out_1))
            omax = max(omax, out_1[j][b])
        ax[2].hist(out, 25, range=(0, omax), histtype='step', color='b')
        if compare_to.lower() != 'kepler':
            ax[2].hist(out_1, 25, range=(0, omax), histtype='step', color='y')
        ax[2].margins(0, None)
        ax[2].set_title('Number of Outliers', fontsize=18)

        # Plot the total number of data points
        i = np.argsort(tot)
        a = int(0.05 * len(tot))
        b = int(0.95 * len(tot))
        tmin = tot[i][a]
        tmax = tot[i][b]
        if compare_to.lower() != 'kepler':
            j = np.argsort(tot_1)
            c = int(0.05 * len(tot_1))
            d = int(0.95 * len(tot_1))
            tmin = min(tmin, tot_1[j][c])
            tmax = max(tmax, tot_1[j][d])
        ax[3].hist(tot, 25, range=(tmin, tmax), histtype='step', color='b')
        if compare_to.lower() != 'kepler':
            ax[3].hist(tot_1, 25, range=(tmin, tmax),
                       histtype='step', color='y')
        ax[3].margins(0, None)
        ax[3].set_xlabel('Number of Data Points', fontsize=18)

        # Pickable points
        Picker = StatsPicker([ax[0], ax[1]], [kp, kp], [cdpp6, y], epic,
                             model=model, compare_to=compare_to,
                             campaign=campaign)
        fig.canvas.mpl_connect('pick_event', Picker)

        # Show
        pl.show()
python
def Statistics(season=None, clobber=False, model='nPLD', injection=False,
               compare_to='kepler', plot=True, cadence='lc', planets=False,
               **kwargs):
    '''
    Computes and plots the CDPP statistics comparison between `model`
    and `compare_to` for all long cadence light curves in a given campaign

    :param season: The campaign number or list of campaign numbers. \
           Default is to plot all campaigns
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param str model: The :py:obj:`everest` model name
    :param str compare_to: The :py:obj:`everest` model name or other \
           K2 pipeline name
    :param bool plot: Default :py:obj:`True`
    :param bool injection: Statistics for injection tests? Default \
           :py:obj:`False`
    :param bool planets: Statistics for known K2 planets? \
           Default :py:obj:`False`

    '''

    # Multi-mission compatibility
    campaign = season

    # Is this short cadence?
    if cadence == 'sc':
        return ShortCadenceStatistics(campaign=campaign, clobber=clobber,
                                      model=model, plot=plot, **kwargs)

    # Check the campaign
    if campaign is None:
        campaign = 0

    # Planet hosts only?
    if planets:
        return PlanetStatistics(model=model, compare_to=compare_to, **kwargs)

    # Is this an injection run?
    if injection:
        return InjectionStatistics(campaign=campaign, clobber=clobber,
                                   model=model, plot=plot, **kwargs)

    # Compute the statistics
    sub = np.array([s[0] for s in GetK2Campaign(campaign)], dtype=int)
    outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                           'c%02d_%s.cdpp' % (int(campaign), model))
    if clobber or not os.path.exists(outfile):
        with open(outfile, 'w') as f:
            print("EPIC Kp Raw CDPP Everest CDPP" +
                  " Validation Outliers[1] Outliers[2] " +
                  "Datapoints Saturated", file=f)
            print("--------- ------ --------- ------------" +
                  " ---------- ----------- ----------- " +
                  "---------- ---------", file=f)
            all = GetK2Campaign(int(campaign))
            stars = np.array([s[0] for s in all], dtype=int)
            kpmgs = np.array([s[1] for s in all], dtype=float)
            for i, _ in enumerate(stars):
                sys.stdout.write('\rProcessing target %d/%d...' %
                                 (i + 1, len(stars)))
                sys.stdout.flush()
                nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
                                  ('%09d' % stars[i])[:4] + '00000',
                                  ('%09d' % stars[i])[4:],
                                  model + '.npz')
                try:
                    data = np.load(nf)

                    # Remove NaNs and flagged cadences
                    flux = np.delete(data['fraw'] - data['model'], np.array(
                        list(set(np.concatenate([data['nanmask'],
                                                 data['badmask']])))))

                    # Iterative sigma clipping to get 5 sigma outliers
                    inds = np.array([], dtype=int)
                    m = 1
                    while len(inds) < m:
                        m = len(inds)
                        ff = SavGol(np.delete(flux, inds))
                        med = np.nanmedian(ff)
                        MAD = 1.4826 * np.nanmedian(np.abs(ff - med))
                        inds = np.append(inds, np.where(
                            (ff > med + 5. * MAD) |
                            (ff < med - 5. * MAD))[0])
                    nout = len(inds)
                    ntot = len(flux)

                    # HACK: Backwards compatibility fix
                    try:
                        cdpp = data['cdpp'][()]
                    except KeyError:
                        cdpp = data['cdpp6'][()]
                    print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d} {:>15d} {:>15d} {:>15d}".format(
                        stars[i], kpmgs[i], data['cdppr'][()], cdpp,
                        data['cdppv'][()], len(data['outmask']),
                        nout, ntot, int(data['saturated'])), file=f)
                except:
                    print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d} {:>15d} {:>15d} {:>15d}".format(
                        stars[i], kpmgs[i], np.nan, np.nan, np.nan,
                        0, 0, 0, 0), file=f)
        print("")

    if plot:

        # Load all stars
        epic, kp, cdpp6r, cdpp6, cdpp6v, _, out, tot, saturated = np.loadtxt(
            outfile, unpack=True, skiprows=2)
        epic = np.array(epic, dtype=int)
        out = np.array(out, dtype=int)
        tot = np.array(tot, dtype=int)
        saturated = np.array(saturated, dtype=int)

        # Get only stars in this subcampaign
        inds = np.array([e in sub for e in epic])
        epic = epic[inds]
        kp = kp[inds]

        # HACK: Campaign 0 magnitudes are reported only to the nearest tenth,
        # so let's add a little noise to spread them out for nicer plotting
        kp = kp + 0.1 * (0.5 - np.random.random(len(kp)))
        cdpp6r = cdpp6r[inds]
        cdpp6 = cdpp6[inds]
        cdpp6v = cdpp6v[inds]
        out = out[inds]
        tot = tot[inds]
        saturated = saturated[inds]
        sat = np.where(saturated == 1)
        unsat = np.where(saturated == 0)
        if not np.any([not np.isnan(x) for x in cdpp6]):
            raise Exception("No targets to plot.")

        # Control transparency
        alpha_kepler = 0.03
        alpha_unsat = min(0.1, 2000. / (1 + len(unsat[0])))
        alpha_sat = min(1., 180. / (1 + len(sat[0])))

        # Get the comparison model stats
        if compare_to.lower() == 'everest1':
            epic_1, cdpp6_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_everest1.cdpp' % int(campaign)),
                unpack=True)
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)

            # Outliers
            epic_1, out_1, tot_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_everest1.out' % int(campaign)),
                unpack=True)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)
        elif compare_to.lower() == 'k2sc':
            epic_1, cdpp6_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_k2sc.cdpp' % int(campaign)),
                unpack=True)
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)

            # Outliers
            epic_1, out_1, tot_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_k2sc.out' % int(campaign)),
                unpack=True)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)
        elif compare_to.lower() == 'k2sff':
            epic_1, cdpp6_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_k2sff.cdpp' % int(campaign)),
                unpack=True)
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)

            # Outliers
            epic_1, out_1, tot_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_k2sff.out' % int(campaign)),
                unpack=True)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)
        elif compare_to.lower() == 'kepler':
            kic, kepler_kp, kepler_cdpp6 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'kepler.cdpp'), unpack=True)
        else:
            compfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                                    'c%02d_%s.cdpp' % (int(campaign),
                                                       compare_to))
            epic_1, _, _, cdpp6_1, _, _, out_1, tot_1, saturated = np.loadtxt(
                compfile, unpack=True, skiprows=2)
            epic_1 = np.array(epic_1, dtype=int)
            inds = np.array([e in sub for e in epic_1])
            epic_1 = epic_1[inds]
            cdpp6_1 = cdpp6_1[inds]
            out_1 = out_1[inds]
            tot_1 = tot_1[inds]
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)

        # ------ 1. Plot cdpp vs. mag
        if compare_to.lower() != 'kepler':
            fig = pl.figure(figsize=(16, 5))
            ax = [pl.subplot2grid((120, 120), (0, 0),
                                  colspan=35, rowspan=120),
                  pl.subplot2grid((120, 120), (0, 40),
                                  colspan=35, rowspan=120),
                  pl.subplot2grid((120, 120), (0, 80),
                                  colspan=35, rowspan=55),
                  pl.subplot2grid((120, 120), (65, 80),
                                  colspan=35, rowspan=55)]
        else:
            fig = pl.figure(figsize=(12, 5))
            ax = [pl.subplot2grid((120, 75), (0, 0),
                                  colspan=35, rowspan=120),
                  None,
                  pl.subplot2grid((120, 75), (0, 40),
                                  colspan=35, rowspan=55),
                  pl.subplot2grid((120, 75), (65, 40),
                                  colspan=35, rowspan=55)]
        fig.canvas.set_window_title(
            'K2 Campaign %s: %s versus %s' % (campaign, model, compare_to))
        fig.subplots_adjust(left=0.05, right=0.95, bottom=0.125, top=0.9)
        bins = np.arange(7.5, 18.5, 0.5)
        if compare_to.lower() != 'kepler':
            ax[0].scatter(kp[unsat], cdpp6_1[unsat], color='y',
                          marker='.', alpha=alpha_unsat)
            ax[0].scatter(kp[sat], cdpp6_1[sat], color='y',
                          marker='s', alpha=alpha_sat, s=5)
            ax[0].scatter(kp[unsat], cdpp6[unsat], color='b',
                          marker='.', alpha=alpha_unsat, picker=True)
            ax[0].scatter(kp[sat], cdpp6[sat], color='b',
                          marker='s', alpha=alpha_sat, s=5, picker=True)
            for y, style in zip([cdpp6_1, cdpp6], ['yo', 'bo']):
                by = np.zeros_like(bins) * np.nan
                for b, bin in enumerate(bins):
                    i = np.where((y > -np.inf) & (y < np.inf) &
                                 (kp >= bin - 0.5) & (kp < bin + 0.5))[0]
                    if len(i) > 10:
                        by[b] = np.median(y[i])
                ax[0].plot(bins, by, style, markeredgecolor='w')
        else:
            ax[0].scatter(kepler_kp, kepler_cdpp6, color='y',
                          marker='.', alpha=alpha_kepler)
            ax[0].scatter(kp, cdpp6, color='b', marker='.',
                          alpha=alpha_unsat, picker=True)
            for x, y, style in zip([kepler_kp, kp], [kepler_cdpp6, cdpp6],
                                   ['yo', 'bo']):
                by = np.zeros_like(bins) * np.nan
                for b, bin in enumerate(bins):
                    i = np.where((y > -np.inf) & (y < np.inf) &
                                 (x >= bin - 0.5) & (x < bin + 0.5))[0]
                    if len(i) > 10:
                        by[b] = np.median(y[i])
                ax[0].plot(bins, by, style, markeredgecolor='w')
        ax[0].set_ylim(-10, 500)
        ax[0].set_xlim(8, 18)
        ax[0].set_xlabel('Kepler Magnitude', fontsize=18)
        ax[0].set_title('CDPP6 (ppm)', fontsize=18)

        # ------ 2. Plot the equivalent of the Aigrain+16 figure
        if compare_to.lower() != 'kepler':
            x = kp
            y = (cdpp6 - cdpp6_1) / cdpp6_1
            yv = (cdpp6v - cdpp6_1) / cdpp6_1
            ax[1].scatter(x[unsat], y[unsat], color='b', marker='.',
                          alpha=alpha_unsat, zorder=-1, picker=True)
            ax[1].scatter(x[sat], y[sat], color='r', marker='.',
                          alpha=alpha_sat, zorder=-1, picker=True)
            ax[1].set_ylim(-1, 1)
            ax[1].set_xlim(8, 18)
            ax[1].axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
            ax[1].axhline(0.5, color='gray', ls='--', lw=2,
                          zorder=-99, alpha=0.5)
            ax[1].axhline(-0.5, color='gray', ls='--', lw=2,
                          zorder=-99, alpha=0.5)
            bins = np.arange(7.5, 18.5, 0.5)

            # Bin the CDPP
            by = np.zeros_like(bins) * np.nan
            for b, bin in enumerate(bins):
                i = np.where((y > -np.inf) & (y < np.inf) &
                             (x >= bin - 0.5) & (x < bin + 0.5))[0]
                if len(i) > 10:
                    by[b] = np.median(y[i])
            ax[1].plot(bins[:9], by[:9], 'k--', lw=2)
            ax[1].plot(bins[8:], by[8:], 'k-', lw=2)
            ax[1].set_title(r'Relative CDPP', fontsize=18)
            ax[1].set_xlabel('Kepler Magnitude', fontsize=18)

        # ------ 3. Plot the outliers
        i = np.argsort(out)
        a = int(0.95 * len(out))
        omax = out[i][a]
        if compare_to.lower() != 'kepler':
            j = np.argsort(out_1)
            b = int(0.95 * len(out_1))
            omax = max(omax, out_1[j][b])
        ax[2].hist(out, 25, range=(0, omax), histtype='step', color='b')
        if compare_to.lower() != 'kepler':
            ax[2].hist(out_1, 25, range=(0, omax), histtype='step', color='y')
        ax[2].margins(0, None)
        ax[2].set_title('Number of Outliers', fontsize=18)

        # Plot the total number of data points
        i = np.argsort(tot)
        a = int(0.05 * len(tot))
        b = int(0.95 * len(tot))
        tmin = tot[i][a]
        tmax = tot[i][b]
        if compare_to.lower() != 'kepler':
            j = np.argsort(tot_1)
            c = int(0.05 * len(tot_1))
            d = int(0.95 * len(tot_1))
            tmin = min(tmin, tot_1[j][c])
            tmax = max(tmax, tot_1[j][d])
        ax[3].hist(tot, 25, range=(tmin, tmax), histtype='step', color='b')
        if compare_to.lower() != 'kepler':
            ax[3].hist(tot_1, 25, range=(tmin, tmax),
                       histtype='step', color='y')
        ax[3].margins(0, None)
        ax[3].set_xlabel('Number of Data Points', fontsize=18)

        # Pickable points
        Picker = StatsPicker([ax[0], ax[1]], [kp, kp], [cdpp6, y], epic,
                             model=model, compare_to=compare_to,
                             campaign=campaign)
        fig.canvas.mpl_connect('pick_event', Picker)

        # Show
        pl.show()
[ "def", "Statistics", "(", "season", "=", "None", ",", "clobber", "=", "False", ",", "model", "=", "'nPLD'", ",", "injection", "=", "False", ",", "compare_to", "=", "'kepler'", ",", "plot", "=", "True", ",", "cadence", "=", "'lc'", ",", "planets", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Multi-mission compatibility", "campaign", "=", "season", "# Is this short cadence?", "if", "cadence", "==", "'sc'", ":", "return", "ShortCadenceStatistics", "(", "campaign", "=", "campaign", ",", "clobber", "=", "clobber", ",", "model", "=", "model", ",", "plot", "=", "plot", ",", "*", "*", "kwargs", ")", "# Check the campaign", "if", "campaign", "is", "None", ":", "campaign", "=", "0", "# Planet hosts only?", "if", "planets", ":", "return", "PlanetStatistics", "(", "model", "=", "model", ",", "compare_to", "=", "compare_to", ",", "*", "*", "kwargs", ")", "# Is this an injection run?", "if", "injection", ":", "return", "InjectionStatistics", "(", "campaign", "=", "campaign", ",", "clobber", "=", "clobber", ",", "model", "=", "model", ",", "plot", "=", "plot", ",", "*", "*", "kwargs", ")", "# Compute the statistics", "sub", "=", "np", ".", "array", "(", "[", "s", "[", "0", "]", "for", "s", "in", "GetK2Campaign", "(", "campaign", ")", "]", ",", "dtype", "=", "int", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "int", "(", "campaign", ")", ",", "model", ")", ")", "if", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "with", "open", "(", "outfile", ",", "'w'", ")", "as", "f", ":", "print", "(", "\"EPIC Kp Raw CDPP Everest CDPP\"", "+", "\" Validation Outliers[1] Outliers[2] \"", "+", "\"Datapoints Saturated\"", ",", "file", "=", "f", ")", "print", "(", "\"--------- ------ --------- ------------\"", "+", "\" ---------- ----------- ----------- \"", "+", "\"---------- ---------\"", ",", "file", "=", "f", ")", "all", "=", "GetK2Campaign", "(", "int", "(", "campaign", ")", ")", "stars", "=", "np", ".", "array", "(", "[", "s", "[", "0", "]", "for", "s", "in", "all", "]", ",", "dtype", "=", "int", ")", "kpmgs", "=", "np", ".", "array", "(", "[", "s", "[", "1", "]", "for", "s", "in", "all", "]", ",", "dtype", "=", "float", ")", "for", "i", ",", "_", "in", "enumerate", "(", "stars", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'\\rProcessing target %d/%d...'", "%", "(", "i", "+", "1", ",", "len", "(", "stars", ")", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "nf", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "campaign", ",", "(", "'%09d'", "%", "stars", "[", "i", "]", ")", "[", ":", "4", "]", "+", "'00000'", ",", "(", "'%09d'", "%", "stars", "[", "i", "]", ")", "[", "4", ":", "]", ",", "model", "+", "'.npz'", ")", "try", ":", "data", "=", "np", ".", "load", "(", "nf", ")", "# Remove NaNs and flagged cadences", "flux", "=", "np", ".", "delete", "(", "data", "[", "'fraw'", "]", "-", "data", "[", "'model'", "]", ",", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "data", "[", "'nanmask'", "]", ",", "data", "[", "'badmask'", "]", "]", ")", ")", ")", ")", ")", "# Iterative sigma clipping to get 5 sigma outliers", "inds", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "int", ")", "m", "=", "1", "while", "len", "(", "inds", ")", "<", "m", ":", "m", "=", "len", "(", "inds", ")", "ff", "=", "SavGol", "(", "np", ".", "delete", "(", "flux", ",", "inds", ")", 
")", "med", "=", "np", ".", "nanmedian", "(", "ff", ")", "MAD", "=", "1.4826", "*", "np", ".", "nanmedian", "(", "np", ".", "abs", "(", "ff", "-", "med", ")", ")", "inds", "=", "np", ".", "append", "(", "inds", ",", "np", ".", "where", "(", "(", "ff", ">", "med", "+", "5.", "*", "MAD", ")", "|", "(", "ff", "<", "med", "-", "5.", "*", "MAD", ")", ")", "[", "0", "]", ")", "nout", "=", "len", "(", "inds", ")", "ntot", "=", "len", "(", "flux", ")", "# HACK: Backwards compatibility fix", "try", ":", "cdpp", "=", "data", "[", "'cdpp'", "]", "[", "(", ")", "]", "except", "KeyError", ":", "cdpp", "=", "data", "[", "'cdpp6'", "]", "[", "(", ")", "]", "print", "(", "\"{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d} {:>15d} {:>15d} {:>15d}\"", ".", "format", "(", "stars", "[", "i", "]", ",", "kpmgs", "[", "i", "]", ",", "data", "[", "'cdppr'", "]", "[", "(", ")", "]", ",", "cdpp", ",", "data", "[", "'cdppv'", "]", "[", "(", ")", "]", ",", "len", "(", "data", "[", "'outmask'", "]", ")", ",", "nout", ",", "ntot", ",", "int", "(", "data", "[", "'saturated'", "]", ")", ")", ",", "file", "=", "f", ")", "except", ":", "print", "(", "\"{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d} {:>15d} {:>15d} {:>15d}\"", ".", "format", "(", "stars", "[", "i", "]", ",", "kpmgs", "[", "i", "]", ",", "np", ".", "nan", ",", "np", ".", "nan", ",", "np", ".", "nan", ",", "0", ",", "0", ",", "0", ",", "0", ")", ",", "file", "=", "f", ")", "print", "(", "\"\"", ")", "if", "plot", ":", "# Load all stars", "epic", ",", "kp", ",", "cdpp6r", ",", "cdpp6", ",", "cdpp6v", ",", "_", ",", "out", ",", "tot", ",", "saturated", "=", "np", ".", "loadtxt", "(", "outfile", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "epic", "=", "np", ".", "array", "(", "epic", ",", "dtype", "=", "int", ")", "out", "=", "np", ".", "array", "(", "out", ",", "dtype", "=", "int", ")", "tot", "=", "np", ".", "array", "(", "tot", ",", "dtype", "=", "int", ")", "saturated", "=", "np", ".", "array", "(", "saturated", ",", "dtype", "=", "int", ")", "# Get only stars in this subcampaign", "inds", "=", "np", ".", "array", "(", "[", "e", "in", "sub", "for", "e", "in", "epic", "]", ")", "epic", "=", "epic", "[", "inds", "]", "kp", "=", "kp", "[", "inds", "]", "# HACK: Campaign 0 magnitudes are reported only to the nearest tenth,", "# so let's add a little noise to spread them out for nicer plotting", "kp", "=", "kp", "+", "0.1", "*", "(", "0.5", "-", "np", ".", "random", ".", "random", "(", "len", "(", "kp", ")", ")", ")", "cdpp6r", "=", "cdpp6r", "[", "inds", "]", "cdpp6", "=", "cdpp6", "[", "inds", "]", "cdpp6v", "=", "cdpp6v", "[", "inds", "]", "out", "=", "out", "[", "inds", "]", "tot", "=", "tot", "[", "inds", "]", "saturated", "=", "saturated", "[", "inds", "]", "sat", "=", "np", ".", "where", "(", "saturated", "==", "1", ")", "unsat", "=", "np", ".", "where", "(", "saturated", "==", "0", ")", "if", "not", "np", ".", "any", "(", "[", "not", "np", ".", "isnan", "(", "x", ")", "for", "x", "in", "cdpp6", "]", ")", ":", "raise", "Exception", "(", "\"No targets to plot.\"", ")", "# Control transparency", "alpha_kepler", "=", "0.03", "alpha_unsat", "=", "min", "(", "0.1", ",", "2000.", "/", "(", "1", "+", "len", "(", "unsat", "[", "0", "]", ")", ")", ")", "alpha_sat", "=", "min", "(", "1.", ",", "180.", "/", "(", "1", "+", "len", "(", "sat", "[", "0", "]", ")", ")", ")", "# Get the comparison model stats", "if", "compare_to", ".", "lower", "(", ")", "==", "'everest1'", ":", "epic_1", ",", "cdpp6_1", "=", "np", ".", "loadtxt", "(", "os", 
".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_everest1.cdpp'", "%", "int", "(", "campaign", ")", ")", ",", "unpack", "=", "True", ")", "cdpp6_1", "=", "sort_like", "(", "cdpp6_1", ",", "epic", ",", "epic_1", ")", "# Outliers", "epic_1", ",", "out_1", ",", "tot_1", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_everest1.out'", "%", "int", "(", "campaign", ")", ")", ",", "unpack", "=", "True", ")", "out_1", "=", "sort_like", "(", "out_1", ",", "epic", ",", "epic_1", ")", "tot_1", "=", "sort_like", "(", "tot_1", ",", "epic", ",", "epic_1", ")", "elif", "compare_to", ".", "lower", "(", ")", "==", "'k2sc'", ":", "epic_1", ",", "cdpp6_1", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_k2sc.cdpp'", "%", "int", "(", "campaign", ")", ")", ",", "unpack", "=", "True", ")", "cdpp6_1", "=", "sort_like", "(", "cdpp6_1", ",", "epic", ",", "epic_1", ")", "# Outliers", "epic_1", ",", "out_1", ",", "tot_1", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_k2sc.out'", "%", "int", "(", "campaign", ")", ")", ",", "unpack", "=", "True", ")", "out_1", "=", "sort_like", "(", "out_1", ",", "epic", ",", "epic_1", ")", "tot_1", "=", "sort_like", "(", "tot_1", ",", "epic", ",", "epic_1", ")", "elif", "compare_to", ".", "lower", "(", ")", "==", "'k2sff'", ":", "epic_1", ",", "cdpp6_1", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_k2sff.cdpp'", "%", "int", "(", "campaign", ")", ")", ",", "unpack", "=", "True", ")", "cdpp6_1", "=", "sort_like", "(", "cdpp6_1", ",", "epic", ",", "epic_1", ")", "# Outliers", "epic_1", ",", "out_1", ",", "tot_1", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_k2sff.out'", "%", "int", "(", "campaign", ")", ")", ",", "unpack", "=", "True", ")", "out_1", "=", "sort_like", "(", "out_1", ",", "epic", ",", "epic_1", ")", "tot_1", "=", "sort_like", "(", "tot_1", ",", "epic", ",", "epic_1", ")", "elif", "compare_to", ".", "lower", "(", ")", "==", "'kepler'", ":", "kic", ",", "kepler_kp", ",", "kepler_cdpp6", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'kepler.cdpp'", ")", ",", "unpack", "=", "True", ")", "else", ":", "compfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "int", "(", "campaign", ")", ",", "compare_to", ")", ")", "epic_1", ",", "_", ",", "_", ",", "cdpp6_1", ",", "_", ",", "_", ",", "out_1", ",", "tot_1", ",", "saturated", "=", "np", ".", "loadtxt", "(", "compfile", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "epic_1", "=", "np", ".", "array", "(", "epic_1", ",", "dtype", "=", "int", ")", "inds", "=", "np", ".", "array", "(", "[", "e", "in", "sub", "for", "e", "in", "epic_1", "]", ")", "epic_1", "=", "epic_1", "[", "inds", "]", "cdpp6_1", "=", "cdpp6_1", "[", "inds", "]", "out_1", "=", "out_1", "[", "inds", "]", "tot_1", "=", "tot_1", "[", "inds", "]", "cdpp6_1", "=", "sort_like", "(", "cdpp6_1", ",", "epic", ",", "epic_1", ")", 
"out_1", "=", "sort_like", "(", "out_1", ",", "epic", ",", "epic_1", ")", "tot_1", "=", "sort_like", "(", "tot_1", ",", "epic", ",", "epic_1", ")", "# ------ 1. Plot cdpp vs. mag", "if", "compare_to", ".", "lower", "(", ")", "!=", "'kepler'", ":", "fig", "=", "pl", ".", "figure", "(", "figsize", "=", "(", "16", ",", "5", ")", ")", "ax", "=", "[", "pl", ".", "subplot2grid", "(", "(", "120", ",", "120", ")", ",", "(", "0", ",", "0", ")", ",", "colspan", "=", "35", ",", "rowspan", "=", "120", ")", ",", "pl", ".", "subplot2grid", "(", "(", "120", ",", "120", ")", ",", "(", "0", ",", "40", ")", ",", "colspan", "=", "35", ",", "rowspan", "=", "120", ")", ",", "pl", ".", "subplot2grid", "(", "(", "120", ",", "120", ")", ",", "(", "0", ",", "80", ")", ",", "colspan", "=", "35", ",", "rowspan", "=", "55", ")", ",", "pl", ".", "subplot2grid", "(", "(", "120", ",", "120", ")", ",", "(", "65", ",", "80", ")", ",", "colspan", "=", "35", ",", "rowspan", "=", "55", ")", "]", "else", ":", "fig", "=", "pl", ".", "figure", "(", "figsize", "=", "(", "12", ",", "5", ")", ")", "ax", "=", "[", "pl", ".", "subplot2grid", "(", "(", "120", ",", "75", ")", ",", "(", "0", ",", "0", ")", ",", "colspan", "=", "35", ",", "rowspan", "=", "120", ")", ",", "None", ",", "pl", ".", "subplot2grid", "(", "(", "120", ",", "75", ")", ",", "(", "0", ",", "40", ")", ",", "colspan", "=", "35", ",", "rowspan", "=", "55", ")", ",", "pl", ".", "subplot2grid", "(", "(", "120", ",", "75", ")", ",", "(", "65", ",", "40", ")", ",", "colspan", "=", "35", ",", "rowspan", "=", "55", ")", "]", "fig", ".", "canvas", ".", "set_window_title", "(", "'K2 Campaign %s: %s versus %s'", "%", "(", "campaign", ",", "model", ",", "compare_to", ")", ")", "fig", ".", "subplots_adjust", "(", "left", "=", "0.05", ",", "right", "=", "0.95", ",", "bottom", "=", "0.125", ",", "top", "=", "0.9", ")", "bins", "=", "np", ".", "arange", "(", "7.5", ",", "18.5", ",", "0.5", ")", "if", "compare_to", ".", "lower", "(", ")", "!=", "'kepler'", ":", "ax", "[", "0", "]", ".", "scatter", "(", "kp", "[", "unsat", "]", ",", "cdpp6_1", "[", "unsat", "]", ",", "color", "=", "'y'", ",", "marker", "=", "'.'", ",", "alpha", "=", "alpha_unsat", ")", "ax", "[", "0", "]", ".", "scatter", "(", "kp", "[", "sat", "]", ",", "cdpp6_1", "[", "sat", "]", ",", "color", "=", "'y'", ",", "marker", "=", "'s'", ",", "alpha", "=", "alpha_sat", ",", "s", "=", "5", ")", "ax", "[", "0", "]", ".", "scatter", "(", "kp", "[", "unsat", "]", ",", "cdpp6", "[", "unsat", "]", ",", "color", "=", "'b'", ",", "marker", "=", "'.'", ",", "alpha", "=", "alpha_unsat", ",", "picker", "=", "True", ")", "ax", "[", "0", "]", ".", "scatter", "(", "kp", "[", "sat", "]", ",", "cdpp6", "[", "sat", "]", ",", "color", "=", "'b'", ",", "marker", "=", "'s'", ",", "alpha", "=", "alpha_sat", ",", "s", "=", "5", ",", "picker", "=", "True", ")", "for", "y", ",", "style", "in", "zip", "(", "[", "cdpp6_1", ",", "cdpp6", "]", ",", "[", "'yo'", ",", "'bo'", "]", ")", ":", "by", "=", "np", ".", "zeros_like", "(", "bins", ")", "*", "np", ".", "nan", "for", "b", ",", "bin", "in", "enumerate", "(", "bins", ")", ":", "i", "=", "np", ".", "where", "(", "(", "y", ">", "-", "np", ".", "inf", ")", "&", "(", "y", "<", "np", ".", "inf", ")", "&", "(", "kp", ">=", "bin", "-", "0.5", ")", "&", "(", "kp", "<", "bin", "+", "0.5", ")", ")", "[", "0", "]", "if", "len", "(", "i", ")", ">", "10", ":", "by", "[", "b", "]", "=", "np", ".", "median", "(", "y", "[", "i", "]", ")", "ax", "[", "0", "]", ".", "plot", "(", "bins", ",", "by", ",", "style", 
",", "markeredgecolor", "=", "'w'", ")", "else", ":", "ax", "[", "0", "]", ".", "scatter", "(", "kepler_kp", ",", "kepler_cdpp6", ",", "color", "=", "'y'", ",", "marker", "=", "'.'", ",", "alpha", "=", "alpha_kepler", ")", "ax", "[", "0", "]", ".", "scatter", "(", "kp", ",", "cdpp6", ",", "color", "=", "'b'", ",", "marker", "=", "'.'", ",", "alpha", "=", "alpha_unsat", ",", "picker", "=", "True", ")", "for", "x", ",", "y", ",", "style", "in", "zip", "(", "[", "kepler_kp", ",", "kp", "]", ",", "[", "kepler_cdpp6", ",", "cdpp6", "]", ",", "[", "'yo'", ",", "'bo'", "]", ")", ":", "by", "=", "np", ".", "zeros_like", "(", "bins", ")", "*", "np", ".", "nan", "for", "b", ",", "bin", "in", "enumerate", "(", "bins", ")", ":", "i", "=", "np", ".", "where", "(", "(", "y", ">", "-", "np", ".", "inf", ")", "&", "(", "y", "<", "np", ".", "inf", ")", "&", "(", "x", ">=", "bin", "-", "0.5", ")", "&", "(", "x", "<", "bin", "+", "0.5", ")", ")", "[", "0", "]", "if", "len", "(", "i", ")", ">", "10", ":", "by", "[", "b", "]", "=", "np", ".", "median", "(", "y", "[", "i", "]", ")", "ax", "[", "0", "]", ".", "plot", "(", "bins", ",", "by", ",", "style", ",", "markeredgecolor", "=", "'w'", ")", "ax", "[", "0", "]", ".", "set_ylim", "(", "-", "10", ",", "500", ")", "ax", "[", "0", "]", ".", "set_xlim", "(", "8", ",", "18", ")", "ax", "[", "0", "]", ".", "set_xlabel", "(", "'Kepler Magnitude'", ",", "fontsize", "=", "18", ")", "ax", "[", "0", "]", ".", "set_title", "(", "'CDPP6 (ppm)'", ",", "fontsize", "=", "18", ")", "# ------ 2. Plot the equivalent of the Aigrain+16 figure", "if", "compare_to", ".", "lower", "(", ")", "!=", "'kepler'", ":", "x", "=", "kp", "y", "=", "(", "cdpp6", "-", "cdpp6_1", ")", "/", "cdpp6_1", "yv", "=", "(", "cdpp6v", "-", "cdpp6_1", ")", "/", "cdpp6_1", "ax", "[", "1", "]", ".", "scatter", "(", "x", "[", "unsat", "]", ",", "y", "[", "unsat", "]", ",", "color", "=", "'b'", ",", "marker", "=", "'.'", ",", "alpha", "=", "alpha_unsat", ",", "zorder", "=", "-", "1", ",", "picker", "=", "True", ")", "ax", "[", "1", "]", ".", "scatter", "(", "x", "[", "sat", "]", ",", "y", "[", "sat", "]", ",", "color", "=", "'r'", ",", "marker", "=", "'.'", ",", "alpha", "=", "alpha_sat", ",", "zorder", "=", "-", "1", ",", "picker", "=", "True", ")", "ax", "[", "1", "]", ".", "set_ylim", "(", "-", "1", ",", "1", ")", "ax", "[", "1", "]", ".", "set_xlim", "(", "8", ",", "18", ")", "ax", "[", "1", "]", ".", "axhline", "(", "0", ",", "color", "=", "'gray'", ",", "lw", "=", "2", ",", "zorder", "=", "-", "99", ",", "alpha", "=", "0.5", ")", "ax", "[", "1", "]", ".", "axhline", "(", "0.5", ",", "color", "=", "'gray'", ",", "ls", "=", "'--'", ",", "lw", "=", "2", ",", "zorder", "=", "-", "99", ",", "alpha", "=", "0.5", ")", "ax", "[", "1", "]", ".", "axhline", "(", "-", "0.5", ",", "color", "=", "'gray'", ",", "ls", "=", "'--'", ",", "lw", "=", "2", ",", "zorder", "=", "-", "99", ",", "alpha", "=", "0.5", ")", "bins", "=", "np", ".", "arange", "(", "7.5", ",", "18.5", ",", "0.5", ")", "# Bin the CDPP", "by", "=", "np", ".", "zeros_like", "(", "bins", ")", "*", "np", ".", "nan", "for", "b", ",", "bin", "in", "enumerate", "(", "bins", ")", ":", "i", "=", "np", ".", "where", "(", "(", "y", ">", "-", "np", ".", "inf", ")", "&", "(", "y", "<", "np", ".", "inf", ")", "&", "(", "x", ">=", "bin", "-", "0.5", ")", "&", "(", "x", "<", "bin", "+", "0.5", ")", ")", "[", "0", "]", "if", "len", "(", "i", ")", ">", "10", ":", "by", "[", "b", "]", "=", "np", ".", "median", "(", "y", "[", "i", "]", ")", "ax", "[", "1", "]", ".", 
"plot", "(", "bins", "[", ":", "9", "]", ",", "by", "[", ":", "9", "]", ",", "'k--'", ",", "lw", "=", "2", ")", "ax", "[", "1", "]", ".", "plot", "(", "bins", "[", "8", ":", "]", ",", "by", "[", "8", ":", "]", ",", "'k-'", ",", "lw", "=", "2", ")", "ax", "[", "1", "]", ".", "set_title", "(", "r'Relative CDPP'", ",", "fontsize", "=", "18", ")", "ax", "[", "1", "]", ".", "set_xlabel", "(", "'Kepler Magnitude'", ",", "fontsize", "=", "18", ")", "# ------ 3. Plot the outliers", "i", "=", "np", ".", "argsort", "(", "out", ")", "a", "=", "int", "(", "0.95", "*", "len", "(", "out", ")", ")", "omax", "=", "out", "[", "i", "]", "[", "a", "]", "if", "compare_to", ".", "lower", "(", ")", "!=", "'kepler'", ":", "j", "=", "np", ".", "argsort", "(", "out_1", ")", "b", "=", "int", "(", "0.95", "*", "len", "(", "out_1", ")", ")", "omax", "=", "max", "(", "omax", ",", "out_1", "[", "j", "]", "[", "b", "]", ")", "ax", "[", "2", "]", ".", "hist", "(", "out", ",", "25", ",", "range", "=", "(", "0", ",", "omax", ")", ",", "histtype", "=", "'step'", ",", "color", "=", "'b'", ")", "if", "compare_to", ".", "lower", "(", ")", "!=", "'kepler'", ":", "ax", "[", "2", "]", ".", "hist", "(", "out_1", ",", "25", ",", "range", "=", "(", "0", ",", "omax", ")", ",", "histtype", "=", "'step'", ",", "color", "=", "'y'", ")", "ax", "[", "2", "]", ".", "margins", "(", "0", ",", "None", ")", "ax", "[", "2", "]", ".", "set_title", "(", "'Number of Outliers'", ",", "fontsize", "=", "18", ")", "# Plot the total number of data points", "i", "=", "np", ".", "argsort", "(", "tot", ")", "a", "=", "int", "(", "0.05", "*", "len", "(", "tot", ")", ")", "b", "=", "int", "(", "0.95", "*", "len", "(", "tot", ")", ")", "tmin", "=", "tot", "[", "i", "]", "[", "a", "]", "tmax", "=", "tot", "[", "i", "]", "[", "b", "]", "if", "compare_to", ".", "lower", "(", ")", "!=", "'kepler'", ":", "j", "=", "np", ".", "argsort", "(", "tot_1", ")", "c", "=", "int", "(", "0.05", "*", "len", "(", "tot_1", ")", ")", "d", "=", "int", "(", "0.95", "*", "len", "(", "tot_1", ")", ")", "tmin", "=", "min", "(", "tmin", ",", "tot_1", "[", "j", "]", "[", "c", "]", ")", "tmax", "=", "max", "(", "tmax", ",", "tot_1", "[", "j", "]", "[", "d", "]", ")", "ax", "[", "3", "]", ".", "hist", "(", "tot", ",", "25", ",", "range", "=", "(", "tmin", ",", "tmax", ")", ",", "histtype", "=", "'step'", ",", "color", "=", "'b'", ")", "if", "compare_to", ".", "lower", "(", ")", "!=", "'kepler'", ":", "ax", "[", "3", "]", ".", "hist", "(", "tot_1", ",", "25", ",", "range", "=", "(", "tmin", ",", "tmax", ")", ",", "histtype", "=", "'step'", ",", "color", "=", "'y'", ")", "ax", "[", "3", "]", ".", "margins", "(", "0", ",", "None", ")", "ax", "[", "3", "]", ".", "set_xlabel", "(", "'Number of Data Points'", ",", "fontsize", "=", "18", ")", "# Pickable points", "Picker", "=", "StatsPicker", "(", "[", "ax", "[", "0", "]", ",", "ax", "[", "1", "]", "]", ",", "[", "kp", ",", "kp", "]", ",", "[", "cdpp6", ",", "y", "]", ",", "epic", ",", "model", "=", "model", ",", "compare_to", "=", "compare_to", ",", "campaign", "=", "campaign", ")", "fig", ".", "canvas", ".", "mpl_connect", "(", "'pick_event'", ",", "Picker", ")", "# Show", "pl", ".", "show", "(", ")" ]
Computes and plots the CDPP statistics comparison between `model`
and `compare_to` for all long cadence light curves in a given campaign

:param season: The campaign number or list of campaign numbers. \
Default is to plot all campaigns
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param str model: The :py:obj:`everest` model name
:param str compare_to: The :py:obj:`everest` model name or other \
K2 pipeline name
:param bool plot: Default :py:obj:`True`
:param bool injection: Statistics for injection tests? Default \
:py:obj:`False`
:param bool planets: Statistics for known K2 planets? \
Default :py:obj:`False`
[ "Computes", "and", "plots", "the", "CDPP", "statistics", "comparison", "between", "model", "and", "compare_to", "for", "all", "long", "cadence", "light", "curves", "in", "a", "given", "campaign" ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1122-L1446
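The preceding record loads its per-campaign comparison data from plain-text `.cdpp` and `.out` tables via np.loadtxt. A minimal standalone sketch of that access pattern follows; the campaign number is illustrative, and the assumption that EVEREST_SRC is importable from everest.config is flagged in the code.

import os
import numpy as np
from everest.config import EVEREST_SRC  # assumed home of the EVEREST_SRC path

campaign = 2  # hypothetical campaign number
# CDPP table for the EVEREST 1 comparison pipeline, as loaded above
epic_1, cdpp6_1 = np.loadtxt(
    os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                 'c%02d_everest1.cdpp' % campaign), unpack=True)
# Outlier table: EPIC ID, number of outliers, total number of cadences
epic_1, out_1, tot_1 = np.loadtxt(
    os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                 'c%02d_everest1.out' % campaign), unpack=True)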
rodluger/everest
everest/missions/k2/k2.py
HasShortCadence
def HasShortCadence(EPIC, season=None):
    '''
    Returns `True` if short cadence data is available for this target.

    :param int EPIC: The EPIC ID number
    :param int season: The campaign number. Default :py:obj:`None`

    '''
    if season is None:
        season = Campaign(EPIC)
    if season is None:
        return None
    stars = GetK2Campaign(season)
    i = np.where([s[0] == EPIC for s in stars])[0]
    if len(i):
        return stars[i[0]][3]
    else:
        return None
python
def HasShortCadence(EPIC, season=None):
    '''
    Returns `True` if short cadence data is available for this target.

    :param int EPIC: The EPIC ID number
    :param int season: The campaign number. Default :py:obj:`None`

    '''
    if season is None:
        season = Campaign(EPIC)
    if season is None:
        return None
    stars = GetK2Campaign(season)
    i = np.where([s[0] == EPIC for s in stars])[0]
    if len(i):
        return stars[i[0]][3]
    else:
        return None
[ "def", "HasShortCadence", "(", "EPIC", ",", "season", "=", "None", ")", ":", "if", "season", "is", "None", ":", "season", "=", "Campaign", "(", "EPIC", ")", "if", "season", "is", "None", ":", "return", "None", "stars", "=", "GetK2Campaign", "(", "season", ")", "i", "=", "np", ".", "where", "(", "[", "s", "[", "0", "]", "==", "EPIC", "for", "s", "in", "stars", "]", ")", "[", "0", "]", "if", "len", "(", "i", ")", ":", "return", "stars", "[", "i", "[", "0", "]", "]", "[", "3", "]", "else", ":", "return", "None" ]
Returns `True` if short cadence data is available for this target.

:param int EPIC: The EPIC ID number
:param int season: The campaign number. Default :py:obj:`None`
[ "Returns", "True", "if", "short", "cadence", "data", "is", "available", "for", "this", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1449-L1467
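A short usage sketch for HasShortCadence. The EPIC ID is illustrative, the flat import path is an assumption, and passing season explicitly skips the Campaign() lookup inside the function.

from everest.missions.k2 import HasShortCadence  # assumed import path

EPIC = 201367065  # hypothetical EPIC ID
if HasShortCadence(EPIC, season=1):
    print('Short cadence data is available for EPIC %d' % EPIC)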
rodluger/everest
everest/missions/k2/k2.py
InjectionStatistics
def InjectionStatistics(campaign=0, clobber=False, model='nPLD', plot=True, show=True, **kwargs): ''' Computes and plots the statistics for injection/recovery tests. :param int campaign: The campaign number. Default 0 :param str model: The :py:obj:`everest` model name :param bool plot: Default :py:obj:`True` :param bool show: Show the plot? Default :py:obj:`True`. \ If :py:obj:`False`, returns the `fig, ax` instances. :param bool clobber: Overwrite existing files? Default :py:obj:`False` ''' # Compute the statistics stars = GetK2Campaign(campaign, epics_only=True) if type(campaign) is int: outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.inj' % (campaign, model)) else: outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%04.1f_%s.inj' % (campaign, model)) if clobber or not os.path.exists(outfile): with open(outfile, 'w') as f: print("EPIC Depth UControl URecovered"+ " MControl MRecovered", file=f) print("--------- ---------- ---------- ----------"+ " ---------- ----------", file=f) for i, _ in enumerate(stars): sys.stdout.write('\rProcessing target %d/%d...' % (i + 1, len(stars))) sys.stdout.flush() path = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % stars[i])[:4] + '00000', ('%09d' % stars[i])[4:]) # Loop over all depths for depth in [0.01, 0.001, 0.0001]: try: # Unmasked data = np.load(os.path.join( path, '%s_Inject_U%g.npz' % (model, depth))) assert depth == data['inject'][()]['depth'], "" ucontrol = data['inject'][()]['rec_depth_control'] urecovered = data['inject'][()]['rec_depth'] # Masked data = np.load(os.path.join( path, '%s_Inject_M%g.npz' % (model, depth))) assert depth == data['inject'][()]['depth'], "" mcontrol = data['inject'][()]['rec_depth_control'] mrecovered = data['inject'][()]['rec_depth'] # Log it print("{:>09d} {:>13.8f} {:>13.8f} {:>13.8f} {:>13.8f} {:>13.8f}".format( stars[i], depth, ucontrol, urecovered, mcontrol, mrecovered), file=f) except: pass print("") if plot: # Load the statistics try: epic, depth, ucontrol, urecovered, mcontrol, mrecovered = \ np.loadtxt(outfile, unpack=True, skiprows=2) except ValueError: raise Exception("No targets to plot.") # Normalize to the injected depth ucontrol /= depth urecovered /= depth mcontrol /= depth mrecovered /= depth # Set up the plot fig, ax = pl.subplots(3, 2, figsize=(9, 12)) fig.subplots_adjust(hspace=0.29) ax[0, 0].set_title(r'Unmasked', fontsize=18) ax[0, 1].set_title(r'Masked', fontsize=18) ax[0, 0].set_ylabel( r'$D_0 = 10^{-2}$', rotation=90, fontsize=18, labelpad=10) ax[1, 0].set_ylabel( r'$D_0 = 10^{-3}$', rotation=90, fontsize=18, labelpad=10) ax[2, 0].set_ylabel( r'$D_0 = 10^{-4}$', rotation=90, fontsize=18, labelpad=10) # Define some useful stuff for plotting depths = [1e-2, 1e-3, 1e-4] ranges = [(0.75, 1.25), (0.5, 1.5), (0., 2.)] nbins = [30, 30, 20] ymax = [0.4, 0.25, 0.16] xticks = [[0.75, 0.875, 1., 1.125, 1.25], [ 0.5, 0.75, 1., 1.25, 1.5], [0., 0.5, 1., 1.5, 2.0]] # Plot for i in range(3): # Indices for this plot idx = np.where(depth == depths[i]) for j, control, recovered in zip([0, 1], [ucontrol[idx], mcontrol[idx]], [urecovered[idx], mrecovered[idx]]): # Control ax[i, j].hist(control, bins=nbins[i], range=ranges[i], color='r', histtype='step', weights=np.ones_like(control) / len(control)) # Recovered ax[i, j].hist(recovered, bins=nbins[i], range=ranges[i], color='b', histtype='step', weights=np.ones_like(recovered) / len(recovered)) # Indicate center ax[i, j].axvline(1., color='k', ls='--') # Indicate the fraction above and below if 
len(recovered): au = len(np.where(recovered > ranges[i][1])[ 0]) / len(recovered) al = len(np.where(recovered < ranges[i][0])[ 0]) / len(recovered) ax[i, j].annotate('%.2f' % al, xy=(0.01, 0.93), xycoords='axes fraction', xytext=(0.1, 0.93), ha='left', va='center', color='b', arrowprops=dict(arrowstyle="->", color='b')) ax[i, j].annotate('%.2f' % au, xy=(0.99, 0.93), xycoords='axes fraction', xytext=(0.9, 0.93), ha='right', va='center', color='b', arrowprops=dict(arrowstyle="->", color='b')) if len(control): cu = len(np.where(control > ranges[i][1])[ 0]) / len(control) cl = len(np.where(control < ranges[i][0])[ 0]) / len(control) ax[i, j].annotate('%.2f' % cl, xy=(0.01, 0.86), xycoords='axes fraction', xytext=(0.1, 0.86), ha='left', va='center', color='r', arrowprops=dict(arrowstyle="->", color='r')) ax[i, j].annotate('%.2f' % cu, xy=(0.99, 0.86), xycoords='axes fraction', xytext=(0.9, 0.86), ha='right', va='center', color='r', arrowprops=dict(arrowstyle="->", color='r')) # Indicate the median if len(recovered): ax[i, j].annotate('M = %.2f' % np.median(recovered), xy=(0.35, 0.5), ha='right', xycoords='axes fraction', color='b', fontsize=16) if len(control): ax[i, j].annotate('M = %.2f' % np.median(control), xy=(0.65, 0.5), ha='left', xycoords='axes fraction', color='r', fontsize=16) # Tweaks ax[i, j].set_xticks(xticks[i]) ax[i, j].set_xlim(xticks[i][0], xticks[i][-1]) ax[i, j].set_ylim(-0.005, ymax[i]) ax[i, j].set_xlabel(r'$D/D_0$', fontsize=16) ax[i, j].get_yaxis().set_major_locator(MaxNLocator(5)) for tick in ax[i, j].get_xticklabels() + \ ax[i, j].get_yticklabels(): tick.set_fontsize(14) if show: pl.show() else: return fig, ax
python
def InjectionStatistics(campaign=0, clobber=False, model='nPLD', plot=True, show=True, **kwargs): ''' Computes and plots the statistics for injection/recovery tests. :param int campaign: The campaign number. Default 0 :param str model: The :py:obj:`everest` model name :param bool plot: Default :py:obj:`True` :param bool show: Show the plot? Default :py:obj:`True`. \ If :py:obj:`False`, returns the `fig, ax` instances. :param bool clobber: Overwrite existing files? Default :py:obj:`False` ''' # Compute the statistics stars = GetK2Campaign(campaign, epics_only=True) if type(campaign) is int: outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.inj' % (campaign, model)) else: outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%04.1f_%s.inj' % (campaign, model)) if clobber or not os.path.exists(outfile): with open(outfile, 'w') as f: print("EPIC Depth UControl URecovered"+ " MControl MRecovered", file=f) print("--------- ---------- ---------- ----------"+ " ---------- ----------", file=f) for i, _ in enumerate(stars): sys.stdout.write('\rProcessing target %d/%d...' % (i + 1, len(stars))) sys.stdout.flush() path = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % stars[i])[:4] + '00000', ('%09d' % stars[i])[4:]) # Loop over all depths for depth in [0.01, 0.001, 0.0001]: try: # Unmasked data = np.load(os.path.join( path, '%s_Inject_U%g.npz' % (model, depth))) assert depth == data['inject'][()]['depth'], "" ucontrol = data['inject'][()]['rec_depth_control'] urecovered = data['inject'][()]['rec_depth'] # Masked data = np.load(os.path.join( path, '%s_Inject_M%g.npz' % (model, depth))) assert depth == data['inject'][()]['depth'], "" mcontrol = data['inject'][()]['rec_depth_control'] mrecovered = data['inject'][()]['rec_depth'] # Log it print("{:>09d} {:>13.8f} {:>13.8f} {:>13.8f} {:>13.8f} {:>13.8f}".format( stars[i], depth, ucontrol, urecovered, mcontrol, mrecovered), file=f) except: pass print("") if plot: # Load the statistics try: epic, depth, ucontrol, urecovered, mcontrol, mrecovered = \ np.loadtxt(outfile, unpack=True, skiprows=2) except ValueError: raise Exception("No targets to plot.") # Normalize to the injected depth ucontrol /= depth urecovered /= depth mcontrol /= depth mrecovered /= depth # Set up the plot fig, ax = pl.subplots(3, 2, figsize=(9, 12)) fig.subplots_adjust(hspace=0.29) ax[0, 0].set_title(r'Unmasked', fontsize=18) ax[0, 1].set_title(r'Masked', fontsize=18) ax[0, 0].set_ylabel( r'$D_0 = 10^{-2}$', rotation=90, fontsize=18, labelpad=10) ax[1, 0].set_ylabel( r'$D_0 = 10^{-3}$', rotation=90, fontsize=18, labelpad=10) ax[2, 0].set_ylabel( r'$D_0 = 10^{-4}$', rotation=90, fontsize=18, labelpad=10) # Define some useful stuff for plotting depths = [1e-2, 1e-3, 1e-4] ranges = [(0.75, 1.25), (0.5, 1.5), (0., 2.)] nbins = [30, 30, 20] ymax = [0.4, 0.25, 0.16] xticks = [[0.75, 0.875, 1., 1.125, 1.25], [ 0.5, 0.75, 1., 1.25, 1.5], [0., 0.5, 1., 1.5, 2.0]] # Plot for i in range(3): # Indices for this plot idx = np.where(depth == depths[i]) for j, control, recovered in zip([0, 1], [ucontrol[idx], mcontrol[idx]], [urecovered[idx], mrecovered[idx]]): # Control ax[i, j].hist(control, bins=nbins[i], range=ranges[i], color='r', histtype='step', weights=np.ones_like(control) / len(control)) # Recovered ax[i, j].hist(recovered, bins=nbins[i], range=ranges[i], color='b', histtype='step', weights=np.ones_like(recovered) / len(recovered)) # Indicate center ax[i, j].axvline(1., color='k', ls='--') # Indicate the fraction above and below if 
len(recovered): au = len(np.where(recovered > ranges[i][1])[ 0]) / len(recovered) al = len(np.where(recovered < ranges[i][0])[ 0]) / len(recovered) ax[i, j].annotate('%.2f' % al, xy=(0.01, 0.93), xycoords='axes fraction', xytext=(0.1, 0.93), ha='left', va='center', color='b', arrowprops=dict(arrowstyle="->", color='b')) ax[i, j].annotate('%.2f' % au, xy=(0.99, 0.93), xycoords='axes fraction', xytext=(0.9, 0.93), ha='right', va='center', color='b', arrowprops=dict(arrowstyle="->", color='b')) if len(control): cu = len(np.where(control > ranges[i][1])[ 0]) / len(control) cl = len(np.where(control < ranges[i][0])[ 0]) / len(control) ax[i, j].annotate('%.2f' % cl, xy=(0.01, 0.86), xycoords='axes fraction', xytext=(0.1, 0.86), ha='left', va='center', color='r', arrowprops=dict(arrowstyle="->", color='r')) ax[i, j].annotate('%.2f' % cu, xy=(0.99, 0.86), xycoords='axes fraction', xytext=(0.9, 0.86), ha='right', va='center', color='r', arrowprops=dict(arrowstyle="->", color='r')) # Indicate the median if len(recovered): ax[i, j].annotate('M = %.2f' % np.median(recovered), xy=(0.35, 0.5), ha='right', xycoords='axes fraction', color='b', fontsize=16) if len(control): ax[i, j].annotate('M = %.2f' % np.median(control), xy=(0.65, 0.5), ha='left', xycoords='axes fraction', color='r', fontsize=16) # Tweaks ax[i, j].set_xticks(xticks[i]) ax[i, j].set_xlim(xticks[i][0], xticks[i][-1]) ax[i, j].set_ylim(-0.005, ymax[i]) ax[i, j].set_xlabel(r'$D/D_0$', fontsize=16) ax[i, j].get_yaxis().set_major_locator(MaxNLocator(5)) for tick in ax[i, j].get_xticklabels() + \ ax[i, j].get_yticklabels(): tick.set_fontsize(14) if show: pl.show() else: return fig, ax
[ "def", "InjectionStatistics", "(", "campaign", "=", "0", ",", "clobber", "=", "False", ",", "model", "=", "'nPLD'", ",", "plot", "=", "True", ",", "show", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# Compute the statistics", "stars", "=", "GetK2Campaign", "(", "campaign", ",", "epics_only", "=", "True", ")", "if", "type", "(", "campaign", ")", "is", "int", ":", "outfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.inj'", "%", "(", "campaign", ",", "model", ")", ")", "else", ":", "outfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%04.1f_%s.inj'", "%", "(", "campaign", ",", "model", ")", ")", "if", "clobber", "or", "not", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "with", "open", "(", "outfile", ",", "'w'", ")", "as", "f", ":", "print", "(", "\"EPIC Depth UControl URecovered\"", "+", "\" MControl MRecovered\"", ",", "file", "=", "f", ")", "print", "(", "\"--------- ---------- ---------- ----------\"", "+", "\" ---------- ----------\"", ",", "file", "=", "f", ")", "for", "i", ",", "_", "in", "enumerate", "(", "stars", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'\\rProcessing target %d/%d...'", "%", "(", "i", "+", "1", ",", "len", "(", "stars", ")", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "path", "=", "os", ".", "path", ".", "join", "(", "EVEREST_DAT", ",", "'k2'", ",", "'c%02d'", "%", "int", "(", "campaign", ")", ",", "(", "'%09d'", "%", "stars", "[", "i", "]", ")", "[", ":", "4", "]", "+", "'00000'", ",", "(", "'%09d'", "%", "stars", "[", "i", "]", ")", "[", "4", ":", "]", ")", "# Loop over all depths", "for", "depth", "in", "[", "0.01", ",", "0.001", ",", "0.0001", "]", ":", "try", ":", "# Unmasked", "data", "=", "np", ".", "load", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'%s_Inject_U%g.npz'", "%", "(", "model", ",", "depth", ")", ")", ")", "assert", "depth", "==", "data", "[", "'inject'", "]", "[", "(", ")", "]", "[", "'depth'", "]", ",", "\"\"", "ucontrol", "=", "data", "[", "'inject'", "]", "[", "(", ")", "]", "[", "'rec_depth_control'", "]", "urecovered", "=", "data", "[", "'inject'", "]", "[", "(", ")", "]", "[", "'rec_depth'", "]", "# Masked", "data", "=", "np", ".", "load", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'%s_Inject_M%g.npz'", "%", "(", "model", ",", "depth", ")", ")", ")", "assert", "depth", "==", "data", "[", "'inject'", "]", "[", "(", ")", "]", "[", "'depth'", "]", ",", "\"\"", "mcontrol", "=", "data", "[", "'inject'", "]", "[", "(", ")", "]", "[", "'rec_depth_control'", "]", "mrecovered", "=", "data", "[", "'inject'", "]", "[", "(", ")", "]", "[", "'rec_depth'", "]", "# Log it", "print", "(", "\"{:>09d} {:>13.8f} {:>13.8f} {:>13.8f} {:>13.8f} {:>13.8f}\"", ".", "format", "(", "stars", "[", "i", "]", ",", "depth", ",", "ucontrol", ",", "urecovered", ",", "mcontrol", ",", "mrecovered", ")", ",", "file", "=", "f", ")", "except", ":", "pass", "print", "(", "\"\"", ")", "if", "plot", ":", "# Load the statistics", "try", ":", "epic", ",", "depth", ",", "ucontrol", ",", "urecovered", ",", "mcontrol", ",", "mrecovered", "=", "np", ".", "loadtxt", "(", "outfile", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "except", "ValueError", ":", "raise", "Exception", "(", "\"No targets to plot.\"", ")", "# Normalize to the injected depth", "ucontrol", "/=", "depth", "urecovered", "/=", "depth", "mcontrol", "/=", "depth", "mrecovered", "/=", 
"depth", "# Set up the plot", "fig", ",", "ax", "=", "pl", ".", "subplots", "(", "3", ",", "2", ",", "figsize", "=", "(", "9", ",", "12", ")", ")", "fig", ".", "subplots_adjust", "(", "hspace", "=", "0.29", ")", "ax", "[", "0", ",", "0", "]", ".", "set_title", "(", "r'Unmasked'", ",", "fontsize", "=", "18", ")", "ax", "[", "0", ",", "1", "]", ".", "set_title", "(", "r'Masked'", ",", "fontsize", "=", "18", ")", "ax", "[", "0", ",", "0", "]", ".", "set_ylabel", "(", "r'$D_0 = 10^{-2}$'", ",", "rotation", "=", "90", ",", "fontsize", "=", "18", ",", "labelpad", "=", "10", ")", "ax", "[", "1", ",", "0", "]", ".", "set_ylabel", "(", "r'$D_0 = 10^{-3}$'", ",", "rotation", "=", "90", ",", "fontsize", "=", "18", ",", "labelpad", "=", "10", ")", "ax", "[", "2", ",", "0", "]", ".", "set_ylabel", "(", "r'$D_0 = 10^{-4}$'", ",", "rotation", "=", "90", ",", "fontsize", "=", "18", ",", "labelpad", "=", "10", ")", "# Define some useful stuff for plotting", "depths", "=", "[", "1e-2", ",", "1e-3", ",", "1e-4", "]", "ranges", "=", "[", "(", "0.75", ",", "1.25", ")", ",", "(", "0.5", ",", "1.5", ")", ",", "(", "0.", ",", "2.", ")", "]", "nbins", "=", "[", "30", ",", "30", ",", "20", "]", "ymax", "=", "[", "0.4", ",", "0.25", ",", "0.16", "]", "xticks", "=", "[", "[", "0.75", ",", "0.875", ",", "1.", ",", "1.125", ",", "1.25", "]", ",", "[", "0.5", ",", "0.75", ",", "1.", ",", "1.25", ",", "1.5", "]", ",", "[", "0.", ",", "0.5", ",", "1.", ",", "1.5", ",", "2.0", "]", "]", "# Plot", "for", "i", "in", "range", "(", "3", ")", ":", "# Indices for this plot", "idx", "=", "np", ".", "where", "(", "depth", "==", "depths", "[", "i", "]", ")", "for", "j", ",", "control", ",", "recovered", "in", "zip", "(", "[", "0", ",", "1", "]", ",", "[", "ucontrol", "[", "idx", "]", ",", "mcontrol", "[", "idx", "]", "]", ",", "[", "urecovered", "[", "idx", "]", ",", "mrecovered", "[", "idx", "]", "]", ")", ":", "# Control", "ax", "[", "i", ",", "j", "]", ".", "hist", "(", "control", ",", "bins", "=", "nbins", "[", "i", "]", ",", "range", "=", "ranges", "[", "i", "]", ",", "color", "=", "'r'", ",", "histtype", "=", "'step'", ",", "weights", "=", "np", ".", "ones_like", "(", "control", ")", "/", "len", "(", "control", ")", ")", "# Recovered", "ax", "[", "i", ",", "j", "]", ".", "hist", "(", "recovered", ",", "bins", "=", "nbins", "[", "i", "]", ",", "range", "=", "ranges", "[", "i", "]", ",", "color", "=", "'b'", ",", "histtype", "=", "'step'", ",", "weights", "=", "np", ".", "ones_like", "(", "recovered", ")", "/", "len", "(", "recovered", ")", ")", "# Indicate center", "ax", "[", "i", ",", "j", "]", ".", "axvline", "(", "1.", ",", "color", "=", "'k'", ",", "ls", "=", "'--'", ")", "# Indicate the fraction above and below", "if", "len", "(", "recovered", ")", ":", "au", "=", "len", "(", "np", ".", "where", "(", "recovered", ">", "ranges", "[", "i", "]", "[", "1", "]", ")", "[", "0", "]", ")", "/", "len", "(", "recovered", ")", "al", "=", "len", "(", "np", ".", "where", "(", "recovered", "<", "ranges", "[", "i", "]", "[", "0", "]", ")", "[", "0", "]", ")", "/", "len", "(", "recovered", ")", "ax", "[", "i", ",", "j", "]", ".", "annotate", "(", "'%.2f'", "%", "al", ",", "xy", "=", "(", "0.01", ",", "0.93", ")", ",", "xycoords", "=", "'axes fraction'", ",", "xytext", "=", "(", "0.1", ",", "0.93", ")", ",", "ha", "=", "'left'", ",", "va", "=", "'center'", ",", "color", "=", "'b'", ",", "arrowprops", "=", "dict", "(", "arrowstyle", "=", "\"->\"", ",", "color", "=", "'b'", ")", ")", "ax", "[", "i", ",", "j", "]", ".", "annotate", "(", 
"'%.2f'", "%", "au", ",", "xy", "=", "(", "0.99", ",", "0.93", ")", ",", "xycoords", "=", "'axes fraction'", ",", "xytext", "=", "(", "0.9", ",", "0.93", ")", ",", "ha", "=", "'right'", ",", "va", "=", "'center'", ",", "color", "=", "'b'", ",", "arrowprops", "=", "dict", "(", "arrowstyle", "=", "\"->\"", ",", "color", "=", "'b'", ")", ")", "if", "len", "(", "control", ")", ":", "cu", "=", "len", "(", "np", ".", "where", "(", "control", ">", "ranges", "[", "i", "]", "[", "1", "]", ")", "[", "0", "]", ")", "/", "len", "(", "control", ")", "cl", "=", "len", "(", "np", ".", "where", "(", "control", "<", "ranges", "[", "i", "]", "[", "0", "]", ")", "[", "0", "]", ")", "/", "len", "(", "control", ")", "ax", "[", "i", ",", "j", "]", ".", "annotate", "(", "'%.2f'", "%", "cl", ",", "xy", "=", "(", "0.01", ",", "0.86", ")", ",", "xycoords", "=", "'axes fraction'", ",", "xytext", "=", "(", "0.1", ",", "0.86", ")", ",", "ha", "=", "'left'", ",", "va", "=", "'center'", ",", "color", "=", "'r'", ",", "arrowprops", "=", "dict", "(", "arrowstyle", "=", "\"->\"", ",", "color", "=", "'r'", ")", ")", "ax", "[", "i", ",", "j", "]", ".", "annotate", "(", "'%.2f'", "%", "cu", ",", "xy", "=", "(", "0.99", ",", "0.86", ")", ",", "xycoords", "=", "'axes fraction'", ",", "xytext", "=", "(", "0.9", ",", "0.86", ")", ",", "ha", "=", "'right'", ",", "va", "=", "'center'", ",", "color", "=", "'r'", ",", "arrowprops", "=", "dict", "(", "arrowstyle", "=", "\"->\"", ",", "color", "=", "'r'", ")", ")", "# Indicate the median", "if", "len", "(", "recovered", ")", ":", "ax", "[", "i", ",", "j", "]", ".", "annotate", "(", "'M = %.2f'", "%", "np", ".", "median", "(", "recovered", ")", ",", "xy", "=", "(", "0.35", ",", "0.5", ")", ",", "ha", "=", "'right'", ",", "xycoords", "=", "'axes fraction'", ",", "color", "=", "'b'", ",", "fontsize", "=", "16", ")", "if", "len", "(", "control", ")", ":", "ax", "[", "i", ",", "j", "]", ".", "annotate", "(", "'M = %.2f'", "%", "np", ".", "median", "(", "control", ")", ",", "xy", "=", "(", "0.65", ",", "0.5", ")", ",", "ha", "=", "'left'", ",", "xycoords", "=", "'axes fraction'", ",", "color", "=", "'r'", ",", "fontsize", "=", "16", ")", "# Tweaks", "ax", "[", "i", ",", "j", "]", ".", "set_xticks", "(", "xticks", "[", "i", "]", ")", "ax", "[", "i", ",", "j", "]", ".", "set_xlim", "(", "xticks", "[", "i", "]", "[", "0", "]", ",", "xticks", "[", "i", "]", "[", "-", "1", "]", ")", "ax", "[", "i", ",", "j", "]", ".", "set_ylim", "(", "-", "0.005", ",", "ymax", "[", "i", "]", ")", "ax", "[", "i", ",", "j", "]", ".", "set_xlabel", "(", "r'$D/D_0$'", ",", "fontsize", "=", "16", ")", "ax", "[", "i", ",", "j", "]", ".", "get_yaxis", "(", ")", ".", "set_major_locator", "(", "MaxNLocator", "(", "5", ")", ")", "for", "tick", "in", "ax", "[", "i", ",", "j", "]", ".", "get_xticklabels", "(", ")", "+", "ax", "[", "i", ",", "j", "]", ".", "get_yticklabels", "(", ")", ":", "tick", ".", "set_fontsize", "(", "14", ")", "if", "show", ":", "pl", ".", "show", "(", ")", "else", ":", "return", "fig", ",", "ax" ]
Computes and plots the statistics for injection/recovery tests.

:param int campaign: The campaign number. Default 0
:param str model: The :py:obj:`everest` model name
:param bool plot: Default :py:obj:`True`
:param bool show: Show the plot? Default :py:obj:`True`. \
If :py:obj:`False`, returns the `fig, ax` instances.
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
[ "Computes", "and", "plots", "the", "statistics", "for", "injection", "/", "recovery", "tests", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1470-L1656
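A usage sketch for InjectionStatistics. Per the docstring, show=False returns the fig, ax instances, so the summary figure can be saved rather than displayed; the import path and output filename are assumptions.

from everest.missions.k2 import InjectionStatistics  # assumed import path

# Recompute the campaign 0 injection table and save the summary figure
fig, ax = InjectionStatistics(campaign=0, model='nPLD',
                              clobber=True, show=False)
fig.savefig('c00_injection.pdf')  # hypothetical output file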
rodluger/everest
everest/missions/k2/k2.py
HDUCards
def HDUCards(headers, hdu=0): ''' Generates HDU cards for inclusion in the de-trended light curve FITS file. Used internally. ''' if headers is None: return [] if hdu == 0: # Get info from the TPF Primary HDU Header tpf_header = headers[0] entries = ['TELESCOP', 'INSTRUME', 'OBJECT', 'KEPLERID', 'CHANNEL', 'MODULE', 'OUTPUT', 'CAMPAIGN', 'DATA_REL', 'OBSMODE', 'TTABLEID', 'RADESYS', 'RA_OBJ', 'DEC_OBJ', 'EQUINOX', 'KEPMAG'] elif (hdu == 1) or (hdu == 6): # Get info from the TPF BinTable HDU Header tpf_header = headers[1] entries = ['WCSN4P', 'WCAX4P', '1CTY4P', '2CTY4P', '1CUN4P', '2CUN4P', '1CRV4P', '2CRV4P', '1CDL4P', '2CDL4P', '1CRP4P', '2CRP4P', 'WCAX4', '1CTYP4', '2CTYP4', '1CRPX4', '2CRPX4', '1CRVL4', '2CRVL4', '1CUNI4', '2CUNI4', '1CDLT4', '2CDLT4', '11PC4', '12PC4', '21PC4', '22PC4', 'WCSN5P', 'WCAX5P', '1CTY5P', '2CTY5P', '1CUN5P', '2CUN5P', '1CRV5P', '2CRV5P', '1CDL5P', '2CDL5P', '1CRP5P', '2CRP5P', 'WCAX5', '1CTYP5', '2CTYP5', '1CRPX5', '2CRPX5', '1CRVL5', '2CRVL5', '1CUNI5', '2CUNI5', '1CDLT5', '2CDLT5', '11PC5', '12PC5', '21PC5', '22PC5', 'WCSN6P', 'WCAX6P', '1CTY6P', '2CTY6P', '1CUN6P', '2CUN6P', '1CRV6P', '2CRV6P', '1CDL6P', '2CDL6P', '1CRP6P', '2CRP6P', 'WCAX6', '1CTYP6', '2CTYP6', '1CRPX6', '2CRPX6', '1CRVL6', '2CRVL6', '1CUNI6', '2CUNI6', '1CDLT6', '2CDLT6', '11PC6', '12PC6', '21PC6', '22PC6', 'WCSN7P', 'WCAX7P', '1CTY7P', '2CTY7P', '1CUN7P', '2CUN7P', '1CRV7P', '2CRV7P', '1CDL7P', '2CDL7P', '1CRP7P', '2CRP7P', 'WCAX7', '1CTYP7', '2CTYP7', '1CRPX7', '2CRPX7', '1CRVL7', '2CRVL7', '1CUNI7', '2CUNI7', '1CDLT7', '2CDLT7', '11PC7', '12PC7', '21PC7', '22PC7', 'WCSN8P', 'WCAX8P', '1CTY8P', '2CTY8P', '1CUN8P', '2CUN8P', '1CRV8P', '2CRV8P', '1CDL8P', '2CDL8P', '1CRP8P', '2CRP8P', 'WCAX8', '1CTYP8', '2CTYP8', '1CRPX8', '2CRPX8', '1CRVL8', '2CRVL8', '1CUNI8', '2CUNI8', '1CDLT8', '2CDLT8', '11PC8', '12PC8', '21PC8', '22PC8', 'WCSN9P', 'WCAX9P', '1CTY9P', '2CTY9P', '1CUN9P', '2CUN9P', '1CRV9P', '2CRV9P', '1CDL9P', '2CDL9P', '1CRP9P', '2CRP9P', 'WCAX9', '1CTYP9', '2CTYP9', '1CRPX9', '2CRPX9', '1CRVL9', '2CRVL9', '1CUNI9', '2CUNI9', '1CDLT9', '2CDLT9', '11PC9', '12PC9', '21PC9', '22PC9', 'INHERIT', 'EXTNAME', 'EXTVER', 'TELESCOP', 'INSTRUME', 'OBJECT', 'KEPLERID', 'RADESYS', 'RA_OBJ', 'DEC_OBJ', 'EQUINOX', 'EXPOSURE', 'TIMEREF', 'TASSIGN', 'TIMESYS', 'BJDREFI', 'BJDREFF', 'TIMEUNIT', 'TELAPSE', 'LIVETIME', 'TSTART', 'TSTOP', 'LC_START', 'LC_END', 'DEADC', 'TIMEPIXR', 'TIERRELA', 'INT_TIME', 'READTIME', 'FRAMETIM', 'NUM_FRM', 'TIMEDEL', 'DATE-OBS', 'DATE-END', 'BACKAPP', 'DEADAPP', 'VIGNAPP', 'GAIN', 'READNOIS', 'NREADOUT', 'TIMSLICE', 'MEANBLCK', 'LCFXDOFF', 'SCFXDOFF'] elif (hdu == 3) or (hdu == 4) or (hdu == 5): # Get info from the TPF BinTable HDU Header tpf_header = headers[2] entries = ['TELESCOP', 'INSTRUME', 'OBJECT', 'KEPLERID', 'RADESYS', 'RA_OBJ', 'DEC_OBJ', 'EQUINOX', 'WCSAXES', 'CTYPE1', 'CTYPE2', 'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2', 'CUNIT1', 'CUNIT2', 'CDELT1', 'CDELT2', 'PC1_1', 'PC1_2', 'PC2_1', 'PC2_2', 'WCSNAMEP', 'WCSAXESP', 'CTYPE1P', 'CUNIT1P', 'CRPIX1P', 'CRVAL1P', 'CDELT1P', 'CTYPE2P', 'CUNIT2P', 'CRPIX2P', 'CRVAL2P', 'CDELT2P', 'NPIXSAP', 'NPIXMISS'] else: return [] cards = [] cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* MISSION INFO *')) cards.append(('COMMENT', '************************')) for entry in entries: try: cards.append(tuple(tpf_header[entry])) except KeyError: pass return cards
python
def HDUCards(headers, hdu=0): ''' Generates HDU cards for inclusion in the de-trended light curve FITS file. Used internally. ''' if headers is None: return [] if hdu == 0: # Get info from the TPF Primary HDU Header tpf_header = headers[0] entries = ['TELESCOP', 'INSTRUME', 'OBJECT', 'KEPLERID', 'CHANNEL', 'MODULE', 'OUTPUT', 'CAMPAIGN', 'DATA_REL', 'OBSMODE', 'TTABLEID', 'RADESYS', 'RA_OBJ', 'DEC_OBJ', 'EQUINOX', 'KEPMAG'] elif (hdu == 1) or (hdu == 6): # Get info from the TPF BinTable HDU Header tpf_header = headers[1] entries = ['WCSN4P', 'WCAX4P', '1CTY4P', '2CTY4P', '1CUN4P', '2CUN4P', '1CRV4P', '2CRV4P', '1CDL4P', '2CDL4P', '1CRP4P', '2CRP4P', 'WCAX4', '1CTYP4', '2CTYP4', '1CRPX4', '2CRPX4', '1CRVL4', '2CRVL4', '1CUNI4', '2CUNI4', '1CDLT4', '2CDLT4', '11PC4', '12PC4', '21PC4', '22PC4', 'WCSN5P', 'WCAX5P', '1CTY5P', '2CTY5P', '1CUN5P', '2CUN5P', '1CRV5P', '2CRV5P', '1CDL5P', '2CDL5P', '1CRP5P', '2CRP5P', 'WCAX5', '1CTYP5', '2CTYP5', '1CRPX5', '2CRPX5', '1CRVL5', '2CRVL5', '1CUNI5', '2CUNI5', '1CDLT5', '2CDLT5', '11PC5', '12PC5', '21PC5', '22PC5', 'WCSN6P', 'WCAX6P', '1CTY6P', '2CTY6P', '1CUN6P', '2CUN6P', '1CRV6P', '2CRV6P', '1CDL6P', '2CDL6P', '1CRP6P', '2CRP6P', 'WCAX6', '1CTYP6', '2CTYP6', '1CRPX6', '2CRPX6', '1CRVL6', '2CRVL6', '1CUNI6', '2CUNI6', '1CDLT6', '2CDLT6', '11PC6', '12PC6', '21PC6', '22PC6', 'WCSN7P', 'WCAX7P', '1CTY7P', '2CTY7P', '1CUN7P', '2CUN7P', '1CRV7P', '2CRV7P', '1CDL7P', '2CDL7P', '1CRP7P', '2CRP7P', 'WCAX7', '1CTYP7', '2CTYP7', '1CRPX7', '2CRPX7', '1CRVL7', '2CRVL7', '1CUNI7', '2CUNI7', '1CDLT7', '2CDLT7', '11PC7', '12PC7', '21PC7', '22PC7', 'WCSN8P', 'WCAX8P', '1CTY8P', '2CTY8P', '1CUN8P', '2CUN8P', '1CRV8P', '2CRV8P', '1CDL8P', '2CDL8P', '1CRP8P', '2CRP8P', 'WCAX8', '1CTYP8', '2CTYP8', '1CRPX8', '2CRPX8', '1CRVL8', '2CRVL8', '1CUNI8', '2CUNI8', '1CDLT8', '2CDLT8', '11PC8', '12PC8', '21PC8', '22PC8', 'WCSN9P', 'WCAX9P', '1CTY9P', '2CTY9P', '1CUN9P', '2CUN9P', '1CRV9P', '2CRV9P', '1CDL9P', '2CDL9P', '1CRP9P', '2CRP9P', 'WCAX9', '1CTYP9', '2CTYP9', '1CRPX9', '2CRPX9', '1CRVL9', '2CRVL9', '1CUNI9', '2CUNI9', '1CDLT9', '2CDLT9', '11PC9', '12PC9', '21PC9', '22PC9', 'INHERIT', 'EXTNAME', 'EXTVER', 'TELESCOP', 'INSTRUME', 'OBJECT', 'KEPLERID', 'RADESYS', 'RA_OBJ', 'DEC_OBJ', 'EQUINOX', 'EXPOSURE', 'TIMEREF', 'TASSIGN', 'TIMESYS', 'BJDREFI', 'BJDREFF', 'TIMEUNIT', 'TELAPSE', 'LIVETIME', 'TSTART', 'TSTOP', 'LC_START', 'LC_END', 'DEADC', 'TIMEPIXR', 'TIERRELA', 'INT_TIME', 'READTIME', 'FRAMETIM', 'NUM_FRM', 'TIMEDEL', 'DATE-OBS', 'DATE-END', 'BACKAPP', 'DEADAPP', 'VIGNAPP', 'GAIN', 'READNOIS', 'NREADOUT', 'TIMSLICE', 'MEANBLCK', 'LCFXDOFF', 'SCFXDOFF'] elif (hdu == 3) or (hdu == 4) or (hdu == 5): # Get info from the TPF BinTable HDU Header tpf_header = headers[2] entries = ['TELESCOP', 'INSTRUME', 'OBJECT', 'KEPLERID', 'RADESYS', 'RA_OBJ', 'DEC_OBJ', 'EQUINOX', 'WCSAXES', 'CTYPE1', 'CTYPE2', 'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2', 'CUNIT1', 'CUNIT2', 'CDELT1', 'CDELT2', 'PC1_1', 'PC1_2', 'PC2_1', 'PC2_2', 'WCSNAMEP', 'WCSAXESP', 'CTYPE1P', 'CUNIT1P', 'CRPIX1P', 'CRVAL1P', 'CDELT1P', 'CTYPE2P', 'CUNIT2P', 'CRPIX2P', 'CRVAL2P', 'CDELT2P', 'NPIXSAP', 'NPIXMISS'] else: return [] cards = [] cards.append(('COMMENT', '************************')) cards.append(('COMMENT', '* MISSION INFO *')) cards.append(('COMMENT', '************************')) for entry in entries: try: cards.append(tuple(tpf_header[entry])) except KeyError: pass return cards
[ "def", "HDUCards", "(", "headers", ",", "hdu", "=", "0", ")", ":", "if", "headers", "is", "None", ":", "return", "[", "]", "if", "hdu", "==", "0", ":", "# Get info from the TPF Primary HDU Header", "tpf_header", "=", "headers", "[", "0", "]", "entries", "=", "[", "'TELESCOP'", ",", "'INSTRUME'", ",", "'OBJECT'", ",", "'KEPLERID'", ",", "'CHANNEL'", ",", "'MODULE'", ",", "'OUTPUT'", ",", "'CAMPAIGN'", ",", "'DATA_REL'", ",", "'OBSMODE'", ",", "'TTABLEID'", ",", "'RADESYS'", ",", "'RA_OBJ'", ",", "'DEC_OBJ'", ",", "'EQUINOX'", ",", "'KEPMAG'", "]", "elif", "(", "hdu", "==", "1", ")", "or", "(", "hdu", "==", "6", ")", ":", "# Get info from the TPF BinTable HDU Header", "tpf_header", "=", "headers", "[", "1", "]", "entries", "=", "[", "'WCSN4P'", ",", "'WCAX4P'", ",", "'1CTY4P'", ",", "'2CTY4P'", ",", "'1CUN4P'", ",", "'2CUN4P'", ",", "'1CRV4P'", ",", "'2CRV4P'", ",", "'1CDL4P'", ",", "'2CDL4P'", ",", "'1CRP4P'", ",", "'2CRP4P'", ",", "'WCAX4'", ",", "'1CTYP4'", ",", "'2CTYP4'", ",", "'1CRPX4'", ",", "'2CRPX4'", ",", "'1CRVL4'", ",", "'2CRVL4'", ",", "'1CUNI4'", ",", "'2CUNI4'", ",", "'1CDLT4'", ",", "'2CDLT4'", ",", "'11PC4'", ",", "'12PC4'", ",", "'21PC4'", ",", "'22PC4'", ",", "'WCSN5P'", ",", "'WCAX5P'", ",", "'1CTY5P'", ",", "'2CTY5P'", ",", "'1CUN5P'", ",", "'2CUN5P'", ",", "'1CRV5P'", ",", "'2CRV5P'", ",", "'1CDL5P'", ",", "'2CDL5P'", ",", "'1CRP5P'", ",", "'2CRP5P'", ",", "'WCAX5'", ",", "'1CTYP5'", ",", "'2CTYP5'", ",", "'1CRPX5'", ",", "'2CRPX5'", ",", "'1CRVL5'", ",", "'2CRVL5'", ",", "'1CUNI5'", ",", "'2CUNI5'", ",", "'1CDLT5'", ",", "'2CDLT5'", ",", "'11PC5'", ",", "'12PC5'", ",", "'21PC5'", ",", "'22PC5'", ",", "'WCSN6P'", ",", "'WCAX6P'", ",", "'1CTY6P'", ",", "'2CTY6P'", ",", "'1CUN6P'", ",", "'2CUN6P'", ",", "'1CRV6P'", ",", "'2CRV6P'", ",", "'1CDL6P'", ",", "'2CDL6P'", ",", "'1CRP6P'", ",", "'2CRP6P'", ",", "'WCAX6'", ",", "'1CTYP6'", ",", "'2CTYP6'", ",", "'1CRPX6'", ",", "'2CRPX6'", ",", "'1CRVL6'", ",", "'2CRVL6'", ",", "'1CUNI6'", ",", "'2CUNI6'", ",", "'1CDLT6'", ",", "'2CDLT6'", ",", "'11PC6'", ",", "'12PC6'", ",", "'21PC6'", ",", "'22PC6'", ",", "'WCSN7P'", ",", "'WCAX7P'", ",", "'1CTY7P'", ",", "'2CTY7P'", ",", "'1CUN7P'", ",", "'2CUN7P'", ",", "'1CRV7P'", ",", "'2CRV7P'", ",", "'1CDL7P'", ",", "'2CDL7P'", ",", "'1CRP7P'", ",", "'2CRP7P'", ",", "'WCAX7'", ",", "'1CTYP7'", ",", "'2CTYP7'", ",", "'1CRPX7'", ",", "'2CRPX7'", ",", "'1CRVL7'", ",", "'2CRVL7'", ",", "'1CUNI7'", ",", "'2CUNI7'", ",", "'1CDLT7'", ",", "'2CDLT7'", ",", "'11PC7'", ",", "'12PC7'", ",", "'21PC7'", ",", "'22PC7'", ",", "'WCSN8P'", ",", "'WCAX8P'", ",", "'1CTY8P'", ",", "'2CTY8P'", ",", "'1CUN8P'", ",", "'2CUN8P'", ",", "'1CRV8P'", ",", "'2CRV8P'", ",", "'1CDL8P'", ",", "'2CDL8P'", ",", "'1CRP8P'", ",", "'2CRP8P'", ",", "'WCAX8'", ",", "'1CTYP8'", ",", "'2CTYP8'", ",", "'1CRPX8'", ",", "'2CRPX8'", ",", "'1CRVL8'", ",", "'2CRVL8'", ",", "'1CUNI8'", ",", "'2CUNI8'", ",", "'1CDLT8'", ",", "'2CDLT8'", ",", "'11PC8'", ",", "'12PC8'", ",", "'21PC8'", ",", "'22PC8'", ",", "'WCSN9P'", ",", "'WCAX9P'", ",", "'1CTY9P'", ",", "'2CTY9P'", ",", "'1CUN9P'", ",", "'2CUN9P'", ",", "'1CRV9P'", ",", "'2CRV9P'", ",", "'1CDL9P'", ",", "'2CDL9P'", ",", "'1CRP9P'", ",", "'2CRP9P'", ",", "'WCAX9'", ",", "'1CTYP9'", ",", "'2CTYP9'", ",", "'1CRPX9'", ",", "'2CRPX9'", ",", "'1CRVL9'", ",", "'2CRVL9'", ",", "'1CUNI9'", ",", "'2CUNI9'", ",", "'1CDLT9'", ",", "'2CDLT9'", ",", "'11PC9'", ",", "'12PC9'", ",", "'21PC9'", ",", "'22PC9'", ",", "'INHERIT'", ",", "'EXTNAME'", ",", "'EXTVER'", ",", "'TELESCOP'", ",", "'INSTRUME'", 
",", "'OBJECT'", ",", "'KEPLERID'", ",", "'RADESYS'", ",", "'RA_OBJ'", ",", "'DEC_OBJ'", ",", "'EQUINOX'", ",", "'EXPOSURE'", ",", "'TIMEREF'", ",", "'TASSIGN'", ",", "'TIMESYS'", ",", "'BJDREFI'", ",", "'BJDREFF'", ",", "'TIMEUNIT'", ",", "'TELAPSE'", ",", "'LIVETIME'", ",", "'TSTART'", ",", "'TSTOP'", ",", "'LC_START'", ",", "'LC_END'", ",", "'DEADC'", ",", "'TIMEPIXR'", ",", "'TIERRELA'", ",", "'INT_TIME'", ",", "'READTIME'", ",", "'FRAMETIM'", ",", "'NUM_FRM'", ",", "'TIMEDEL'", ",", "'DATE-OBS'", ",", "'DATE-END'", ",", "'BACKAPP'", ",", "'DEADAPP'", ",", "'VIGNAPP'", ",", "'GAIN'", ",", "'READNOIS'", ",", "'NREADOUT'", ",", "'TIMSLICE'", ",", "'MEANBLCK'", ",", "'LCFXDOFF'", ",", "'SCFXDOFF'", "]", "elif", "(", "hdu", "==", "3", ")", "or", "(", "hdu", "==", "4", ")", "or", "(", "hdu", "==", "5", ")", ":", "# Get info from the TPF BinTable HDU Header", "tpf_header", "=", "headers", "[", "2", "]", "entries", "=", "[", "'TELESCOP'", ",", "'INSTRUME'", ",", "'OBJECT'", ",", "'KEPLERID'", ",", "'RADESYS'", ",", "'RA_OBJ'", ",", "'DEC_OBJ'", ",", "'EQUINOX'", ",", "'WCSAXES'", ",", "'CTYPE1'", ",", "'CTYPE2'", ",", "'CRPIX1'", ",", "'CRPIX2'", ",", "'CRVAL1'", ",", "'CRVAL2'", ",", "'CUNIT1'", ",", "'CUNIT2'", ",", "'CDELT1'", ",", "'CDELT2'", ",", "'PC1_1'", ",", "'PC1_2'", ",", "'PC2_1'", ",", "'PC2_2'", ",", "'WCSNAMEP'", ",", "'WCSAXESP'", ",", "'CTYPE1P'", ",", "'CUNIT1P'", ",", "'CRPIX1P'", ",", "'CRVAL1P'", ",", "'CDELT1P'", ",", "'CTYPE2P'", ",", "'CUNIT2P'", ",", "'CRPIX2P'", ",", "'CRVAL2P'", ",", "'CDELT2P'", ",", "'NPIXSAP'", ",", "'NPIXMISS'", "]", "else", ":", "return", "[", "]", "cards", "=", "[", "]", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'* MISSION INFO *'", ")", ")", "cards", ".", "append", "(", "(", "'COMMENT'", ",", "'************************'", ")", ")", "for", "entry", "in", "entries", ":", "try", ":", "cards", ".", "append", "(", "tuple", "(", "tpf_header", "[", "entry", "]", ")", ")", "except", "KeyError", ":", "pass", "return", "cards" ]
Generates HDU cards for inclusion in the de-trended light curve FITS file. Used internally.
[ "Generates", "HDU", "cards", "for", "inclusion", "in", "the", "de", "-", "trended", "light", "curve", "FITS", "file", ".", "Used", "internally", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1659-L1763
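HDUCards copies selected keywords out of the target pixel file headers depending on which extension of the output file is being built. The sketch below calls it with a minimal stand-in for the TPF header list; real callers pass the header structures saved during download, but any mapping whose values are card-like tuples satisfies the function's tuple(tpf_header[entry]) access pattern. All keyword values here are illustrative.

# Hypothetical primary header with a few of the keywords HDUCards looks for
primary = {'TELESCOP': ('TELESCOP', 'Kepler'),
           'OBJECT': ('OBJECT', 'EPIC 201367065'),
           'KEPMAG': ('KEPMAG', 11.6)}
headers = [primary, {}, {}]  # [primary HDU, BinTable HDU, aperture HDU]

cards = HDUCards(headers, hdu=0)
# -> three COMMENT cards plus one card per keyword found in the header;
#    missing keywords are skipped via the KeyError branch
print(cards)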
rodluger/everest
everest/missions/k2/k2.py
TargetDirectory
def TargetDirectory(ID, season, relative=False, **kwargs):
    '''
    Returns the location of the :py:mod:`everest` data on disk
    for a given target.

    :param ID: The target ID
    :param int season: The target season number
    :param bool relative: Relative path? Default :py:obj:`False`

    '''
    if season is None:
        return None
    if relative:
        path = ''
    else:
        path = EVEREST_DAT
    return os.path.join(path, 'k2', 'c%02d' % season,
                        ('%09d' % ID)[:4] + '00000',
                        ('%09d' % ID)[4:])
python
def TargetDirectory(ID, season, relative=False, **kwargs):
    '''
    Returns the location of the :py:mod:`everest` data on disk
    for a given target.

    :param ID: The target ID
    :param int season: The target season number
    :param bool relative: Relative path? Default :py:obj:`False`

    '''
    if season is None:
        return None
    if relative:
        path = ''
    else:
        path = EVEREST_DAT
    return os.path.join(path, 'k2', 'c%02d' % season,
                        ('%09d' % ID)[:4] + '00000',
                        ('%09d' % ID)[4:])
[ "def", "TargetDirectory", "(", "ID", ",", "season", ",", "relative", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "season", "is", "None", ":", "return", "None", "if", "relative", ":", "path", "=", "''", "else", ":", "path", "=", "EVEREST_DAT", "return", "os", ".", "path", ".", "join", "(", "path", ",", "'k2'", ",", "'c%02d'", "%", "season", ",", "(", "'%09d'", "%", "ID", ")", "[", ":", "4", "]", "+", "'00000'", ",", "(", "'%09d'", "%", "ID", ")", "[", "4", ":", "]", ")" ]
Returns the location of the :py:mod:`everest` data on disk
for a given target.

:param ID: The target ID
:param int season: The target season number
:param bool relative: Relative path? Default :py:obj:`False`
[ "Returns", "the", "location", "of", "the", ":", "py", ":", "mod", ":", "everest", "data", "on", "disk", "for", "a", "given", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1766-L1785
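A worked example of the layout TargetDirectory produces: the nine-digit zero-padded ID is split after its fourth digit, so EPIC 201367065 in campaign 1 maps to .../k2/c01/201300000/67065 under EVEREST_DAT. The EPIC ID and import path below are illustrative.

from everest.missions.k2 import TargetDirectory  # assumed import path

path = TargetDirectory(201367065, 1)
# -> <EVEREST_DAT>/k2/c01/201300000/67065
rel = TargetDirectory(201367065, 1, relative=True)
# -> k2/c01/201300000/67065 (no EVEREST_DAT prefix)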
rodluger/everest
everest/missions/k2/k2.py
DVSFile
def DVSFile(ID, season, cadence='lc'):
    '''
    Returns the name of the DVS PDF for a given target.

    :param ID: The target ID
    :param int season: The target season number
    :param str cadence: The cadence type. Default `lc`

    '''
    if cadence == 'sc':
        strcadence = '_sc'
    else:
        strcadence = ''
    return 'hlsp_everest_k2_llc_%d-c%02d_kepler_v%s_dvs%s.pdf' \
        % (ID, season, EVEREST_MAJOR_MINOR, strcadence)
python
def DVSFile(ID, season, cadence='lc'):
    '''
    Returns the name of the DVS PDF for a given target.

    :param ID: The target ID
    :param int season: The target season number
    :param str cadence: The cadence type. Default `lc`

    '''
    if cadence == 'sc':
        strcadence = '_sc'
    else:
        strcadence = ''
    return 'hlsp_everest_k2_llc_%d-c%02d_kepler_v%s_dvs%s.pdf' \
        % (ID, season, EVEREST_MAJOR_MINOR, strcadence)
[ "def", "DVSFile", "(", "ID", ",", "season", ",", "cadence", "=", "'lc'", ")", ":", "if", "cadence", "==", "'sc'", ":", "strcadence", "=", "'_sc'", "else", ":", "strcadence", "=", "''", "return", "'hlsp_everest_k2_llc_%d-c%02d_kepler_v%s_dvs%s.pdf'", "%", "(", "ID", ",", "season", ",", "EVEREST_MAJOR_MINOR", ",", "strcadence", ")" ]
Returns the name of the DVS PDF for a given target.

:param ID: The target ID
:param int season: The target season number
:param str cadence: The cadence type. Default `lc`
[ "Returns", "the", "name", "of", "the", "DVS", "PDF", "for", "a", "given", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1801-L1816
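A sketch of the filenames DVSFile generates. With EVEREST_MAJOR_MINOR equal to, say, '2.0', the two calls below yield the long and short cadence variants; the EPIC ID, season, and version value are illustrative, and the import path is an assumption.

from everest.missions.k2 import DVSFile  # assumed import path

print(DVSFile(201367065, 1))
# -> hlsp_everest_k2_llc_201367065-c01_kepler_v2.0_dvs.pdf
print(DVSFile(201367065, 1, cadence='sc'))
# -> hlsp_everest_k2_llc_201367065-c01_kepler_v2.0_dvs_sc.pdf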
rodluger/everest
everest/missions/k2/k2.py
GetTargetCBVs
def GetTargetCBVs(model):
    '''
    Computes the design matrix of CBVs for the given target and stores
    it in :py:attr:`model.XCBV`.

    :param model: An instance of the :py:obj:`everest` model for the target

    '''
    # Get the info
    season = model.season
    name = model.name
    # We use the LC light curves as CBVs; there aren't
    # enough SC light curves to get a good set
    if name.endswith('.sc'):
        name = name[:-3]
    model.XCBV = sysrem.GetCBVs(season, model=name,
                                niter=model.cbv_niter,
                                sv_win=model.cbv_win,
                                sv_order=model.cbv_order)
python
def GetTargetCBVs(model):
    '''
    Computes the design matrix of CBVs for the given target and stores
    it in :py:attr:`model.XCBV`.

    :param model: An instance of the :py:obj:`everest` model for the target

    '''
    # Get the info
    season = model.season
    name = model.name
    # We use the LC light curves as CBVs; there aren't
    # enough SC light curves to get a good set
    if name.endswith('.sc'):
        name = name[:-3]
    model.XCBV = sysrem.GetCBVs(season, model=name,
                                niter=model.cbv_niter,
                                sv_win=model.cbv_win,
                                sv_order=model.cbv_order)
[ "def", "GetTargetCBVs", "(", "model", ")", ":", "# Get the info", "season", "=", "model", ".", "season", "name", "=", "model", ".", "name", "# We use the LC light curves as CBVs; there aren't", "# enough SC light curves to get a good set", "if", "name", ".", "endswith", "(", "'.sc'", ")", ":", "name", "=", "name", "[", ":", "-", "3", "]", "model", ".", "XCBV", "=", "sysrem", ".", "GetCBVs", "(", "season", ",", "model", "=", "name", ",", "niter", "=", "model", ".", "cbv_niter", ",", "sv_win", "=", "model", ".", "cbv_win", ",", "sv_order", "=", "model", ".", "cbv_order", ")" ]
Computes the design matrix of CBVs for the given target and stores
it in :py:attr:`model.XCBV`.

:param model: An instance of the :py:obj:`everest` model for the target
[ "Returns", "the", "design", "matrix", "of", "CBVs", "for", "the", "given", "target", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1847-L1867
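Note that GetTargetCBVs populates model.XCBV in place rather than returning the matrix. A minimal sketch, assuming star is an everest model instance exposing the season, name, and cbv_* attributes the function reads (the EPIC ID is hypothetical):

import everest  # assumed top-level package

star = everest.Everest(201367065)  # hypothetical de-trended target
GetTargetCBVs(star)                # fills in star.XCBV in place
print(star.XCBV.shape)             # (n_cadences, n_CBVs) design matrix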
rodluger/everest
everest/missions/k2/k2.py
FitCBVs
def FitCBVs(model): ''' Fits the CBV design matrix to the de-trended flux of a given target. This is called internally whenever the user accesses the :py:attr:`fcor` attribute. :param model: An instance of the :py:obj:`everest` model for the target ''' # Get cbvs? if model.XCBV is None: GetTargetCBVs(model) # The number of CBVs to use ncbv = model.cbv_num # Need to treat short and long cadences differently if model.cadence == 'lc': # Loop over all the light curve segments m = [None for b in range(len(model.breakpoints))] weights = [None for b in range(len(model.breakpoints))] for b in range(len(model.breakpoints)): # Get the indices for this light curve segment inds = model.get_chunk(b, pad=False) masked_inds = model.get_masked_chunk(b, pad=False) # Regress mX = model.XCBV[masked_inds, :ncbv + 1] A = np.dot(mX.T, mX) B = np.dot(mX.T, model.flux[masked_inds]) try: weights[b] = np.linalg.solve(A, B) except np.linalg.linalg.LinAlgError: # Singular matrix log.warn('Singular matrix!') weights[b] = np.zeros(mX.shape[1]) m[b] = np.dot(model.XCBV[inds, :ncbv + 1], weights[b]) # Vertical alignment if b == 0: m[b] -= np.nanmedian(m[b]) else: # Match the first finite model point on either side of the # break # We could consider something more elaborate in the future i0 = -1 - np.argmax([np.isfinite(m[b - 1][-i]) for i in range(1, len(m[b - 1]) - 1)]) i1 = np.argmax([np.isfinite(m[b][i]) for i in range(len(m[b]))]) m[b] += (m[b - 1][i0] - m[b][i1]) # Join model and normalize m = np.concatenate(m) m -= np.nanmedian(m) else: # Interpolate over outliers so we don't have to worry # about masking the arrays below flux = Interpolate(model.time, model.mask, model.flux) # Get downbinned light curve newsize = len(model.time) // 30 time = Downbin(model.time, newsize, operation='mean') flux = Downbin(flux, newsize, operation='mean') # Get LC breakpoints breakpoints = list(Breakpoints( model.ID, season=model.season, cadence='lc')) breakpoints += [len(time) - 1] # Loop over all the light curve segments m = [None for b in range(len(breakpoints))] weights = [None for b in range(len(breakpoints))] for b in range(len(breakpoints)): # Get the indices for this light curve segment M = np.arange(len(time)) if b > 0: inds = M[(M > breakpoints[b - 1]) & (M <= breakpoints[b])] else: inds = M[M <= breakpoints[b]] # Regress A = np.dot(model.XCBV[inds, :ncbv + 1].T, model.XCBV[inds, :ncbv + 1]) B = np.dot(model.XCBV[inds, :ncbv + 1].T, flux[inds]) weights[b] = np.linalg.solve(A, B) m[b] = np.dot(model.XCBV[inds, :ncbv + 1], weights[b]) # Vertical alignment if b == 0: m[b] -= np.nanmedian(m[b]) else: # Match the first finite model point on either side of the # break # We could consider something more elaborate in the future i0 = -1 - np.argmax([np.isfinite(m[b - 1][-i]) for i in range(1, len(m[b - 1]) - 1)]) i1 = np.argmax([np.isfinite(m[b][i]) for i in range(len(m[b]))]) m[b] += (m[b - 1][i0] - m[b][i1]) # Join model and normalize m = np.concatenate(m) m -= np.nanmedian(m) # Finally, interpolate back to short cadence m = np.interp(model.time, time, m) return m
python
def FitCBVs(model): ''' Fits the CBV design matrix to the de-trended flux of a given target. This is called internally whenever the user accesses the :py:attr:`fcor` attribute. :param model: An instance of the :py:obj:`everest` model for the target ''' # Get cbvs? if model.XCBV is None: GetTargetCBVs(model) # The number of CBVs to use ncbv = model.cbv_num # Need to treat short and long cadences differently if model.cadence == 'lc': # Loop over all the light curve segments m = [None for b in range(len(model.breakpoints))] weights = [None for b in range(len(model.breakpoints))] for b in range(len(model.breakpoints)): # Get the indices for this light curve segment inds = model.get_chunk(b, pad=False) masked_inds = model.get_masked_chunk(b, pad=False) # Regress mX = model.XCBV[masked_inds, :ncbv + 1] A = np.dot(mX.T, mX) B = np.dot(mX.T, model.flux[masked_inds]) try: weights[b] = np.linalg.solve(A, B) except np.linalg.linalg.LinAlgError: # Singular matrix log.warn('Singular matrix!') weights[b] = np.zeros(mX.shape[1]) m[b] = np.dot(model.XCBV[inds, :ncbv + 1], weights[b]) # Vertical alignment if b == 0: m[b] -= np.nanmedian(m[b]) else: # Match the first finite model point on either side of the # break # We could consider something more elaborate in the future i0 = -1 - np.argmax([np.isfinite(m[b - 1][-i]) for i in range(1, len(m[b - 1]) - 1)]) i1 = np.argmax([np.isfinite(m[b][i]) for i in range(len(m[b]))]) m[b] += (m[b - 1][i0] - m[b][i1]) # Join model and normalize m = np.concatenate(m) m -= np.nanmedian(m) else: # Interpolate over outliers so we don't have to worry # about masking the arrays below flux = Interpolate(model.time, model.mask, model.flux) # Get downbinned light curve newsize = len(model.time) // 30 time = Downbin(model.time, newsize, operation='mean') flux = Downbin(flux, newsize, operation='mean') # Get LC breakpoints breakpoints = list(Breakpoints( model.ID, season=model.season, cadence='lc')) breakpoints += [len(time) - 1] # Loop over all the light curve segments m = [None for b in range(len(breakpoints))] weights = [None for b in range(len(breakpoints))] for b in range(len(breakpoints)): # Get the indices for this light curve segment M = np.arange(len(time)) if b > 0: inds = M[(M > breakpoints[b - 1]) & (M <= breakpoints[b])] else: inds = M[M <= breakpoints[b]] # Regress A = np.dot(model.XCBV[inds, :ncbv + 1].T, model.XCBV[inds, :ncbv + 1]) B = np.dot(model.XCBV[inds, :ncbv + 1].T, flux[inds]) weights[b] = np.linalg.solve(A, B) m[b] = np.dot(model.XCBV[inds, :ncbv + 1], weights[b]) # Vertical alignment if b == 0: m[b] -= np.nanmedian(m[b]) else: # Match the first finite model point on either side of the # break # We could consider something more elaborate in the future i0 = -1 - np.argmax([np.isfinite(m[b - 1][-i]) for i in range(1, len(m[b - 1]) - 1)]) i1 = np.argmax([np.isfinite(m[b][i]) for i in range(len(m[b]))]) m[b] += (m[b - 1][i0] - m[b][i1]) # Join model and normalize m = np.concatenate(m) m -= np.nanmedian(m) # Finally, interpolate back to short cadence m = np.interp(model.time, time, m) return m
[ "def", "FitCBVs", "(", "model", ")", ":", "# Get cbvs?", "if", "model", ".", "XCBV", "is", "None", ":", "GetTargetCBVs", "(", "model", ")", "# The number of CBVs to use", "ncbv", "=", "model", ".", "cbv_num", "# Need to treat short and long cadences differently", "if", "model", ".", "cadence", "==", "'lc'", ":", "# Loop over all the light curve segments", "m", "=", "[", "None", "for", "b", "in", "range", "(", "len", "(", "model", ".", "breakpoints", ")", ")", "]", "weights", "=", "[", "None", "for", "b", "in", "range", "(", "len", "(", "model", ".", "breakpoints", ")", ")", "]", "for", "b", "in", "range", "(", "len", "(", "model", ".", "breakpoints", ")", ")", ":", "# Get the indices for this light curve segment", "inds", "=", "model", ".", "get_chunk", "(", "b", ",", "pad", "=", "False", ")", "masked_inds", "=", "model", ".", "get_masked_chunk", "(", "b", ",", "pad", "=", "False", ")", "# Regress", "mX", "=", "model", ".", "XCBV", "[", "masked_inds", ",", ":", "ncbv", "+", "1", "]", "A", "=", "np", ".", "dot", "(", "mX", ".", "T", ",", "mX", ")", "B", "=", "np", ".", "dot", "(", "mX", ".", "T", ",", "model", ".", "flux", "[", "masked_inds", "]", ")", "try", ":", "weights", "[", "b", "]", "=", "np", ".", "linalg", ".", "solve", "(", "A", ",", "B", ")", "except", "np", ".", "linalg", ".", "linalg", ".", "LinAlgError", ":", "# Singular matrix", "log", ".", "warn", "(", "'Singular matrix!'", ")", "weights", "[", "b", "]", "=", "np", ".", "zeros", "(", "mX", ".", "shape", "[", "1", "]", ")", "m", "[", "b", "]", "=", "np", ".", "dot", "(", "model", ".", "XCBV", "[", "inds", ",", ":", "ncbv", "+", "1", "]", ",", "weights", "[", "b", "]", ")", "# Vertical alignment", "if", "b", "==", "0", ":", "m", "[", "b", "]", "-=", "np", ".", "nanmedian", "(", "m", "[", "b", "]", ")", "else", ":", "# Match the first finite model point on either side of the", "# break", "# We could consider something more elaborate in the future", "i0", "=", "-", "1", "-", "np", ".", "argmax", "(", "[", "np", ".", "isfinite", "(", "m", "[", "b", "-", "1", "]", "[", "-", "i", "]", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "m", "[", "b", "-", "1", "]", ")", "-", "1", ")", "]", ")", "i1", "=", "np", ".", "argmax", "(", "[", "np", ".", "isfinite", "(", "m", "[", "b", "]", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "m", "[", "b", "]", ")", ")", "]", ")", "m", "[", "b", "]", "+=", "(", "m", "[", "b", "-", "1", "]", "[", "i0", "]", "-", "m", "[", "b", "]", "[", "i1", "]", ")", "# Join model and normalize", "m", "=", "np", ".", "concatenate", "(", "m", ")", "m", "-=", "np", ".", "nanmedian", "(", "m", ")", "else", ":", "# Interpolate over outliers so we don't have to worry", "# about masking the arrays below", "flux", "=", "Interpolate", "(", "model", ".", "time", ",", "model", ".", "mask", ",", "model", ".", "flux", ")", "# Get downbinned light curve", "newsize", "=", "len", "(", "model", ".", "time", ")", "//", "30", "time", "=", "Downbin", "(", "model", ".", "time", ",", "newsize", ",", "operation", "=", "'mean'", ")", "flux", "=", "Downbin", "(", "flux", ",", "newsize", ",", "operation", "=", "'mean'", ")", "# Get LC breakpoints", "breakpoints", "=", "list", "(", "Breakpoints", "(", "model", ".", "ID", ",", "season", "=", "model", ".", "season", ",", "cadence", "=", "'lc'", ")", ")", "breakpoints", "+=", "[", "len", "(", "time", ")", "-", "1", "]", "# Loop over all the light curve segments", "m", "=", "[", "None", "for", "b", "in", "range", "(", "len", "(", "breakpoints", ")", ")", "]", 
"weights", "=", "[", "None", "for", "b", "in", "range", "(", "len", "(", "breakpoints", ")", ")", "]", "for", "b", "in", "range", "(", "len", "(", "breakpoints", ")", ")", ":", "# Get the indices for this light curve segment", "M", "=", "np", ".", "arange", "(", "len", "(", "time", ")", ")", "if", "b", ">", "0", ":", "inds", "=", "M", "[", "(", "M", ">", "breakpoints", "[", "b", "-", "1", "]", ")", "&", "(", "M", "<=", "breakpoints", "[", "b", "]", ")", "]", "else", ":", "inds", "=", "M", "[", "M", "<=", "breakpoints", "[", "b", "]", "]", "# Regress", "A", "=", "np", ".", "dot", "(", "model", ".", "XCBV", "[", "inds", ",", ":", "ncbv", "+", "1", "]", ".", "T", ",", "model", ".", "XCBV", "[", "inds", ",", ":", "ncbv", "+", "1", "]", ")", "B", "=", "np", ".", "dot", "(", "model", ".", "XCBV", "[", "inds", ",", ":", "ncbv", "+", "1", "]", ".", "T", ",", "flux", "[", "inds", "]", ")", "weights", "[", "b", "]", "=", "np", ".", "linalg", ".", "solve", "(", "A", ",", "B", ")", "m", "[", "b", "]", "=", "np", ".", "dot", "(", "model", ".", "XCBV", "[", "inds", ",", ":", "ncbv", "+", "1", "]", ",", "weights", "[", "b", "]", ")", "# Vertical alignment", "if", "b", "==", "0", ":", "m", "[", "b", "]", "-=", "np", ".", "nanmedian", "(", "m", "[", "b", "]", ")", "else", ":", "# Match the first finite model point on either side of the", "# break", "# We could consider something more elaborate in the future", "i0", "=", "-", "1", "-", "np", ".", "argmax", "(", "[", "np", ".", "isfinite", "(", "m", "[", "b", "-", "1", "]", "[", "-", "i", "]", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "m", "[", "b", "-", "1", "]", ")", "-", "1", ")", "]", ")", "i1", "=", "np", ".", "argmax", "(", "[", "np", ".", "isfinite", "(", "m", "[", "b", "]", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "m", "[", "b", "]", ")", ")", "]", ")", "m", "[", "b", "]", "+=", "(", "m", "[", "b", "-", "1", "]", "[", "i0", "]", "-", "m", "[", "b", "]", "[", "i1", "]", ")", "# Join model and normalize", "m", "=", "np", ".", "concatenate", "(", "m", ")", "m", "-=", "np", ".", "nanmedian", "(", "m", ")", "# Finally, interpolate back to short cadence", "m", "=", "np", ".", "interp", "(", "model", ".", "time", ",", "time", ",", "m", ")", "return", "m" ]
Fits the CBV design matrix to the de-trended flux of a given target. This is called internally whenever the user accesses the :py:attr:`fcor` attribute. :param model: An instance of the :py:obj:`everest` model for the target
[ "Fits", "the", "CBV", "design", "matrix", "to", "the", "de", "-", "trended", "flux", "of", "a", "given", "target", ".", "This", "is", "called", "internally", "whenever", "the", "user", "accesses", "the", ":", "py", ":", "attr", ":", "fcor", "attribute", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1870-L1984
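The segment-wise fit in FitCBVs is ordinary linear least squares followed by a vertical offset that stitches segments together at each breakpoint. A minimal sketch of that idea on synthetic data (the arrays and breakpoints below are made up, not everest's API):

import numpy as np

t = np.linspace(0, 10, 200)
X = np.column_stack([np.ones_like(t), np.sin(t), np.cos(t)])   # toy "CBV" design matrix
flux = X @ np.array([1.0, 0.3, -0.2]) + 1e-3 * np.random.randn(t.size)
breakpoints = [100, 200]                                       # toy segment ends (exclusive)

segments, lo = [], 0
for hi in breakpoints:
    Xs, fs = X[lo:hi], flux[lo:hi]
    # lstsq in place of the normal equations above; it tolerates rank
    # deficiency instead of raising LinAlgError
    w, *_ = np.linalg.lstsq(Xs, fs, rcond=None)
    seg = Xs @ w
    if lo == 0:
        seg -= np.nanmedian(seg)              # anchor the first segment
    else:
        seg += segments[-1][-1] - seg[0]      # align edges across the break
    segments.append(seg)
    lo = hi
m = np.concatenate(segments)
m -= np.nanmedian(m)                          # join and normalize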
rodluger/everest
everest/missions/k2/k2.py
StatsToCSV
def StatsToCSV(campaign, model='nPLD'): ''' Generate the CSV file used in the search database for the documentation. ''' statsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (campaign, model)) csvfile = os.path.join(os.path.dirname(EVEREST_SRC), 'docs', 'c%02d.csv' % campaign) epic, kp, cdpp6r, cdpp6, _, _, _, _, saturated = \ np.loadtxt(statsfile, unpack=True, skiprows=2) with open(csvfile, 'w') as f: print('c%02d' % campaign, file=f) for i in range(len(epic)): print('%09d,%.3f,%.3f,%.3f,%d' % (epic[i], kp[i], cdpp6r[i], cdpp6[i], int(saturated[i])), file=f)
python
def StatsToCSV(campaign, model='nPLD'): ''' Generate the CSV file used in the search database for the documentation. ''' statsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (campaign, model)) csvfile = os.path.join(os.path.dirname(EVEREST_SRC), 'docs', 'c%02d.csv' % campaign) epic, kp, cdpp6r, cdpp6, _, _, _, _, saturated = \ np.loadtxt(statsfile, unpack=True, skiprows=2) with open(csvfile, 'w') as f: print('c%02d' % campaign, file=f) for i in range(len(epic)): print('%09d,%.3f,%.3f,%.3f,%d' % (epic[i], kp[i], cdpp6r[i], cdpp6[i], int(saturated[i])), file=f)
[ "def", "StatsToCSV", "(", "campaign", ",", "model", "=", "'nPLD'", ")", ":", "statsfile", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "campaign", ",", "model", ")", ")", "csvfile", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "EVEREST_SRC", ")", ",", "'docs'", ",", "'c%02d.csv'", "%", "campaign", ")", "epic", ",", "kp", ",", "cdpp6r", ",", "cdpp6", ",", "_", ",", "_", ",", "_", ",", "_", ",", "saturated", "=", "np", ".", "loadtxt", "(", "statsfile", ",", "unpack", "=", "True", ",", "skiprows", "=", "2", ")", "with", "open", "(", "csvfile", ",", "'w'", ")", "as", "f", ":", "print", "(", "'c%02d'", "%", "campaign", ",", "file", "=", "f", ")", "for", "i", "in", "range", "(", "len", "(", "epic", ")", ")", ":", "print", "(", "'%09d,%.3f,%.3f,%.3f,%d'", "%", "(", "epic", "[", "i", "]", ",", "kp", "[", "i", "]", ",", "cdpp6r", "[", "i", "]", ",", "cdpp6", "[", "i", "]", ",", "int", "(", "saturated", "[", "i", "]", ")", ")", ",", "file", "=", "f", ")" ]
Generate the CSV file used in the search database for the documentation.
[ "Generate", "the", "CSV", "file", "used", "in", "the", "search", "database", "for", "the", "documentation", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/k2.py#L1987-L2004
lsbardel/python-stdnet
stdnet/utils/zset.py
zset.remove
def remove(self, item): '''Remove ``item`` from the :class:`zset` if it exists. If found it returns the score of the item removed.''' score = self._dict.pop(item, None) if score is not None: self._sl.remove(score) return score
python
def remove(self, item): '''Remove ``item`` from the :class:`zset` if it exists. If found it returns the score of the item removed.''' score = self._dict.pop(item, None) if score is not None: self._sl.remove(score) return score
[ "def", "remove", "(", "self", ",", "item", ")", ":", "score", "=", "self", ".", "_dict", ".", "pop", "(", "item", ",", "None", ")", "if", "score", "is", "not", "None", ":", "self", ".", "_sl", ".", "remove", "(", "score", ")", "return", "score" ]
Remove ``item`` from the :class:`zset` if it exists. If found it returns the score of the item removed.
[ "Remove", "item", "from", "the", ":", "class", ":", "zset", "if", "it", "exists", ".", "If", "found", "it", "returns", "the", "score", "of", "the", "item", "removed", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/zset.py#L52-L58
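The remove above relies on the item-to-score dict staying in sync with the score skiplist. A toy mirror of that contract, assuming a plain sorted list in place of stdnet's skiplist:

import bisect

class ToyZset:
    def __init__(self):
        self._dict = {}     # item -> score
        self._scores = []   # sorted list standing in for the skiplist

    def add(self, score, item):
        old = self._dict.get(item)
        if old is not None:
            self._scores.pop(bisect.bisect_left(self._scores, old))
        self._dict[item] = score
        bisect.insort(self._scores, score)

    def remove(self, item):
        # Same contract as above: return the removed score, or None if absent.
        score = self._dict.pop(item, None)
        if score is not None:
            self._scores.pop(bisect.bisect_left(self._scores, score))
        return score

z = ToyZset()
z.add(1.5, 'a')
assert z.remove('a') == 1.5
assert z.remove('a') is None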
lsbardel/python-stdnet
stdnet/apps/columnts/redis.py
RedisColumnTS.fields
def fields(self): '''Return a tuple of ordered fields for this :class:`ColumnTS`.''' key = self.id + ':fields' encoding = self.client.encoding return tuple(sorted((f.decode(encoding) for f in self.client.smembers(key))))
python
def fields(self): '''Return a tuple of ordered fields for this :class:`ColumnTS`.''' key = self.id + ':fields' encoding = self.client.encoding return tuple(sorted((f.decode(encoding) for f in self.client.smembers(key))))
[ "def", "fields", "(", "self", ")", ":", "key", "=", "self", ".", "id", "+", "':fields'", "encoding", "=", "self", ".", "client", ".", "encoding", "return", "tuple", "(", "sorted", "(", "(", "f", ".", "decode", "(", "encoding", ")", "for", "f", "in", "self", ".", "client", ".", "smembers", "(", "key", ")", ")", ")", ")" ]
Return a tuple of ordered fields for this :class:`ColumnTS`.
[ "Return", "a", "tuple", "of", "ordered", "fields", "for", "this", ":", "class", ":", "ColumnTS", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/columnts/redis.py#L38-L43
lsbardel/python-stdnet
stdnet/odm/related.py
do_pending_lookups
def do_pending_lookups(event, sender, **kwargs): """Handle any pending relations to the sending model. Sent from class_prepared.""" key = (sender._meta.app_label, sender._meta.name) for callback in pending_lookups.pop(key, []): callback(sender)
python
def do_pending_lookups(event, sender, **kwargs): """Handle any pending relations to the sending model. Sent from class_prepared.""" key = (sender._meta.app_label, sender._meta.name) for callback in pending_lookups.pop(key, []): callback(sender)
[ "def", "do_pending_lookups", "(", "event", ",", "sender", ",", "*", "*", "kwargs", ")", ":", "key", "=", "(", "sender", ".", "_meta", ".", "app_label", ",", "sender", ".", "_meta", ".", "name", ")", "for", "callback", "in", "pending_lookups", ".", "pop", "(", "key", ",", "[", "]", ")", ":", "callback", "(", "sender", ")" ]
Handle any pending relations to the sending model. Sent from class_prepared.
[ "Handle", "any", "pending", "relations", "to", "the", "sending", "model", ".", "Sent", "from", "class_prepared", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/related.py#L66-L71
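This is the classic deferred-relation registry: callbacks parked under a model key fire once class_prepared announces that the model exists. A generic miniature of the pattern (names here are illustrative, not stdnet's):

from collections import defaultdict

pending_lookups = defaultdict(list)

def add_lazy_relation(key, callback):
    # Called while the target model is not yet defined.
    pending_lookups[key].append(callback)

def class_prepared(key, model):
    # Fired once the model class exists; flush its queue of callbacks.
    for callback in pending_lookups.pop(key, []):
        callback(model)

add_lazy_relation(('library', 'book'), lambda model: print('resolved:', model))
class_prepared(('library', 'book'), 'Book')   # prints: resolved: Book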
lsbardel/python-stdnet
stdnet/odm/related.py
Many2ManyThroughModel
def Many2ManyThroughModel(field): '''Create a Many2Many through model with two foreign key fields and a CompositeFieldId depending on the two foreign keys.''' from stdnet.odm import ModelType, StdModel, ForeignKey, CompositeIdField name_model = field.model._meta.name name_relmodel = field.relmodel._meta.name # The two models are the same. if name_model == name_relmodel: name_relmodel += '2' through = field.through # Create the through model if through is None: name = '{0}_{1}'.format(name_model, name_relmodel) class Meta: app_label = field.model._meta.app_label through = ModelType(name, (StdModel,), {'Meta': Meta}) field.through = through # The first field field1 = ForeignKey(field.model, related_name=field.name, related_manager_class=makeMany2ManyRelatedManager( field.relmodel, name_model, name_relmodel) ) field1.register_with_model(name_model, through) # The second field field2 = ForeignKey(field.relmodel, related_name=field.related_name, related_manager_class=makeMany2ManyRelatedManager( field.model, name_relmodel, name_model) ) field2.register_with_model(name_relmodel, through) pk = CompositeIdField(name_model, name_relmodel) pk.register_with_model('id', through)
python
def Many2ManyThroughModel(field): '''Create a Many2Many through model with two foreign key fields and a CompositeFieldId depending on the two foreign keys.''' from stdnet.odm import ModelType, StdModel, ForeignKey, CompositeIdField name_model = field.model._meta.name name_relmodel = field.relmodel._meta.name # The two models are the same. if name_model == name_relmodel: name_relmodel += '2' through = field.through # Create the through model if through is None: name = '{0}_{1}'.format(name_model, name_relmodel) class Meta: app_label = field.model._meta.app_label through = ModelType(name, (StdModel,), {'Meta': Meta}) field.through = through # The first field field1 = ForeignKey(field.model, related_name=field.name, related_manager_class=makeMany2ManyRelatedManager( field.relmodel, name_model, name_relmodel) ) field1.register_with_model(name_model, through) # The second field field2 = ForeignKey(field.relmodel, related_name=field.related_name, related_manager_class=makeMany2ManyRelatedManager( field.model, name_relmodel, name_model) ) field2.register_with_model(name_relmodel, through) pk = CompositeIdField(name_model, name_relmodel) pk.register_with_model('id', through)
[ "def", "Many2ManyThroughModel", "(", "field", ")", ":", "from", "stdnet", ".", "odm", "import", "ModelType", ",", "StdModel", ",", "ForeignKey", ",", "CompositeIdField", "name_model", "=", "field", ".", "model", ".", "_meta", ".", "name", "name_relmodel", "=", "field", ".", "relmodel", ".", "_meta", ".", "name", "# The two models are the same.\r", "if", "name_model", "==", "name_relmodel", ":", "name_relmodel", "+=", "'2'", "through", "=", "field", ".", "through", "# Create the through model\r", "if", "through", "is", "None", ":", "name", "=", "'{0}_{1}'", ".", "format", "(", "name_model", ",", "name_relmodel", ")", "class", "Meta", ":", "app_label", "=", "field", ".", "model", ".", "_meta", ".", "app_label", "through", "=", "ModelType", "(", "name", ",", "(", "StdModel", ",", ")", ",", "{", "'Meta'", ":", "Meta", "}", ")", "field", ".", "through", "=", "through", "# The first field\r", "field1", "=", "ForeignKey", "(", "field", ".", "model", ",", "related_name", "=", "field", ".", "name", ",", "related_manager_class", "=", "makeMany2ManyRelatedManager", "(", "field", ".", "relmodel", ",", "name_model", ",", "name_relmodel", ")", ")", "field1", ".", "register_with_model", "(", "name_model", ",", "through", ")", "# The second field\r", "field2", "=", "ForeignKey", "(", "field", ".", "relmodel", ",", "related_name", "=", "field", ".", "related_name", ",", "related_manager_class", "=", "makeMany2ManyRelatedManager", "(", "field", ".", "model", ",", "name_relmodel", ",", "name_model", ")", ")", "field2", ".", "register_with_model", "(", "name_relmodel", ",", "through", ")", "pk", "=", "CompositeIdField", "(", "name_model", ",", "name_relmodel", ")", "pk", ".", "register_with_model", "(", "'id'", ",", "through", ")" ]
Create a Many2Many through model with two foreign key fields and a CompositeFieldId depending on the two foreign keys.
[ "Create", "a", "Many2Many", "through", "model", "with", "two", "foreign", "key", "fields", "and", "a", "CompositeFieldId", "depending", "on", "the", "two", "foreign", "keys", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/related.py#L77-L114
lsbardel/python-stdnet
stdnet/odm/related.py
makeMany2ManyRelatedManager
def makeMany2ManyRelatedManager(formodel, name_relmodel, name_formodel): '''formodel is the model which the manager is for.''' class _Many2ManyRelatedManager(Many2ManyRelatedManager): pass _Many2ManyRelatedManager.formodel = formodel _Many2ManyRelatedManager.name_relmodel = name_relmodel _Many2ManyRelatedManager.name_formodel = name_formodel return _Many2ManyRelatedManager
python
def makeMany2ManyRelatedManager(formodel, name_relmodel, name_formodel): '''formodel is the model which the manager is for.''' class _Many2ManyRelatedManager(Many2ManyRelatedManager): pass _Many2ManyRelatedManager.formodel = formodel _Many2ManyRelatedManager.name_relmodel = name_relmodel _Many2ManyRelatedManager.name_formodel = name_formodel return _Many2ManyRelatedManager
[ "def", "makeMany2ManyRelatedManager", "(", "formodel", ",", "name_relmodel", ",", "name_formodel", ")", ":", "class", "_Many2ManyRelatedManager", "(", "Many2ManyRelatedManager", ")", ":", "pass", "_Many2ManyRelatedManager", ".", "formodel", "=", "formodel", "_Many2ManyRelatedManager", ".", "name_relmodel", "=", "name_relmodel", "_Many2ManyRelatedManager", ".", "name_formodel", "=", "name_formodel", "return", "_Many2ManyRelatedManager" ]
formodel is the model which the manager is for.
[ "formodel", "is", "the", "model", "which", "the", "manager", "is", "for", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/related.py#L265-L274
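The factory stamps out one manager subclass per relation, carrying its configuration as class attributes. The same pattern can be written with type() directly; a sketch under made-up names:

class BaseManager:
    """Illustrative stand-in for Many2ManyRelatedManager."""

def make_manager(formodel, name_relmodel, name_formodel):
    # type() builds the same throwaway subclass as the class statement above.
    return type('_Manager', (BaseManager,), {
        'formodel': formodel,
        'name_relmodel': name_relmodel,
        'name_formodel': name_formodel,
    })

AuthorBooks = make_manager('Book', 'author', 'book')
assert issubclass(AuthorBooks, BaseManager)
assert AuthorBooks.formodel == 'Book'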
lsbardel/python-stdnet
stdnet/odm/related.py
RelatedManager.session
def session(self, session=None): '''Override :meth:`Manager.session` so that this :class:`RelatedManager` can retrieve the session from the :attr:`related_instance` if available. ''' if self.related_instance: session = self.related_instance.session # we have a session, we either create a new one return the same session if session is None: raise QuerySetError('Related manager can be accessed only from\ a loaded instance of its related model.') return session
python
def session(self, session=None): '''Override :meth:`Manager.session` so that this :class:`RelatedManager` can retrieve the session from the :attr:`related_instance` if available. ''' if self.related_instance: session = self.related_instance.session # we have a session, we either create a new one return the same session if session is None: raise QuerySetError('Related manager can be accessed only from\ a loaded instance of its related model.') return session
[ "def", "session", "(", "self", ",", "session", "=", "None", ")", ":", "if", "self", ".", "related_instance", ":", "session", "=", "self", ".", "related_instance", ".", "session", "# we have a session, we either create a new one return the same session\r", "if", "session", "is", "None", ":", "raise", "QuerySetError", "(", "'Related manager can be accessed only from\\\r\n a loaded instance of its related model.'", ")", "return", "session" ]
Override :meth:`Manager.session` so that this :class:`RelatedManager` can retrieve the session from the :attr:`related_instance` if available.
[ "Override", ":", "meth", ":", "Manager", ".", "session", "so", "that", "this", ":", "class", ":", "RelatedManager", "can", "retrieve", "the", "session", "from", "the", ":", "attr", ":", "related_instance", "if", "available", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/related.py#L176-L187
lsbardel/python-stdnet
stdnet/odm/related.py
Many2ManyRelatedManager.add
def add(self, value, session=None, **kwargs): '''Add ``value``, an instance of :attr:`formodel` to the :attr:`through` model. This method can only be accessed by an instance of the model for which this related manager is an attribute.''' s, instance = self.session_instance('add', value, session, **kwargs) return s.add(instance)
python
def add(self, value, session=None, **kwargs): '''Add ``value``, an instance of :attr:`formodel` to the :attr:`through` model. This method can only be accessed by an instance of the model for which this related manager is an attribute.''' s, instance = self.session_instance('add', value, session, **kwargs) return s.add(instance)
[ "def", "add", "(", "self", ",", "value", ",", "session", "=", "None", ",", "*", "*", "kwargs", ")", ":", "s", ",", "instance", "=", "self", ".", "session_instance", "(", "'add'", ",", "value", ",", "session", ",", "*", "*", "kwargs", ")", "return", "s", ".", "add", "(", "instance", ")" ]
Add ``value``, an instance of :attr:`formodel` to the :attr:`through` model. This method can only be accessed by an instance of the model for which this related manager is an attribute.
[ "Add", "value", "an", "instance", "of", ":", "attr", ":", "formodel", "to", "the", ":", "attr", ":", "through", "model", ".", "This", "method", "can", "only", "be", "accessed", "by", "an", "instance", "of", "the", "model", "for", "which", "this", "related", "manager", "is", "an", "attribute", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/related.py#L237-L242
lsbardel/python-stdnet
stdnet/odm/related.py
Many2ManyRelatedManager.remove
def remove(self, value, session=None): '''Remove *value*, an instance of ``self.model`` from the set of elements contained by the field.''' s, instance = self.session_instance('remove', value, session) # update state so that the instance does look persistent instance.get_state(iid=instance.pkvalue(), action='update') return s.delete(instance)
python
def remove(self, value, session=None): '''Remove *value*, an instance of ``self.model`` from the set of elements contained by the field.''' s, instance = self.session_instance('remove', value, session) # update state so that the instance does look persistent instance.get_state(iid=instance.pkvalue(), action='update') return s.delete(instance)
[ "def", "remove", "(", "self", ",", "value", ",", "session", "=", "None", ")", ":", "s", ",", "instance", "=", "self", ".", "session_instance", "(", "'remove'", ",", "value", ",", "session", ")", "# update state so that the instance does look persistent\r", "instance", ".", "get_state", "(", "iid", "=", "instance", ".", "pkvalue", "(", ")", ",", "action", "=", "'update'", ")", "return", "s", ".", "delete", "(", "instance", ")" ]
Remove *value*, an instance of ``self.model`` from the set of elements contained by the field.
[ "Remove", "*", "value", "*", "an", "instance", "of", "self", ".", "model", "from", "the", "set", "of", "elements", "contained", "by", "the", "field", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/related.py#L244-L250
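add and remove on a many-to-many manager ultimately insert or delete one row of the through model. Reduced to its essence, with the through table modelled as a set of id pairs (a toy schematic, not stdnet's storage):

class ToyThrough:
    def __init__(self):
        self.rows = set()            # {(left_pk, right_pk)}

    def add(self, left_pk, right_pk):
        self.rows.add((left_pk, right_pk))

    def remove(self, left_pk, right_pk):
        self.rows.discard((left_pk, right_pk))

through = ToyThrough()
through.add(1, 42)        # link: one row in the through "table"
through.remove(1, 42)     # unlink: delete that row again
assert not through.rows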
lsbardel/python-stdnet
stdnet/apps/searchengine/processors/__init__.py
metaphone_processor
def metaphone_processor(words): '''Double metaphone word processor.''' for word in words: for w in double_metaphone(word): if w: w = w.strip() if w: yield w
python
def metaphone_processor(words): '''Double metaphone word processor.''' for word in words: for w in double_metaphone(word): if w: w = w.strip() if w: yield w
[ "def", "metaphone_processor", "(", "words", ")", ":", "for", "word", "in", "words", ":", "for", "w", "in", "double_metaphone", "(", "word", ")", ":", "if", "w", ":", "w", "=", "w", ".", "strip", "(", ")", "if", "w", ":", "yield", "w" ]
Double metaphone word processor.
[ "Double", "metaphone", "word", "processor", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/searchengine/processors/__init__.py#L18-L25
lsbardel/python-stdnet
stdnet/apps/searchengine/processors/__init__.py
tolerant_metaphone_processor
def tolerant_metaphone_processor(words): '''Double metaphone word processor slightly modified so that when no words are returned by the algorithm, the original word is returned.''' for word in words: r = 0 for w in double_metaphone(word): if w: w = w.strip() if w: r += 1 yield w if not r: yield word
python
def tolerant_metaphone_processor(words): '''Double metaphone word processor slightly modified so that when no words are returned by the algorithm, the original word is returned.''' for word in words: r = 0 for w in double_metaphone(word): if w: w = w.strip() if w: r += 1 yield w if not r: yield word
[ "def", "tolerant_metaphone_processor", "(", "words", ")", ":", "for", "word", "in", "words", ":", "r", "=", "0", "for", "w", "in", "double_metaphone", "(", "word", ")", ":", "if", "w", ":", "w", "=", "w", ".", "strip", "(", ")", "if", "w", ":", "r", "+=", "1", "yield", "w", "if", "not", "r", ":", "yield", "word" ]
Double metaphone word processor slightly modified so that when no words are returned by the algorithm, the original word is returned.
[ "Double", "metaphone", "word", "processor", "slightly", "modified", "so", "that", "when", "no", "words", "are", "returned", "by", "the", "algorithm", "the", "original", "word", "is", "returned", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/searchengine/processors/__init__.py#L28-L40
lsbardel/python-stdnet
stdnet/apps/searchengine/processors/__init__.py
stemming_processor
def stemming_processor(words): '''Porter Stemmer word processor''' stem = PorterStemmer().stem for word in words: word = stem(word, 0, len(word)-1) yield word
python
def stemming_processor(words): '''Porter Stemmer word processor''' stem = PorterStemmer().stem for word in words: word = stem(word, 0, len(word)-1) yield word
[ "def", "stemming_processor", "(", "words", ")", ":", "stem", "=", "PorterStemmer", "(", ")", ".", "stem", "for", "word", "in", "words", ":", "word", "=", "stem", "(", "word", ",", "0", ",", "len", "(", "word", ")", "-", "1", ")", "yield", "word" ]
Porter Stemmer word processor
[ "Porter", "Stemmer", "word", "processor" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/apps/searchengine/processors/__init__.py#L43-L48
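All three processors share the same shape: a generator that consumes words and yields transformed words, which makes them composable into a pipeline. A sketch of such chaining, with double_metaphone stubbed out (the real algorithm yields up to two phonetic codes per word):

def fake_double_metaphone(word):
    # Stub: the real double_metaphone returns (primary, secondary) codes.
    return (word[:4].upper(), None)

def metaphone_step(words):
    for word in words:
        for w in fake_double_metaphone(word):
            if w and w.strip():
                yield w.strip()

def lowercase_step(words):
    for word in words:
        yield word.lower()

def pipeline(words, *steps):
    for step in steps:
        words = step(words)   # each step lazily wraps the previous generator
    return list(words)

print(pipeline(['Hello', 'World'], lowercase_step, metaphone_step))
# ['HELL', 'WORL']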
rodluger/everest
everest/pool.py
Pool
def Pool(pool='AnyPool', **kwargs): ''' Chooses between the different pools. If ``pool == 'AnyPool'``, chooses based on availability. ''' if pool == 'MPIPool': return MPIPool(**kwargs) elif pool == 'MultiPool': return MultiPool(**kwargs) elif pool == 'SerialPool': return SerialPool(**kwargs) elif pool == 'AnyPool': if MPIPool.enabled(): return MPIPool(**kwargs) elif MultiPool.enabled(): return MultiPool(**kwargs) else: return SerialPool(**kwargs) else: raise ValueError('Invalid pool ``%s``.' % pool)
python
def Pool(pool='AnyPool', **kwargs): ''' Chooses between the different pools. If ``pool == 'AnyPool'``, chooses based on availability. ''' if pool == 'MPIPool': return MPIPool(**kwargs) elif pool == 'MultiPool': return MultiPool(**kwargs) elif pool == 'SerialPool': return SerialPool(**kwargs) elif pool == 'AnyPool': if MPIPool.enabled(): return MPIPool(**kwargs) elif MultiPool.enabled(): return MultiPool(**kwargs) else: return SerialPool(**kwargs) else: raise ValueError('Invalid pool ``%s``.' % pool)
[ "def", "Pool", "(", "pool", "=", "'AnyPool'", ",", "*", "*", "kwargs", ")", ":", "if", "pool", "==", "'MPIPool'", ":", "return", "MPIPool", "(", "*", "*", "kwargs", ")", "elif", "pool", "==", "'MultiPool'", ":", "return", "MultiPool", "(", "*", "*", "kwargs", ")", "elif", "pool", "==", "'SerialPool'", ":", "return", "SerialPool", "(", "*", "*", "kwargs", ")", "elif", "pool", "==", "'AnyPool'", ":", "if", "MPIPool", ".", "enabled", "(", ")", ":", "return", "MPIPool", "(", "*", "*", "kwargs", ")", "elif", "MultiPool", ".", "enabled", "(", ")", ":", "return", "MultiPool", "(", "*", "*", "kwargs", ")", "else", ":", "return", "SerialPool", "(", "*", "*", "kwargs", ")", "else", ":", "raise", "ValueError", "(", "'Invalid pool ``%s``.'", "%", "pool", ")" ]
Chooses between the different pools. If ``pool == 'AnyPool'``, chooses based on availability.
[ "Chooses", "between", "the", "different", "pools", ".", "If", "pool", "==", "AnyPool", "chooses", "based", "on", "availability", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/pool.py#L504-L525
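A hedged usage sketch of the factory: request 'AnyPool' and let it fall back from MPI to multiprocessing to serial. The import path and the map/close interface are assumed from the surrounding code, and the workload is a toy:

from everest.pool import Pool   # assumed import path, per everest/pool.py above

def square(x):
    return x * x

pool = Pool('AnyPool')          # MPIPool if available, else MultiPool, else SerialPool
try:
    # Under MPI, non-master ranks would block in wait() instead of reaching here.
    results = pool.map(square, range(8))
finally:
    pool.close()
print(results)                  # [0, 1, 4, 9, 16, 25, 36, 49]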
rodluger/everest
everest/pool.py
MPIPool.wait
def wait(self): """ If this isn't the master process, wait for instructions. """ if self.is_master(): raise RuntimeError("Master node told to await jobs.") status = MPI.Status() while True: # Event loop. # Sit here and await instructions. if self.debug: print("Worker {0} waiting for task.".format(self.rank)) # Blocking receive to wait for instructions. task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status) if self.debug: print("Worker {0} got task {1} with tag {2}." .format(self.rank, type(task), status.tag)) # Check if message is special sentinel signaling end. # If so, stop. if isinstance(task, _close_pool_message): if self.debug: print("Worker {0} told to quit.".format(self.rank)) break # Check if message is special type containing new function # to be applied if isinstance(task, _function_wrapper): self.function = task.function if self.debug: print("Worker {0} replaced its task function: {1}." .format(self.rank, self.function)) continue # If not a special message, just run the known function on # the input and return it asynchronously. result = self.function(task) if self.debug: print("Worker {0} sending answer {1} with tag {2}." .format(self.rank, type(result), status.tag)) self.comm.isend(result, dest=0, tag=status.tag) # Kill the process? if self.exit_on_end: sys.exit()
python
def wait(self): """ If this isn't the master process, wait for instructions. """ if self.is_master(): raise RuntimeError("Master node told to await jobs.") status = MPI.Status() while True: # Event loop. # Sit here and await instructions. if self.debug: print("Worker {0} waiting for task.".format(self.rank)) # Blocking receive to wait for instructions. task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status) if self.debug: print("Worker {0} got task {1} with tag {2}." .format(self.rank, type(task), status.tag)) # Check if message is special sentinel signaling end. # If so, stop. if isinstance(task, _close_pool_message): if self.debug: print("Worker {0} told to quit.".format(self.rank)) break # Check if message is special type containing new function # to be applied if isinstance(task, _function_wrapper): self.function = task.function if self.debug: print("Worker {0} replaced its task function: {1}." .format(self.rank, self.function)) continue # If not a special message, just run the known function on # the input and return it asynchronously. result = self.function(task) if self.debug: print("Worker {0} sending answer {1} with tag {2}." .format(self.rank, type(result), status.tag)) self.comm.isend(result, dest=0, tag=status.tag) # Kill the process? if self.exit_on_end: sys.exit()
[ "def", "wait", "(", "self", ")", ":", "if", "self", ".", "is_master", "(", ")", ":", "raise", "RuntimeError", "(", "\"Master node told to await jobs.\"", ")", "status", "=", "MPI", ".", "Status", "(", ")", "while", "True", ":", "# Event loop.", "# Sit here and await instructions.", "if", "self", ".", "debug", ":", "print", "(", "\"Worker {0} waiting for task.\"", ".", "format", "(", "self", ".", "rank", ")", ")", "# Blocking receive to wait for instructions.", "task", "=", "self", ".", "comm", ".", "recv", "(", "source", "=", "0", ",", "tag", "=", "MPI", ".", "ANY_TAG", ",", "status", "=", "status", ")", "if", "self", ".", "debug", ":", "print", "(", "\"Worker {0} got task {1} with tag {2}.\"", ".", "format", "(", "self", ".", "rank", ",", "type", "(", "task", ")", ",", "status", ".", "tag", ")", ")", "# Check if message is special sentinel signaling end.", "# If so, stop.", "if", "isinstance", "(", "task", ",", "_close_pool_message", ")", ":", "if", "self", ".", "debug", ":", "print", "(", "\"Worker {0} told to quit.\"", ".", "format", "(", "self", ".", "rank", ")", ")", "break", "# Check if message is special type containing new function", "# to be applied", "if", "isinstance", "(", "task", ",", "_function_wrapper", ")", ":", "self", ".", "function", "=", "task", ".", "function", "if", "self", ".", "debug", ":", "print", "(", "\"Worker {0} replaced its task function: {1}.\"", ".", "format", "(", "self", ".", "rank", ",", "self", ".", "function", ")", ")", "continue", "# If not a special message, just run the known function on", "# the input and return it asynchronously.", "result", "=", "self", ".", "function", "(", "task", ")", "if", "self", ".", "debug", ":", "print", "(", "\"Worker {0} sending answer {1} with tag {2}.\"", ".", "format", "(", "self", ".", "rank", ",", "type", "(", "result", ")", ",", "status", ".", "tag", ")", ")", "self", ".", "comm", ".", "isend", "(", "result", ",", "dest", "=", "0", ",", "tag", "=", "status", ".", "tag", ")", "# Kill the process?", "if", "self", ".", "exit_on_end", ":", "sys", ".", "exit", "(", ")" ]
If this isn't the master process, wait for instructions.
[ "If", "this", "isn", "t", "the", "master", "process", "wait", "for", "instructions", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/pool.py#L220-L269
rodluger/everest
everest/pool.py
MPIPool.map
def map(self, function, tasks): """ Like the built-in :py:func:`map` function, apply a function to all of the values in a list and return the list of results. :param function: The function to apply to the list. :param tasks: The list of elements. """ ntask = len(tasks) # If not the master just wait for instructions. if not self.is_master(): self.wait() return if function is not self.function: if self.debug: print("Master replacing pool function with {0}." .format(function)) self.function = function F = _function_wrapper(function) # Tell all the workers what function to use. requests = [] for i in range(self.size): r = self.comm.isend(F, dest=i + 1) requests.append(r) # Wait until all of the workers have responded. See: # https://gist.github.com/4176241 MPI.Request.waitall(requests) if (not self.loadbalance) or (ntask <= self.size): # Do not perform load-balancing - the default load-balancing # scheme emcee uses. # Send all the tasks off and wait for them to be received. # Again, see the bug in the above gist. requests = [] for i, task in enumerate(tasks): worker = i % self.size + 1 if self.debug: print("Sent task {0} to worker {1} with tag {2}." .format(type(task), worker, i)) r = self.comm.isend(task, dest=worker, tag=i) requests.append(r) MPI.Request.waitall(requests) # Now wait for the answers. results = [] for i in range(ntask): worker = i % self.size + 1 if self.debug: print("Master waiting for worker {0} with tag {1}" .format(worker, i)) result = self.comm.recv(source=worker, tag=i) results.append(result) return results else: # Perform load-balancing. The order of the results are likely to # be different from the previous case. for i, task in enumerate(tasks[0:self.size]): worker = i + 1 if self.debug: print("Sent task {0} to worker {1} with tag {2}." .format(type(task), worker, i)) # Send out the tasks asynchronously. self.comm.isend(task, dest=worker, tag=i) ntasks_dispatched = self.size results = [None] * ntask for itask in range(ntask): status = MPI.Status() # Receive input from workers. try: result = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status) except Exception as e: self.close() raise e worker = status.source i = status.tag results[i] = result if self.debug: print("Master received from worker {0} with tag {1}" .format(worker, i)) # Now send the next task to this idle worker (if there are any # left). if ntasks_dispatched < ntask: task = tasks[ntasks_dispatched] i = ntasks_dispatched if self.debug: print("Sent task {0} to worker {1} with tag {2}." .format(type(task), worker, i)) # Send out the tasks asynchronously. self.comm.isend(task, dest=worker, tag=i) ntasks_dispatched += 1 return results
python
def map(self, function, tasks): """ Like the built-in :py:func:`map` function, apply a function to all of the values in a list and return the list of results. :param function: The function to apply to the list. :param tasks: The list of elements. """ ntask = len(tasks) # If not the master just wait for instructions. if not self.is_master(): self.wait() return if function is not self.function: if self.debug: print("Master replacing pool function with {0}." .format(function)) self.function = function F = _function_wrapper(function) # Tell all the workers what function to use. requests = [] for i in range(self.size): r = self.comm.isend(F, dest=i + 1) requests.append(r) # Wait until all of the workers have responded. See: # https://gist.github.com/4176241 MPI.Request.waitall(requests) if (not self.loadbalance) or (ntask <= self.size): # Do not perform load-balancing - the default load-balancing # scheme emcee uses. # Send all the tasks off and wait for them to be received. # Again, see the bug in the above gist. requests = [] for i, task in enumerate(tasks): worker = i % self.size + 1 if self.debug: print("Sent task {0} to worker {1} with tag {2}." .format(type(task), worker, i)) r = self.comm.isend(task, dest=worker, tag=i) requests.append(r) MPI.Request.waitall(requests) # Now wait for the answers. results = [] for i in range(ntask): worker = i % self.size + 1 if self.debug: print("Master waiting for worker {0} with tag {1}" .format(worker, i)) result = self.comm.recv(source=worker, tag=i) results.append(result) return results else: # Perform load-balancing. The order of the results are likely to # be different from the previous case. for i, task in enumerate(tasks[0:self.size]): worker = i + 1 if self.debug: print("Sent task {0} to worker {1} with tag {2}." .format(type(task), worker, i)) # Send out the tasks asynchronously. self.comm.isend(task, dest=worker, tag=i) ntasks_dispatched = self.size results = [None] * ntask for itask in range(ntask): status = MPI.Status() # Receive input from workers. try: result = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status) except Exception as e: self.close() raise e worker = status.source i = status.tag results[i] = result if self.debug: print("Master received from worker {0} with tag {1}" .format(worker, i)) # Now send the next task to this idle worker (if there are any # left). if ntasks_dispatched < ntask: task = tasks[ntasks_dispatched] i = ntasks_dispatched if self.debug: print("Sent task {0} to worker {1} with tag {2}." .format(type(task), worker, i)) # Send out the tasks asynchronously. self.comm.isend(task, dest=worker, tag=i) ntasks_dispatched += 1 return results
[ "def", "map", "(", "self", ",", "function", ",", "tasks", ")", ":", "ntask", "=", "len", "(", "tasks", ")", "# If not the master just wait for instructions.", "if", "not", "self", ".", "is_master", "(", ")", ":", "self", ".", "wait", "(", ")", "return", "if", "function", "is", "not", "self", ".", "function", ":", "if", "self", ".", "debug", ":", "print", "(", "\"Master replacing pool function with {0}.\"", ".", "format", "(", "function", ")", ")", "self", ".", "function", "=", "function", "F", "=", "_function_wrapper", "(", "function", ")", "# Tell all the workers what function to use.", "requests", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "size", ")", ":", "r", "=", "self", ".", "comm", ".", "isend", "(", "F", ",", "dest", "=", "i", "+", "1", ")", "requests", ".", "append", "(", "r", ")", "# Wait until all of the workers have responded. See:", "# https://gist.github.com/4176241", "MPI", ".", "Request", ".", "waitall", "(", "requests", ")", "if", "(", "not", "self", ".", "loadbalance", ")", "or", "(", "ntask", "<=", "self", ".", "size", ")", ":", "# Do not perform load-balancing - the default load-balancing", "# scheme emcee uses.", "# Send all the tasks off and wait for them to be received.", "# Again, see the bug in the above gist.", "requests", "=", "[", "]", "for", "i", ",", "task", "in", "enumerate", "(", "tasks", ")", ":", "worker", "=", "i", "%", "self", ".", "size", "+", "1", "if", "self", ".", "debug", ":", "print", "(", "\"Sent task {0} to worker {1} with tag {2}.\"", ".", "format", "(", "type", "(", "task", ")", ",", "worker", ",", "i", ")", ")", "r", "=", "self", ".", "comm", ".", "isend", "(", "task", ",", "dest", "=", "worker", ",", "tag", "=", "i", ")", "requests", ".", "append", "(", "r", ")", "MPI", ".", "Request", ".", "waitall", "(", "requests", ")", "# Now wait for the answers.", "results", "=", "[", "]", "for", "i", "in", "range", "(", "ntask", ")", ":", "worker", "=", "i", "%", "self", ".", "size", "+", "1", "if", "self", ".", "debug", ":", "print", "(", "\"Master waiting for worker {0} with tag {1}\"", ".", "format", "(", "worker", ",", "i", ")", ")", "result", "=", "self", ".", "comm", ".", "recv", "(", "source", "=", "worker", ",", "tag", "=", "i", ")", "results", ".", "append", "(", "result", ")", "return", "results", "else", ":", "# Perform load-balancing. 
The order of the results are likely to", "# be different from the previous case.", "for", "i", ",", "task", "in", "enumerate", "(", "tasks", "[", "0", ":", "self", ".", "size", "]", ")", ":", "worker", "=", "i", "+", "1", "if", "self", ".", "debug", ":", "print", "(", "\"Sent task {0} to worker {1} with tag {2}.\"", ".", "format", "(", "type", "(", "task", ")", ",", "worker", ",", "i", ")", ")", "# Send out the tasks asynchronously.", "self", ".", "comm", ".", "isend", "(", "task", ",", "dest", "=", "worker", ",", "tag", "=", "i", ")", "ntasks_dispatched", "=", "self", ".", "size", "results", "=", "[", "None", "]", "*", "ntask", "for", "itask", "in", "range", "(", "ntask", ")", ":", "status", "=", "MPI", ".", "Status", "(", ")", "# Receive input from workers.", "try", ":", "result", "=", "self", ".", "comm", ".", "recv", "(", "source", "=", "MPI", ".", "ANY_SOURCE", ",", "tag", "=", "MPI", ".", "ANY_TAG", ",", "status", "=", "status", ")", "except", "Exception", "as", "e", ":", "self", ".", "close", "(", ")", "raise", "e", "worker", "=", "status", ".", "source", "i", "=", "status", ".", "tag", "results", "[", "i", "]", "=", "result", "if", "self", ".", "debug", ":", "print", "(", "\"Master received from worker {0} with tag {1}\"", ".", "format", "(", "worker", ",", "i", ")", ")", "# Now send the next task to this idle worker (if there are any", "# left).", "if", "ntasks_dispatched", "<", "ntask", ":", "task", "=", "tasks", "[", "ntasks_dispatched", "]", "i", "=", "ntasks_dispatched", "if", "self", ".", "debug", ":", "print", "(", "\"Sent task {0} to worker {1} with tag {2}.\"", ".", "format", "(", "type", "(", "task", ")", ",", "worker", ",", "i", ")", ")", "# Send out the tasks asynchronously.", "self", ".", "comm", ".", "isend", "(", "task", ",", "dest", "=", "worker", ",", "tag", "=", "i", ")", "ntasks_dispatched", "+=", "1", "return", "results" ]
Like the built-in :py:func:`map` function, apply a function to all of the values in a list and return the list of results. :param function: The function to apply to the list. :param tasks: The list of elements.
[ "Like", "the", "built", "-", "in", ":", "py", ":", "func", ":", "map", "function", "apply", "a", "function", "to", "all", "of", "the", "values", "in", "a", "list", "and", "return", "the", "list", "of", "results", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/pool.py#L271-L380
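The load-balanced branch keeps results ordered by tagging each task with its index and writing each answer into results[tag]. The same ordering trick, sketched with the standard library instead of MPI (illustrative, not everest's code):

from concurrent.futures import ThreadPoolExecutor, as_completed

def work(x):
    return x * x

tasks = list(range(10))
results = [None] * len(tasks)
with ThreadPoolExecutor(max_workers=3) as ex:
    futures = {ex.submit(work, t): i for i, t in enumerate(tasks)}
    for fut in as_completed(futures):         # answers arrive in any order...
        results[futures[fut]] = fut.result()  # ...the stored index restores it
print(results)   # [0, 1, 4, ..., 81], in task order regardless of completion order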
rodluger/everest
everest/pool.py
MPIPool.close
def close(self): """ Just send a message off to all the pool members which contains the special :class:`_close_pool_message` sentinel. """ if self.is_master(): for i in range(self.size): self.comm.isend(_close_pool_message(), dest=i + 1)
python
def close(self): """ Just send a message off to all the pool members which contains the special :class:`_close_pool_message` sentinel. """ if self.is_master(): for i in range(self.size): self.comm.isend(_close_pool_message(), dest=i + 1)
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "is_master", "(", ")", ":", "for", "i", "in", "range", "(", "self", ".", "size", ")", ":", "self", ".", "comm", ".", "isend", "(", "_close_pool_message", "(", ")", ",", "dest", "=", "i", "+", "1", ")" ]
Just send a message off to all the pool members which contains the special :class:`_close_pool_message` sentinel.
[ "Just", "send", "a", "message", "off", "to", "all", "the", "pool", "members", "which", "contains", "the", "special", ":", "class", ":", "_close_pool_message", "sentinel", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/pool.py#L388-L396
lsbardel/python-stdnet
stdnet/odm/struct.py
commit_when_no_transaction
def commit_when_no_transaction(f): '''Decorator for committing changes when the instance session is not in a transaction.''' def _(self, *args, **kwargs): r = f(self, *args, **kwargs) return self.session.add(self) if self.session is not None else r _.__name__ = f.__name__ _.__doc__ = f.__doc__ return _
python
def commit_when_no_transaction(f): '''Decorator for committing changes when the instance session is not in a transaction.''' def _(self, *args, **kwargs): r = f(self, *args, **kwargs) return self.session.add(self) if self.session is not None else r _.__name__ = f.__name__ _.__doc__ = f.__doc__ return _
[ "def", "commit_when_no_transaction", "(", "f", ")", ":", "def", "_", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "r", "=", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "session", ".", "add", "(", "self", ")", "if", "self", ".", "session", "is", "not", "None", "else", "r", "_", ".", "__name__", "=", "f", ".", "__name__", "_", ".", "__doc__", "=", "f", ".", "__doc__", "return", "_" ]
Decorator for committing changes when the instance session is not in a transaction.
[ "Decorator", "for", "committing", "changes", "when", "the", "instance", "session", "is", "not", "in", "a", "transaction", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L32-L40
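The decorator copies __name__ and __doc__ by hand; functools.wraps is the stdlib idiom for the same bookkeeping. A self-contained sketch of the identical commit-unless-in-transaction shape (ToyStructure is made up):

import functools

def commit_when_no_transaction(f):
    @functools.wraps(f)                 # replaces the manual __name__/__doc__ copy
    def wrapper(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        return self.session.add(self) if self.session is not None else result
    return wrapper

class ToyStructure:
    session = None                      # no session bound: just return the result

    @commit_when_no_transaction
    def push(self, value):
        return value

assert ToyStructure().push(3) == 3
assert ToyStructure.push.__name__ == 'push'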
lsbardel/python-stdnet
stdnet/odm/struct.py
Structure.backend
def backend(self): '''Returns the :class:`stdnet.BackendStructure`. ''' session = self.session if session is not None: if self._field: return session.model(self._field.model).backend else: return session.model(self).backend
python
def backend(self): '''Returns the :class:`stdnet.BackendStructure`. ''' session = self.session if session is not None: if self._field: return session.model(self._field.model).backend else: return session.model(self).backend
[ "def", "backend", "(", "self", ")", ":", "session", "=", "self", ".", "session", "if", "session", "is", "not", "None", ":", "if", "self", ".", "_field", ":", "return", "session", ".", "model", "(", "self", ".", "_field", ".", "model", ")", ".", "backend", "else", ":", "return", "session", ".", "model", "(", "self", ")", ".", "backend" ]
Returns the :class:`stdnet.BackendStructure`.
[ "Returns", "the", ":", "class", ":", "stdnet", ".", "BackendStructure", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L257-L265
lsbardel/python-stdnet
stdnet/odm/struct.py
Structure.read_backend
def read_backend(self): '''Returns the :class:`stdnet.BackendStructure`. ''' session = self.session if session is not None: if self._field: return session.model(self._field.model).read_backend else: return session.model(self).read_backend
python
def read_backend(self): '''Returns the :class:`stdnet.BackendStructure`. ''' session = self.session if session is not None: if self._field: return session.model(self._field.model).read_backend else: return session.model(self).read_backend
[ "def", "read_backend", "(", "self", ")", ":", "session", "=", "self", ".", "session", "if", "session", "is", "not", "None", ":", "if", "self", ".", "_field", ":", "return", "session", ".", "model", "(", "self", ".", "_field", ".", "model", ")", ".", "read_backend", "else", ":", "return", "session", ".", "model", "(", "self", ")", ".", "read_backend" ]
Returns the :class:`stdnet.BackendStructure`.
[ "Returns", "the", ":", "class", ":", "stdnet", ".", "BackendStructure", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L268-L276
lsbardel/python-stdnet
stdnet/odm/struct.py
Structure.size
def size(self): '''Number of elements in the :class:`Structure`.''' if self.cache.cache is None: return self.read_backend_structure().size() else: return len(self.cache.cache)
python
def size(self): '''Number of elements in the :class:`Structure`.''' if self.cache.cache is None: return self.read_backend_structure().size() else: return len(self.cache.cache)
[ "def", "size", "(", "self", ")", ":", "if", "self", ".", "cache", ".", "cache", "is", "None", ":", "return", "self", ".", "read_backend_structure", "(", ")", ".", "size", "(", ")", "else", ":", "return", "len", "(", "self", ".", "cache", ".", "cache", ")" ]
Number of elements in the :class:`Structure`.
[ "Number", "of", "elements", "in", "the", ":", "class", ":", "Structure", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L288-L293
lsbardel/python-stdnet
stdnet/odm/struct.py
Structure.load_data
def load_data(self, data, callback=None): '''Load ``data`` from the :class:`stdnet.BackendDataServer`.''' return self.backend.execute( self.value_pickler.load_iterable(data, self.session), callback)
python
def load_data(self, data, callback=None): '''Load ``data`` from the :class:`stdnet.BackendDataServer`.''' return self.backend.execute( self.value_pickler.load_iterable(data, self.session), callback)
[ "def", "load_data", "(", "self", ",", "data", ",", "callback", "=", "None", ")", ":", "return", "self", ".", "backend", ".", "execute", "(", "self", ".", "value_pickler", ".", "load_iterable", "(", "data", ",", "self", ".", "session", ")", ",", "callback", ")" ]
Load ``data`` from the :class:`stdnet.BackendDataServer`.
[ "Load", "data", "from", "the", ":", "class", ":", "stdnet", ".", "BackendDataServer", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L305-L308
lsbardel/python-stdnet
stdnet/odm/struct.py
PairMixin.values
def values(self): '''Iterator over values of :class:`PairMixin`.''' if self.cache.cache is None: backend = self.read_backend return backend.execute(backend.structure(self).values(), self.load_values) else: return self.cache.cache.values()
python
def values(self): '''Iterator over values of :class:`PairMixin`.''' if self.cache.cache is None: backend = self.read_backend return backend.execute(backend.structure(self).values(), self.load_values) else: return self.cache.cache.values()
[ "def", "values", "(", "self", ")", ":", "if", "self", ".", "cache", ".", "cache", "is", "None", ":", "backend", "=", "self", ".", "read_backend", "return", "backend", ".", "execute", "(", "backend", ".", "structure", "(", "self", ")", ".", "values", "(", ")", ",", "self", ".", "load_values", ")", "else", ":", "return", "self", ".", "cache", ".", "cache", ".", "values", "(", ")" ]
Iterator over values of :class:`PairMixin`.
[ "Iterator", "over", "values", "of", ":", "class", ":", "PairMixin", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L356-L363
lsbardel/python-stdnet
stdnet/odm/struct.py
PairMixin.pair
def pair(self, pair): '''Add a *pair* to the structure.''' if len(pair) == 1: # if only one value is passed, the value must implement a # score function which retrieve the first value of the pair # (score in zset, timevalue in timeseries, field value in # hashtable) return (pair[0].score(), pair[0]) elif len(pair) != 2: raise TypeError('add expected 2 arguments, got {0}' .format(len(pair))) else: return pair
python
def pair(self, pair): '''Add a *pair* to the structure.''' if len(pair) == 1: # if only one value is passed, the value must implement a # score function which retrieve the first value of the pair # (score in zset, timevalue in timeseries, field value in # hashtable) return (pair[0].score(), pair[0]) elif len(pair) != 2: raise TypeError('add expected 2 arguments, got {0}' .format(len(pair))) else: return pair
[ "def", "pair", "(", "self", ",", "pair", ")", ":", "if", "len", "(", "pair", ")", "==", "1", ":", "# if only one value is passed, the value must implement a\r", "# score function which retrieve the first value of the pair\r", "# (score in zset, timevalue in timeseries, field value in\r", "# hashtable)\r", "return", "(", "pair", "[", "0", "]", ".", "score", "(", ")", ",", "pair", "[", "0", "]", ")", "elif", "len", "(", "pair", ")", "!=", "2", ":", "raise", "TypeError", "(", "'add expected 2 arguments, got {0}'", ".", "format", "(", "len", "(", "pair", ")", ")", ")", "else", ":", "return", "pair" ]
Add a *pair* to the structure.
[ "Add", "a", "*", "pair", "*", "to", "the", "structure", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L365-L377
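Isolated from the class, pair() is a small normalization step: a single argument must know its own score, while two arguments already form a (score, value) pair. A standalone sketch (ToyValue is a made-up scored value):

class ToyValue:
    def __init__(self, score, data):
        self._score, self.data = score, data
    def score(self):
        return self._score

def normalize_pair(pair):
    if len(pair) == 1:
        return (pair[0].score(), pair[0])   # the value carries its own score
    elif len(pair) != 2:
        raise TypeError('add expected 2 arguments, got {0}'.format(len(pair)))
    return pair

v = ToyValue(2.5, 'x')
assert normalize_pair((v,)) == (2.5, v)
assert normalize_pair((1.0, 'y')) == (1.0, 'y')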
lsbardel/python-stdnet
stdnet/odm/struct.py
KeyValueMixin.remove
def remove(self, *keys): '''Remove *keys* from the key-value container.''' dumps = self.pickler.dumps self.cache.remove([dumps(v) for v in keys])
python
def remove(self, *keys): '''Remove *keys* from the key-value container.''' dumps = self.pickler.dumps self.cache.remove([dumps(v) for v in keys])
[ "def", "remove", "(", "self", ",", "*", "keys", ")", ":", "dumps", "=", "self", ".", "pickler", ".", "dumps", "self", ".", "cache", ".", "remove", "(", "[", "dumps", "(", "v", ")", "for", "v", "in", "keys", "]", ")" ]
Remove *keys* from the key-value container.
[ "Remove", "*", "keys", "*", "from", "the", "key", "-", "value", "container", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L486-L489
lsbardel/python-stdnet
stdnet/odm/struct.py
OrderedMixin.count
def count(self, start, stop): '''Count the number of elements between *start* and *stop*.''' s1 = self.pickler.dumps(start) s2 = self.pickler.dumps(stop) return self.backend_structure().count(s1, s2)
python
def count(self, start, stop): '''Count the number of elements between *start* and *stop*.''' s1 = self.pickler.dumps(start) s2 = self.pickler.dumps(stop) return self.backend_structure().count(s1, s2)
[ "def", "count", "(", "self", ",", "start", ",", "stop", ")", ":", "s1", "=", "self", ".", "pickler", ".", "dumps", "(", "start", ")", "s2", "=", "self", ".", "pickler", ".", "dumps", "(", "stop", ")", "return", "self", ".", "backend_structure", "(", ")", ".", "count", "(", "s1", ",", "s2", ")" ]
Count the number of elements between *start* and *stop*.
[ "Count", "the", "number", "of", "elements", "between", "*", "start", "*", "and", "*", "stop", "*", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L520-L524
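Client-side, the equivalent of count over a sorted score list is two bisections; the real count above happens in the backend (Redis ZCOUNT, for instance, treats both endpoints as inclusive). A tiny illustration:

import bisect

scores = [0.5, 1.0, 2.0, 2.5, 3.0]      # already sorted, like a zset's scores
start, stop = 1.0, 2.5
n = bisect.bisect_right(scores, stop) - bisect.bisect_left(scores, start)
assert n == 3                            # 1.0, 2.0 and 2.5 (inclusive endpoints)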
lsbardel/python-stdnet
stdnet/odm/struct.py
OrderedMixin.irange
def irange(self, start=0, end=-1, callback=None, withscores=True, **options): '''Return the range by rank between start and end.''' backend = self.read_backend res = backend.structure(self).irange(start, end, withscores=withscores, **options) if not callback: callback = self.load_data if withscores else self.load_values return backend.execute(res, callback)
python
def irange(self, start=0, end=-1, callback=None, withscores=True, **options): '''Return the range by rank between start and end.''' backend = self.read_backend res = backend.structure(self).irange(start, end, withscores=withscores, **options) if not callback: callback = self.load_data if withscores else self.load_values return backend.execute(res, callback)
[ "def", "irange", "(", "self", ",", "start", "=", "0", ",", "end", "=", "-", "1", ",", "callback", "=", "None", ",", "withscores", "=", "True", ",", "*", "*", "options", ")", ":", "backend", "=", "self", ".", "read_backend", "res", "=", "backend", ".", "structure", "(", "self", ")", ".", "irange", "(", "start", ",", "end", ",", "withscores", "=", "withscores", ",", "*", "*", "options", ")", "if", "not", "callback", ":", "callback", "=", "self", ".", "load_data", "if", "withscores", "else", "self", ".", "load_values", "return", "backend", ".", "execute", "(", "res", ",", "callback", ")" ]
Return the range by rank between start and end.
[ "Return", "the", "range", "by", "rank", "between", "start", "and", "end", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L537-L546
lsbardel/python-stdnet
stdnet/odm/struct.py
OrderedMixin.pop_range
def pop_range(self, start, stop, callback=None, withscores=True): '''pop a range by score from the :class:`OrderedMixin`''' s1 = self.pickler.dumps(start) s2 = self.pickler.dumps(stop) backend = self.backend res = backend.structure(self).pop_range(s1, s2, withscores=withscores) if not callback: callback = self.load_data if withscores else self.load_values return backend.execute(res, callback)
python
def pop_range(self, start, stop, callback=None, withscores=True): '''pop a range by score from the :class:`OrderedMixin`''' s1 = self.pickler.dumps(start) s2 = self.pickler.dumps(stop) backend = self.backend res = backend.structure(self).pop_range(s1, s2, withscores=withscores) if not callback: callback = self.load_data if withscores else self.load_values return backend.execute(res, callback)
[ "def", "pop_range", "(", "self", ",", "start", ",", "stop", ",", "callback", "=", "None", ",", "withscores", "=", "True", ")", ":", "s1", "=", "self", ".", "pickler", ".", "dumps", "(", "start", ")", "s2", "=", "self", ".", "pickler", ".", "dumps", "(", "stop", ")", "backend", "=", "self", ".", "backend", "res", "=", "backend", ".", "structure", "(", "self", ")", ".", "pop_range", "(", "s1", ",", "s2", ",", "withscores", "=", "withscores", ")", "if", "not", "callback", ":", "callback", "=", "self", ".", "load_data", "if", "withscores", "else", "self", ".", "load_values", "return", "backend", ".", "execute", "(", "res", ",", "callback", ")" ]
pop a range by score from the :class:`OrderedMixin`
[ "pop", "a", "range", "by", "score", "from", "the", ":", "class", ":", "OrderedMixin" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L548-L556
lsbardel/python-stdnet
stdnet/odm/struct.py
OrderedMixin.ipop_range
def ipop_range(self, start=0, stop=-1, callback=None, withscores=True): '''pop a range from the :class:`OrderedMixin`''' backend = self.backend res = backend.structure(self).ipop_range(start, stop, withscores=withscores) if not callback: callback = self.load_data if withscores else self.load_values return backend.execute(res, callback)
python
def ipop_range(self, start=0, stop=-1, callback=None, withscores=True): '''pop a range from the :class:`OrderedMixin`''' backend = self.backend res = backend.structure(self).ipop_range(start, stop, withscores=withscores) if not callback: callback = self.load_data if withscores else self.load_values return backend.execute(res, callback)
[ "def", "ipop_range", "(", "self", ",", "start", "=", "0", ",", "stop", "=", "-", "1", ",", "callback", "=", "None", ",", "withscores", "=", "True", ")", ":", "backend", "=", "self", ".", "backend", "res", "=", "backend", ".", "structure", "(", "self", ")", ".", "ipop_range", "(", "start", ",", "stop", ",", "withscores", "=", "withscores", ")", "if", "not", "callback", ":", "callback", "=", "self", ".", "load_data", "if", "withscores", "else", "self", ".", "load_values", "return", "backend", ".", "execute", "(", "res", ",", "callback", ")" ]
pop a range from the :class:`OrderedMixin`
[ "pop", "a", "range", "from", "the", ":", "class", ":", "OrderedMixin" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L558-L565
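A minimal usage sketch for the two range-pop methods above. The structure name ``zs`` is an assumption (an already-created ordered structure; setup omitted); the signatures follow the records above::

    # Pop every member scored between 1.5 and 3.0, together with the scores.
    popped = zs.pop_range(1.5, 3.0, withscores=True)
    # Pop the two lowest-ranked members, values only.
    lowest = zs.ipop_range(0, 1, withscores=False)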
lsbardel/python-stdnet
stdnet/odm/struct.py
Sequence.push_back
def push_back(self, value): '''Appends a copy of *value* at the end of the :class:`Sequence`.''' self.cache.push_back(self.value_pickler.dumps(value)) return self
python
def push_back(self, value): '''Appends a copy of *value* at the end of the :class:`Sequence`.''' self.cache.push_back(self.value_pickler.dumps(value)) return self
[ "def", "push_back", "(", "self", ",", "value", ")", ":", "self", ".", "cache", ".", "push_back", "(", "self", ".", "value_pickler", ".", "dumps", "(", "value", ")", ")", "return", "self" ]
Appends a copy of *value* at the end of the :class:`Sequence`.
[ "Appends", "a", "copy", "of", "*", "value", "*", "at", "the", "end", "of", "the", ":", "class", ":", "Sequence", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L583-L586
lsbardel/python-stdnet
stdnet/odm/struct.py
Sequence.pop_back
def pop_back(self): '''Remove the last element from the :class:`Sequence`.''' backend = self.backend return backend.execute(backend.structure(self).pop_back(), self.value_pickler.loads)
python
def pop_back(self): '''Remove the last element from the :class:`Sequence`.''' backend = self.backend return backend.execute(backend.structure(self).pop_back(), self.value_pickler.loads)
[ "def", "pop_back", "(", "self", ")", ":", "backend", "=", "self", ".", "backend", "return", "backend", ".", "execute", "(", "backend", ".", "structure", "(", "self", ")", ".", "pop_back", "(", ")", ",", "self", ".", "value_pickler", ".", "loads", ")" ]
Remove the last element from the :class:`Sequence`.
[ "Remove", "the", "last", "element", "from", "the", ":", "class", ":", "Sequence", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L588-L592
lsbardel/python-stdnet
stdnet/odm/struct.py
Set.add
def add(self, value): '''Add *value* to the set''' return self.cache.update((self.value_pickler.dumps(value),))
python
def add(self, value): '''Add *value* to the set''' return self.cache.update((self.value_pickler.dumps(value),))
[ "def", "add", "(", "self", ",", "value", ")", ":", "return", "self", ".", "cache", ".", "update", "(", "(", "self", ".", "value_pickler", ".", "dumps", "(", "value", ")", ",", ")", ")" ]
Add *value* to the set
[ "Add", "*", "value", "*", "to", "the", "set" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L613-L615
lsbardel/python-stdnet
stdnet/odm/struct.py
Set.update
def update(self, values): '''Add iterable *values* to the set''' d = self.value_pickler.dumps return self.cache.update(tuple((d(v) for v in values)))
python
def update(self, values): '''Add iterable *values* to the set''' d = self.value_pickler.dumps return self.cache.update(tuple((d(v) for v in values)))
[ "def", "update", "(", "self", ",", "values", ")", ":", "d", "=", "self", ".", "value_pickler", ".", "dumps", "return", "self", ".", "cache", ".", "update", "(", "tuple", "(", "(", "d", "(", "v", ")", "for", "v", "in", "values", ")", ")", ")" ]
Add iterable *values* to the set
[ "Add", "iterable", "*", "values", "*", "to", "the", "set" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L618-L621
lsbardel/python-stdnet
stdnet/odm/struct.py
Set.discard
def discard(self, value): '''Remove an element *value* from a set if it is a member.''' return self.cache.remove((self.value_pickler.dumps(value),))
python
def discard(self, value): '''Remove an element *value* from a set if it is a member.''' return self.cache.remove((self.value_pickler.dumps(value),))
[ "def", "discard", "(", "self", ",", "value", ")", ":", "return", "self", ".", "cache", ".", "remove", "(", "(", "self", ".", "value_pickler", ".", "dumps", "(", "value", ")", ",", ")", ")" ]
Remove an element *value* from a set if it is a member.
[ "Remove", "an", "element", "*", "value", "*", "from", "a", "set", "if", "it", "is", "a", "member", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L624-L626
lsbardel/python-stdnet
stdnet/odm/struct.py
Set.difference_update
def difference_update(self, values): '''Remove an iterable of *values* from the set.''' d = self.value_pickler.dumps return self.cache.remove(tuple((d(v) for v in values)))
python
def difference_update(self, values): '''Remove an iterable of *values* from the set.''' d = self.value_pickler.dumps return self.cache.remove(tuple((d(v) for v in values)))
[ "def", "difference_update", "(", "self", ",", "values", ")", ":", "d", "=", "self", ".", "value_pickler", ".", "dumps", "return", "self", ".", "cache", ".", "remove", "(", "tuple", "(", "(", "d", "(", "v", ")", "for", "v", "in", "values", ")", ")", ")" ]
Remove an iterable of *values* from the set.
[ "Remove", "an", "iterable", "of", "*", "values", "*", "from", "the", "set", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L630-L633
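Taken together, the four :class:`Set` methods above mirror Python's built-in set API, with values serialised through ``value_pickler``. A sketch, assuming ``s`` is an existing stdnet :class:`Set` (creation omitted)::

    s.add('red')                            # single value
    s.update(('green', 'blue'))             # any iterable of values
    s.discard('red')                        # no error if the value is absent
    s.difference_update(('green', 'blue'))  # bulk removal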
lsbardel/python-stdnet
stdnet/odm/struct.py
List.pop_front
def pop_front(self): '''Remove the first element from the list.''' backend = self.backend return backend.execute(backend.structure(self).pop_front(), self.value_pickler.loads)
python
def pop_front(self): '''Remove the first element from the list.''' backend = self.backend return backend.execute(backend.structure(self).pop_front(), self.value_pickler.loads)
[ "def", "pop_front", "(", "self", ")", ":", "backend", "=", "self", ".", "backend", "return", "backend", ".", "execute", "(", "backend", ".", "structure", "(", "self", ")", ".", "pop_front", "(", ")", ",", "self", ".", "value_pickler", ".", "loads", ")" ]
Remove the first element from the list.
[ "Remove", "the", "first", "element", "from", "the", "list", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L640-L644
lsbardel/python-stdnet
stdnet/odm/struct.py
List.block_pop_back
def block_pop_back(self, timeout=10): '''Remove the last element from the list. If no elements are available, blocks for up to ``timeout`` seconds.''' value = yield self.backend_structure().block_pop_back(timeout) if value is not None: yield self.value_pickler.loads(value)
python
def block_pop_back(self, timeout=10): '''Remove the last element from the list. If no elements are available, blocks for up to ``timeout`` seconds.''' value = yield self.backend_structure().block_pop_back(timeout) if value is not None: yield self.value_pickler.loads(value)
[ "def", "block_pop_back", "(", "self", ",", "timeout", "=", "10", ")", ":", "value", "=", "yield", "self", ".", "backend_structure", "(", ")", ".", "block_pop_back", "(", "timeout", ")", "if", "value", "is", "not", "None", ":", "yield", "self", ".", "value_pickler", ".", "loads", "(", "value", ")" ]
Remove the last element from the list. If no elements are available, blocks for up to ``timeout`` seconds.
[ "Remove", "the", "last", "element", "from", "the", "list", ".", "If", "no", "elements", "are", "available", "blocks", "for", "up", "to", "timeout", "seconds", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L646-L651
lsbardel/python-stdnet
stdnet/odm/struct.py
List.block_pop_front
def block_pop_front(self, timeout=10): '''Remove the first element from the list. If no elements are available, blocks for up to ``timeout`` seconds.''' value = yield self.backend_structure().block_pop_front(timeout) if value is not None: yield self.value_pickler.loads(value)
python
def block_pop_front(self, timeout=10): '''Remove the first element from the list. If no elements are available, blocks for up to ``timeout`` seconds.''' value = yield self.backend_structure().block_pop_front(timeout) if value is not None: yield self.value_pickler.loads(value)
[ "def", "block_pop_front", "(", "self", ",", "timeout", "=", "10", ")", ":", "value", "=", "yield", "self", ".", "backend_structure", "(", ")", ".", "block_pop_front", "(", "timeout", ")", "if", "value", "is", "not", "None", ":", "yield", "self", ".", "value_pickler", ".", "loads", "(", "value", ")" ]
Remove the first element from the list. If no elements are available, blocks for up to ``timeout`` seconds.
[ "Remove", "the", "first", "element", "from", "the", "list", ".", "If", "no", "elements", "are", "available", "blocks", "for", "up", "to", "timeout", "seconds", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L653-L658
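Both blocking pops above are generator-style coroutines (note the ``yield``), so they are driven by the backend's asynchronous machinery rather than called directly. A hedged sketch, assuming ``tasks`` is a :class:`List` used as a work queue::

    # Inside a coroutine driven by the backend's async machinery:
    # wait up to 5 seconds for a producer to push an item; the result is
    # the deserialised value, or nothing if the timeout expires (assumption
    # based on the ``value is not None`` guard in the records above).
    item = yield tasks.block_pop_front(5)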
lsbardel/python-stdnet
stdnet/odm/struct.py
List.push_front
def push_front(self, value): '''Appends a copy of ``value`` to the beginning of the list.''' self.cache.push_front(self.value_pickler.dumps(value))
python
def push_front(self, value): '''Appends a copy of ``value`` to the beginning of the list.''' self.cache.push_front(self.value_pickler.dumps(value))
[ "def", "push_front", "(", "self", ",", "value", ")", ":", "self", ".", "cache", ".", "push_front", "(", "self", ".", "value_pickler", ".", "dumps", "(", "value", ")", ")" ]
Appends a copy of ``value`` to the beginning of the list.
[ "Appends", "a", "copy", "of", "value", "to", "the", "beginning", "of", "the", "list", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L661-L663
lsbardel/python-stdnet
stdnet/odm/struct.py
Zset.rank
def rank(self, value): '''The rank of a given *value*. This is the position of *value* in the :class:`OrderedMixin` container.''' value = self.value_pickler.dumps(value) return self.backend_structure().rank(value)
python
def rank(self, value): '''The rank of a given *value*. This is the position of *value* in the :class:`OrderedMixin` container.''' value = self.value_pickler.dumps(value) return self.backend_structure().rank(value)
[ "def", "rank", "(", "self", ",", "value", ")", ":", "value", "=", "self", ".", "value_pickler", ".", "dumps", "(", "value", ")", "return", "self", ".", "backend_structure", "(", ")", ".", "rank", "(", "value", ")" ]
The rank of a given *value*. This is the position of *value* in the :class:`OrderedMixin` container.
[ "The", "rank", "of", "a", "given", "*", "value", "*", ".", "This", "is", "the", "position", "of", "*", "value", "*", "in", "the", ":", "class", ":", "OrderedMixin", "container", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L672-L676
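``rank`` serialises its argument with ``value_pickler`` before asking the backend, so it accepts the same Python objects that were added. A sketch with an assumed sorted-set structure ``zs``::

    position = zs.rank('b')   # 0-based position of 'b' within the zset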
lsbardel/python-stdnet
stdnet/odm/struct.py
TS.rank
def rank(self, dte): '''The rank of a given *dte* in the timeseries''' timestamp = self.pickler.dumps(dte) return self.backend_structure().rank(timestamp)
python
def rank(self, dte): '''The rank of a given *dte* in the timeseries''' timestamp = self.pickler.dumps(dte) return self.backend_structure().rank(timestamp)
[ "def", "rank", "(", "self", ",", "dte", ")", ":", "timestamp", "=", "self", ".", "pickler", ".", "dumps", "(", "dte", ")", "return", "self", ".", "backend_structure", "(", ")", ".", "rank", "(", "timestamp", ")" ]
The rank of a given *dte* in the timeseries
[ "The", "rank", "of", "a", "given", "*", "dte", "*", "in", "the", "timeseries" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L708-L711
lsbardel/python-stdnet
stdnet/odm/struct.py
TS.ipop
def ipop(self, index): '''Pop a value at *index* from the :class:`TS`. Return ``None`` if index is out of bound.''' backend = self.backend res = backend.structure(self).ipop(index) return backend.execute(res, lambda r: self._load_get_data(r, index, None))
python
def ipop(self, index): '''Pop a value at *index* from the :class:`TS`. Return ``None`` if index is out of bound.''' backend = self.backend res = backend.structure(self).ipop(index) return backend.execute(res, lambda r: self._load_get_data(r, index, None))
[ "def", "ipop", "(", "self", ",", "index", ")", ":", "backend", "=", "self", ".", "backend", "res", "=", "backend", ".", "structure", "(", "self", ")", ".", "ipop", "(", "index", ")", "return", "backend", ".", "execute", "(", "res", ",", "lambda", "r", ":", "self", ".", "_load_get_data", "(", "r", ",", "index", ",", "None", ")", ")" ]
Pop a value at *index* from the :class:`TS`. Return ``None`` if index is out of bound.
[ "Pop", "a", "value", "at", "*", "index", "*", "from", "the", ":", "class", ":", "TS", ".", "Return", "None", "if", "index", "is", "out", "of", "bound", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L713-L719
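``ipop`` resolves to ``None`` for an out-of-bound *index* instead of raising, which makes it safe for draining a :class:`TS` from either end. A sketch with an assumed timeseries ``ts``::

    first = ts.ipop(0)    # oldest entry, or None when the series is empty
    last = ts.ipop(-1)    # newest entry, or None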
lsbardel/python-stdnet
stdnet/odm/struct.py
TS.times
def times(self, start, stop, callback=None, **kwargs): '''The times between times *start* and *stop*.''' s1 = self.pickler.dumps(start) s2 = self.pickler.dumps(stop) backend = self.read_backend res = backend.structure(self).times(s1, s2, **kwargs) return backend.execute(res, callback or self.load_keys)
python
def times(self, start, stop, callback=None, **kwargs): '''The times between times *start* and *stop*.''' s1 = self.pickler.dumps(start) s2 = self.pickler.dumps(stop) backend = self.read_backend res = backend.structure(self).times(s1, s2, **kwargs) return backend.execute(res, callback or self.load_keys)
[ "def", "times", "(", "self", ",", "start", ",", "stop", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "s1", "=", "self", ".", "pickler", ".", "dumps", "(", "start", ")", "s2", "=", "self", ".", "pickler", ".", "dumps", "(", "stop", ")", "backend", "=", "self", ".", "read_backend", "res", "=", "backend", ".", "structure", "(", "self", ")", ".", "times", "(", "s1", ",", "s2", ",", "*", "*", "kwargs", ")", "return", "backend", ".", "execute", "(", "res", ",", "callback", "or", "self", ".", "load_keys", ")" ]
The times between times *start* and *stop*.
[ "The", "times", "between", "times", "*", "start", "*", "and", "*", "stop", "*", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L721-L727
lsbardel/python-stdnet
stdnet/odm/struct.py
TS.itimes
def itimes(self, start=0, stop=-1, callback=None, **kwargs): '''The times between rank *start* and *stop*.''' backend = self.read_backend res = backend.structure(self).itimes(start, stop, **kwargs) return backend.execute(res, callback or self.load_keys)
python
def itimes(self, start=0, stop=-1, callback=None, **kwargs): '''The times between rank *start* and *stop*.''' backend = self.read_backend res = backend.structure(self).itimes(start, stop, **kwargs) return backend.execute(res, callback or self.load_keys)
[ "def", "itimes", "(", "self", ",", "start", "=", "0", ",", "stop", "=", "-", "1", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "backend", "=", "self", ".", "read_backend", "res", "=", "backend", ".", "structure", "(", "self", ")", ".", "itimes", "(", "start", ",", "stop", ",", "*", "*", "kwargs", ")", "return", "backend", ".", "execute", "(", "res", ",", "callback", "or", "self", ".", "load_keys", ")" ]
The times between rank *start* and *stop*.
[ "The", "times", "between", "rank", "*", "start", "*", "and", "*", "stop", "*", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L729-L733
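``times`` selects by pickled time values while ``itimes`` selects by rank; both resolve the matching keys through ``load_keys``. A sketch with an assumed date-keyed timeseries ``ts``::

    from datetime import date
    in_january = ts.times(date(2013, 1, 1), date(2013, 1, 31))
    first_ten = ts.itimes(0, 9)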
lsbardel/python-stdnet
stdnet/odm/query.py
Q.get_field
def get_field(self, field): '''A :class:`Q` performs a series of operations and ultimately generates a set of matched element ``ids``. If, on the other hand, a different field is required, it can be specified with the :meth:`get_field` method. For example, say a model has a field called ``object_id`` which contains ids of another model; we could use:: qs = session.query(MyModel).get_field('object_id') to obtain a set containing the ``object_id`` values of the matched elements. :parameter field: the name of the field which will be used to obtain the matched element values. Must be an index. :rtype: a new :class:`Q` instance. ''' if field != self._get_field: if field not in self._meta.dfields: raise QuerySetError('Model "{0}" has no field "{1}".' .format(self._meta, field)) q = self._clone() q.data['get_field'] = field return q else: return self
python
def get_field(self, field): '''A :class:`Q` performs a series of operations and ultimately generates a set of matched element ``ids``. If, on the other hand, a different field is required, it can be specified with the :meth:`get_field` method. For example, say a model has a field called ``object_id`` which contains ids of another model; we could use:: qs = session.query(MyModel).get_field('object_id') to obtain a set containing the ``object_id`` values of the matched elements. :parameter field: the name of the field which will be used to obtain the matched element values. Must be an index. :rtype: a new :class:`Q` instance. ''' if field != self._get_field: if field not in self._meta.dfields: raise QuerySetError('Model "{0}" has no field "{1}".' .format(self._meta, field)) q = self._clone() q.data['get_field'] = field return q else: return self
[ "def", "get_field", "(", "self", ",", "field", ")", ":", "if", "field", "!=", "self", ".", "_get_field", ":", "if", "field", "not", "in", "self", ".", "_meta", ".", "dfields", ":", "raise", "QuerySetError", "(", "'Model \"{0}\" has no field \"{1}\".'", ".", "format", "(", "self", ".", "_meta", ",", "field", ")", ")", "q", "=", "self", ".", "_clone", "(", ")", "q", ".", "data", "[", "'get_field'", "]", "=", "field", "return", "q", "else", ":", "return", "self" ]
A :class:`Q` performs a series of operations and ultimately generates a set of matched element ``ids``. If, on the other hand, a different field is required, it can be specified with the :meth:`get_field` method. For example, say a model has a field called ``object_id`` which contains ids of another model; we could use:: qs = session.query(MyModel).get_field('object_id') to obtain a set containing the ``object_id`` values of the matched elements. :parameter field: the name of the field which will be used to obtain the matched element values. Must be an index. :rtype: a new :class:`Q` instance.
[ "A", ":", "class", ":", "Q", "performs", "a", "series", "of", "operations", "and", "ultimately", "generates", "a", "set", "of", "matched", "element", "ids", ".", "If", "on", "the", "other", "hand", "a", "different", "field", "is", "required", "it", "can", "be", "specified", "with", "the", ":", "meth", ":", "get_field", "method", ".", "For", "example", "say", "a", "model", "has", "a", "field", "called", "object_id", "which", "contains", "ids", "of", "another", "model", "we", "could", "use", "::", "qs", "=", "session", ".", "query", "(", "MyModel", ")", ".", "get_field", "(", "object_id", ")", "to", "obtain", "a", "set", "containing", "the", "object_id", "values", "of", "the", "matched", "elements", ".", ":", "parameter", "field", ":", "the", "name", "of", "the", "field", "which", "will", "be", "used", "to", "obtain", "the", "matched", "element", "values", ".", "Must", "be", "an", "index", ".", ":", "rtype", ":", "a", "new", ":", "class", ":", "Q", "instance", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L103-L127
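Because the ``aggregate`` record later in this file treats :class:`Q` values specially (``lookup_value('set', v.construct())``), the result of ``get_field`` can feed another query's filter. The second line below is an assumed composition, not a documented guarantee; ``OtherModel`` is hypothetical::

    ids = session.query(MyModel).get_field('object_id')
    related = session.query(OtherModel).filter(id=ids)   # hypothetical composition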
lsbardel/python-stdnet
stdnet/odm/query.py
Query.filter
def filter(self, **kwargs): '''Create a new :class:`Query` with additional clauses corresponding to ``where`` or ``limit`` in a ``SQL SELECT`` statement. :parameter kwargs: dictionary of limiting clauses. :rtype: a new :class:`Query` instance. For example:: qs = session.query(MyModel) result = qs.filter(group = 'planet') ''' if kwargs: q = self._clone() if self.fargs: kwargs = update_dictionary(self.fargs.copy(), kwargs) q.fargs = kwargs return q else: return self
python
def filter(self, **kwargs): '''Create a new :class:`Query` with additional clauses corresponding to ``where`` or ``limit`` in a ``SQL SELECT`` statement. :parameter kwargs: dictionary of limiting clauses. :rtype: a new :class:`Query` instance. For example:: qs = session.query(MyModel) result = qs.filter(group = 'planet') ''' if kwargs: q = self._clone() if self.fargs: kwargs = update_dictionary(self.fargs.copy(), kwargs) q.fargs = kwargs return q else: return self
[ "def", "filter", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "q", "=", "self", ".", "_clone", "(", ")", "if", "self", ".", "fargs", ":", "kwargs", "=", "update_dictionary", "(", "self", ".", "fargs", ".", "copy", "(", ")", ",", "kwargs", ")", "q", ".", "fargs", "=", "kwargs", "return", "q", "else", ":", "return", "self" ]
Create a new :class:`Query` with additional clauses corresponding to ``where`` or ``limit`` in a ``SQL SELECT`` statement. :parameter kwargs: dictionary of limiting clauses. :rtype: a new :class:`Query` instance. For example:: qs = session.query(MyModel) result = qs.filter(group = 'planet')
[ "Create", "a", "new", ":", "class", ":", "Query", "with", "additional", "clauses", "corresponding", "to", "where", "or", "limit", "in", "a", "SQL", "SELECT", "statement", ".", ":", "parameter", "kwargs", ":", "dictionary", "of", "limiting", "clauses", ".", ":", "rtype", ":", "a", "new", ":", "class", ":", "Query", "instance", ".", "For", "example", "::", "qs", "=", "session", ".", "query", "(", "MyModel", ")", "result", "=", "qs", ".", "filter", "(", "group", "=", "planet", ")" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L401-L420
lsbardel/python-stdnet
stdnet/odm/query.py
Query.exclude
def exclude(self, **kwargs): '''Returns a new :class:`Query` with additional clauses corresponding to ``EXCEPT`` in a ``SQL SELECT`` statement. :parameter kwargs: dictionary of limiting clauses. :rtype: a new :class:`Query` instance. Using an equivalent example to the :meth:`filter` method:: qs = session.query(MyModel) result1 = qs.exclude(group = 'planet') result2 = qs.exclude(group__in = ('planet','stars')) ''' if kwargs: q = self._clone() if self.eargs: kwargs = update_dictionary(self.eargs.copy(), kwargs) q.eargs = kwargs return q else: return self
python
def exclude(self, **kwargs): '''Returns a new :class:`Query` with additional clauses corresponding to ``EXCEPT`` in a ``SQL SELECT`` statement. :parameter kwargs: dictionary of limiting clauses. :rtype: a new :class:`Query` instance. Using an equivalent example to the :meth:`filter` method:: qs = session.query(MyModel) result1 = qs.exclude(group = 'planet') result2 = qs.exclude(group__in = ('planet','stars')) ''' if kwargs: q = self._clone() if self.eargs: kwargs = update_dictionary(self.eargs.copy(), kwargs) q.eargs = kwargs return q else: return self
[ "def", "exclude", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "q", "=", "self", ".", "_clone", "(", ")", "if", "self", ".", "eargs", ":", "kwargs", "=", "update_dictionary", "(", "self", ".", "eargs", ".", "copy", "(", ")", ",", "kwargs", ")", "q", ".", "eargs", "=", "kwargs", "return", "q", "else", ":", "return", "self" ]
Returns a new :class:`Query` with additional clauses corresponding to ``EXCEPT`` in a ``SQL SELECT`` statement. :parameter kwargs: dictionary of limiting clauses. :rtype: a new :class:`Query` instance. Using an equivalent example to the :meth:`filter` method:: qs = session.query(MyModel) result1 = qs.exclude(group = 'planet') result2 = qs.exclude(group__in = ('planet','stars'))
[ "Returns", "a", "new", ":", "class", ":", "Query", "with", "additional", "clauses", "corresponding", "to", "EXCEPT", "in", "a", "SQL", "SELECT", "statement", ".", ":", "parameter", "kwargs", ":", "dictionary", "of", "limiting", "clauses", ".", ":", "rtype", ":", "a", "new", ":", "class", ":", "Query", "instance", ".", "Using", "an", "equivalent", "example", "to", "the", ":", "meth", ":", "filter", "method", "::", "qs", "=", "session", ".", "query", "(", "MyModel", ")", "result1", "=", "qs", ".", "exclude", "(", "group", "=", "planet", ")", "result2", "=", "qs", ".", "exclude", "(", "group__in", "=", "(", "planet", "stars", "))" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L422-L443
lsbardel/python-stdnet
stdnet/odm/query.py
Query.union
def union(self, *queries): '''Return a new :class:`Query` obtained from the union of this :class:`Query` with one or more *queries*. For example, let's say we want to have the union of two queries obtained from the :meth:`filter` method:: query = session.query(MyModel) qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo')) ''' q = self._clone() q.unions += queries return q
python
def union(self, *queries): '''Return a new :class:`Query` obtained from the union of this :class:`Query` with one or more *queries*. For example, let's say we want to have the union of two queries obtained from the :meth:`filter` method:: query = session.query(MyModel) qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo')) ''' q = self._clone() q.unions += queries return q
[ "def", "union", "(", "self", ",", "*", "queries", ")", ":", "q", "=", "self", ".", "_clone", "(", ")", "q", ".", "unions", "+=", "queries", "return", "q" ]
Return a new :class:`Query` obtained from the union of this :class:`Query` with one or more *queries*. For example, let's say we want to have the union of two queries obtained from the :meth:`filter` method:: query = session.query(MyModel) qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo'))
[ "Return", "a", "new", ":", "class", ":", "Query", "obtained", "from", "the", "union", "of", "this", ":", "class", ":", "Query", "with", "one", "or", "more", "*", "queries", "*", ".", "For", "example", "let", "s", "say", "we", "want", "to", "have", "the", "union", "of", "two", "queries", "obtained", "from", "the", ":", "meth", ":", "filter", "method", "::", "query", "=", "session", ".", "query", "(", "MyModel", ")", "qs", "=", "query", ".", "filter", "(", "field1", "=", "bla", ")", ".", "union", "(", "query", ".", "filter", "(", "field2", "=", "foo", "))" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L445-L456
lsbardel/python-stdnet
stdnet/odm/query.py
Query.intersect
def intersect(self, *queries): '''Return a new :class:`Query` obtained from the intersection of this :class:`Query` with one or more *queries*. Works the same way as the :meth:`union` method.''' q = self._clone() q.intersections += queries return q
python
def intersect(self, *queries): '''Return a new :class:`Query` obtained from the intersection of this :class:`Query` with one or more *queries*. Works the same way as the :meth:`union` method.''' q = self._clone() q.intersections += queries return q
[ "def", "intersect", "(", "self", ",", "*", "queries", ")", ":", "q", "=", "self", ".", "_clone", "(", ")", "q", ".", "intersections", "+=", "queries", "return", "q" ]
Return a new :class:`Query` obtained from the intersection of this :class:`Query` with one or more *queries*. Works the same way as the :meth:`union` method.
[ "Return", "a", "new", ":", "class", ":", "Query", "obtained", "from", "the", "intersection", "of", "this", ":", "class", ":", "Query", "with", "one", "or", "more", "*", "queries", "*", ".", "Works", "the", "same", "way", "as", "the", ":", "meth", ":", "union", "method", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L458-L464
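Both combinators are generative, so they chain freely with the rest of the query API. A sketch extending the docstring's own example; ``field3`` is an assumed field name::

    query = session.query(MyModel)
    qs = (query.filter(field1='bla')
               .union(query.filter(field2='foo'))
               .intersect(query.exclude(field3='baz')))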
lsbardel/python-stdnet
stdnet/odm/query.py
Query.sort_by
def sort_by(self, ordering): '''Sort the query by the given field :parameter ordering: a string indicating the :class:`Field` name to sort by. If prefixed with ``-``, the sorting will be in descending order, otherwise in ascending order. :return type: a new :class:`Query` instance. ''' if ordering: ordering = self._meta.get_sorting(ordering, QuerySetError) q = self._clone() q.data['ordering'] = ordering return q
python
def sort_by(self, ordering): '''Sort the query by the given field :parameter ordering: a string indicating the :class:`Field` name to sort by. If prefixed with ``-``, the sorting will be in descending order, otherwise in ascending order. :return type: a new :class:`Query` instance. ''' if ordering: ordering = self._meta.get_sorting(ordering, QuerySetError) q = self._clone() q.data['ordering'] = ordering return q
[ "def", "sort_by", "(", "self", ",", "ordering", ")", ":", "if", "ordering", ":", "ordering", "=", "self", ".", "_meta", ".", "get_sorting", "(", "ordering", ",", "QuerySetError", ")", "q", "=", "self", ".", "_clone", "(", ")", "q", ".", "data", "[", "'ordering'", "]", "=", "ordering", "return", "q" ]
Sort the query by the given field :parameter ordering: a string indicating the :class:`Field` name to sort by. If prefixed with ``-``, the sorting will be in descending order, otherwise in ascending order. :return type: a new :class:`Query` instance.
[ "Sort", "the", "query", "by", "the", "given", "field", ":", "parameter", "ordering", ":", "a", "string", "indicating", "the", ":", "class", ":", "Field", "name", "to", "sort", "by", ".", "If", "prefixed", "with", "-", "the", "sorting", "will", "be", "in", "descending", "order", "otherwise", "in", "ascending", "order", ".", ":", "return", "type", ":", "a", "new", ":", "class", ":", "Query", "instance", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L466-L478
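A quick sketch of the ``-`` prefix described above; ``last_modified`` is a hypothetical field name::

    newest_first = session.query(MyModel).sort_by('-last_modified')
    oldest_first = session.query(MyModel).sort_by('last_modified')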
lsbardel/python-stdnet
stdnet/odm/query.py
Query.search
def search(self, text, lookup=None): '''Search *text* in model. A search engine needs to be installed for this function to be available. :parameter text: a string to search. :return type: a new :class:`Query` instance. ''' q = self._clone() q.text = (text, lookup) return q
python
def search(self, text, lookup=None): '''Search *text* in model. A search engine needs to be installed for this function to be available. :parameter text: a string to search. :return type: a new :class:`Query` instance. ''' q = self._clone() q.text = (text, lookup) return q
[ "def", "search", "(", "self", ",", "text", ",", "lookup", "=", "None", ")", ":", "q", "=", "self", ".", "_clone", "(", ")", "q", ".", "text", "=", "(", "text", ",", "lookup", ")", "return", "q" ]
Search *text* in model. A search engine needs to be installed for this function to be available. :parameter text: a string to search. :return type: a new :class:`Query` instance.
[ "Search", "*", "text", "*", "in", "model", ".", "A", "search", "engine", "needs", "to", "be", "installed", "for", "this", "function", "to", "be", "available", ".", ":", "parameter", "text", ":", "a", "string", "to", "search", ".", ":", "return", "type", ":", "a", "new", ":", "class", ":", "Query", "instance", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L480-L489
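A sketch of a text search, assuming a search engine is registered on the router (otherwise evaluation fails, as the ``search_queries`` record below shows)::

    qs = session.query(MyModel).search('mars rover')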
lsbardel/python-stdnet
stdnet/odm/query.py
Query.where
def where(self, code, load_only=None): '''For :ref:`backend <db-index>` supporting scripting, it is possible to construct complex queries which execute the scripting *code* against each element in the query. The *code* should reference an instance of :attr:`model` by the ``this`` keyword. :parameter code: a valid expression in the scripting language of the database. :parameter load_only: Load only the selected fields when performing the query (this is different from the :meth:`load_only` method which is used when fetching data from the database). This field is an optimization which is used by the :ref:`redis backend <redis-server>` only and can be safely ignored in most use-cases. :return: a new :class:`Query` ''' if code: q = self._clone() q.data['where'] = (code, load_only) return q else: return self
python
def where(self, code, load_only=None): '''For :ref:`backend <db-index>` supporting scripting, it is possible to construct complex queries which execute the scripting *code* against each element in the query. The *code* should reference an instance of :attr:`model` by the ``this`` keyword. :parameter code: a valid expression in the scripting language of the database. :parameter load_only: Load only the selected fields when performing the query (this is different from the :meth:`load_only` method which is used when fetching data from the database). This field is an optimization which is used by the :ref:`redis backend <redis-server>` only and can be safely ignored in most use-cases. :return: a new :class:`Query` ''' if code: q = self._clone() q.data['where'] = (code, load_only) return q else: return self
[ "def", "where", "(", "self", ",", "code", ",", "load_only", "=", "None", ")", ":", "if", "code", ":", "q", "=", "self", ".", "_clone", "(", ")", "q", ".", "data", "[", "'where'", "]", "=", "(", "code", ",", "load_only", ")", "return", "q", "else", ":", "return", "self" ]
For :ref:`backend <db-index>` supporting scripting, it is possible to construct complex queries which execute the scripting *code* against each element in the query. The *code* should reference an instance of :attr:`model` by the ``this`` keyword. :parameter code: a valid expression in the scripting language of the database. :parameter load_only: Load only the selected fields when performing the query (this is different from the :meth:`load_only` method which is used when fetching data from the database). This field is an optimization which is used by the :ref:`redis backend <redis-server>` only and can be safely ignored in most use-cases. :return: a new :class:`Query`
[ "For", ":", "ref", ":", "backend", "<db", "-", "index", ">", "supporting", "scripting", "it", "is", "possible", "to", "construct", "complex", "queries", "which", "execute", "the", "scripting", "*", "code", "*", "against", "each", "element", "in", "the", "query", ".", "The", "*", "code", "*", "should", "reference", "an", "instance", "of", ":", "attr", ":", "model", "by", "the", "this", "keyword", ".", ":", "parameter", "code", ":", "a", "valid", "expression", "in", "the", "scripting", "language", "of", "the", "database", ".", ":", "parameter", "load_only", ":", "Load", "only", "the", "selected", "fields", "when", "performing", "the", "query", "(", "this", "is", "different", "from", "the", ":", "meth", ":", "load_only", "method", "which", "is", "used", "when", "fetching", "data", "from", "the", "database", ")", ".", "This", "field", "is", "an", "optimization", "which", "is", "used", "by", "the", ":", "ref", ":", "redis", "backend", "<redis", "-", "server", ">", "only", "and", "can", "be", "safely", "ignored", "in", "most", "use", "-", "cases", ".", ":", "return", ":", "a", "new", ":", "class", ":", "Query" ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L491-L510
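For the redis backend the scripting language is Lua, so *code* would be a Lua-side expression testing the current instance through ``this``. The expression below is illustrative only; ``size`` is an assumed field::

    qs = session.query(MyModel).where('this.size > 100')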
lsbardel/python-stdnet
stdnet/odm/query.py
Query.search_queries
def search_queries(self, q): '''Return a new :class:`QueryElem` for *q* applying a text search.''' if self.text: searchengine = self.session.router.search_engine if searchengine: return searchengine.search_model(q, *self.text) else: raise QuerySetError('Search not available for %s' % self._meta) else: return q
python
def search_queries(self, q): '''Return a new :class:`QueryElem` for *q* applying a text search.''' if self.text: searchengine = self.session.router.search_engine if searchengine: return searchengine.search_model(q, *self.text) else: raise QuerySetError('Search not available for %s' % self._meta) else: return q
[ "def", "search_queries", "(", "self", ",", "q", ")", ":", "if", "self", ".", "text", ":", "searchengine", "=", "self", ".", "session", ".", "router", ".", "search_engine", "if", "searchengine", ":", "return", "searchengine", ".", "search_model", "(", "q", ",", "*", "self", ".", "text", ")", "else", ":", "raise", "QuerySetError", "(", "'Search not available for %s'", "%", "self", ".", "_meta", ")", "else", ":", "return", "q" ]
Return a new :class:`QueryElem` for *q* applying a text search.
[ "Return", "a", "new", ":", "class", ":", "QueryElem", "for", "*", "q", "*", "applying", "a", "text", "search", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L512-L521
lsbardel/python-stdnet
stdnet/odm/query.py
Query.load_related
def load_related(self, related, *related_fields): '''It returns a new :class:`Query` that automatically follows the foreign-key relationship ``related``. :parameter related: A field name corresponding to a :class:`ForeignKey` in :attr:`Query.model`. :parameter related_fields: optional :class:`Field` names for the ``related`` model to load. If not provided, all fields will be loaded. This function is a :ref:`performance boost <performance-loadrelated>` when accessing the related fields of all (most) objects in your query. If your model contains more than one foreign key, you can use this function in a generative way:: qs = myquery.load_related('rel1').load_related('rel2','field1','field2') :rtype: a new :class:`Query`.''' field = self._get_related_field(related) if not field: raise FieldError('"%s" is not a related field for "%s"' % (related, self._meta)) q = self._clone() return q._add_to_load_related(field, *related_fields)
python
def load_related(self, related, *related_fields): '''It returns a new :class:`Query` that automatically follows the foreign-key relationship ``related``. :parameter related: A field name corresponding to a :class:`ForeignKey` in :attr:`Query.model`. :parameter related_fields: optional :class:`Field` names for the ``related`` model to load. If not provided, all fields will be loaded. This function is a :ref:`performance boost <performance-loadrelated>` when accessing the related fields of all (most) objects in your query. If your model contains more than one foreign key, you can use this function in a generative way:: qs = myquery.load_related('rel1').load_related('rel2','field1','field2') :rtype: a new :class:`Query`.''' field = self._get_related_field(related) if not field: raise FieldError('"%s" is not a related field for "%s"' % (related, self._meta)) q = self._clone() return q._add_to_load_related(field, *related_fields)
[ "def", "load_related", "(", "self", ",", "related", ",", "*", "related_fields", ")", ":", "field", "=", "self", ".", "_get_related_field", "(", "related", ")", "if", "not", "field", ":", "raise", "FieldError", "(", "'\"%s\" is not a related field for \"%s\"'", "%", "(", "related", ",", "self", ".", "_meta", ")", ")", "q", "=", "self", ".", "_clone", "(", ")", "return", "q", ".", "_add_to_load_related", "(", "field", ",", "*", "related_fields", ")" ]
It returns a new :class:`Query` that automatically follows the foreign-key relationship ``related``. :parameter related: A field name corresponding to a :class:`ForeignKey` in :attr:`Query.model`. :parameter related_fields: optional :class:`Field` names for the ``related`` model to load. If not provided, all fields will be loaded. This function is a :ref:`performance boost <performance-loadrelated>` when accessing the related fields of all (most) objects in your query. If your model contains more than one foreign key, you can use this function in a generative way:: qs = myquery.load_related('rel1').load_related('rel2','field1','field2') :rtype: a new :class:`Query`.
[ "It", "returns", "a", "new", ":", "class", ":", "Query", "that", "automatically", "follows", "the", "foreign", "-", "key", "relationship", "related", ".", ":", "parameter", "related", ":", "A", "field", "name", "corresponding", "to", "a", ":", "class", ":", "ForeignKey", "in", ":", "attr", ":", "Query", ".", "model", ".", ":", "parameter", "related_fields", ":", "optional", ":", "class", ":", "Field", "names", "for", "the", "related", "model", "to", "load", ".", "If", "not", "provided", "all", "fields", "will", "be", "loaded", ".", "This", "function", "is", "a", ":", "ref", ":", "performance", "boost", "<performance", "-", "loadrelated", ">", "when", "accessing", "the", "related", "fields", "of", "all", "(", "most", ")", "objects", "in", "your", "query", ".", "If", "your", "model", "contains", "more", "than", "one", "foreign", "key", "you", "can", "use", "this", "function", "in", "a", "generative", "way", "::", "qs", "=", "myquery", ".", "load_related", "(", "rel1", ")", ".", "load_related", "(", "rel2", "field1", "field2", ")", ":", "rtype", ":", "a", "new", ":", "class", ":", "Query", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L523-L546
lsbardel/python-stdnet
stdnet/odm/query.py
Query.load_only
def load_only(self, *fields): '''This provides a :ref:`performance boost <increase-performance>` in cases when you need to load a subset of fields of your model. The boost achieved is less than the one obtained when using :meth:`Query.load_related`, since it does not reduce the number of requests to the database. However, it can save you lots of bandwidth when excluding data intensive fields you don't need. ''' q = self._clone() new_fields = [] for field in fields: if JSPLITTER in field: bits = field.split(JSPLITTER) related = self._get_related_field(bits[0]) if related: q._add_to_load_related(related, JSPLITTER.join(bits[1:])) continue new_fields.append(field) if fields and not new_fields: # if we added a field to the load_related list and no fields # are left, we add the primary key so that other fields are not # loaded. new_fields.append(self._meta.pkname()) fs = unique_tuple(q.fields, new_fields) q.data['fields'] = fs if fs else None return q
python
def load_only(self, *fields): '''This provides a :ref:`performance boost <increase-performance>` in cases when you need to load a subset of fields of your model. The boost achieved is less than the one obtained when using :meth:`Query.load_related`, since it does not reduce the number of requests to the database. However, it can save you lots of bandwidth when excluding data intensive fields you don't need. ''' q = self._clone() new_fields = [] for field in fields: if JSPLITTER in field: bits = field.split(JSPLITTER) related = self._get_related_field(bits[0]) if related: q._add_to_load_related(related, JSPLITTER.join(bits[1:])) continue new_fields.append(field) if fields and not new_fields: # if we added a field to the load_related list and no fields # are left, we add the primary key so that other fields are not # loaded. new_fields.append(self._meta.pkname()) fs = unique_tuple(q.fields, new_fields) q.data['fields'] = fs if fs else None return q
[ "def", "load_only", "(", "self", ",", "*", "fields", ")", ":", "q", "=", "self", ".", "_clone", "(", ")", "new_fields", "=", "[", "]", "for", "field", "in", "fields", ":", "if", "JSPLITTER", "in", "field", ":", "bits", "=", "field", ".", "split", "(", "JSPLITTER", ")", "related", "=", "self", ".", "_get_related_field", "(", "bits", "[", "0", "]", ")", "if", "related", ":", "q", ".", "_add_to_load_related", "(", "related", ",", "JSPLITTER", ".", "join", "(", "bits", "[", "1", ":", "]", ")", ")", "continue", "new_fields", ".", "append", "(", "field", ")", "if", "fields", "and", "not", "new_fields", ":", "# if we added a field to the load_related list and no fields\r", "# are left, we add the primary key so that other fields are not\r", "# loaded.\r", "new_fields", ".", "append", "(", "self", ".", "_meta", ".", "pkname", "(", ")", ")", "fs", "=", "unique_tuple", "(", "q", ".", "fields", ",", "new_fields", ")", "q", ".", "data", "[", "'fields'", "]", "=", "fs", "if", "fs", "else", "None", "return", "q" ]
This provides a :ref:`performance boost <increase-performance>` in cases when you need to load a subset of fields of your model. The boost achieved is less than the one obtained when using :meth:`Query.load_related`, since it does not reduce the number of requests to the database. However, it can save you lots of bandwidth when excluding data intensive fields you don't need.
[ "This", "provides", "a", ":", "ref", ":", "performance", "boost", "<increase", "-", "performance", ">", "in", "cases", "when", "you", "need", "to", "load", "a", "subset", "of", "fields", "of", "your", "model", ".", "The", "boost", "achieved", "is", "less", "than", "the", "one", "obtained", "when", "using", ":", "meth", ":", "Query", ".", "load_related", "since", "it", "does", "not", "reduce", "the", "number", "of", "requests", "to", "the", "database", ".", "However", "it", "can", "save", "you", "lots", "of", "bandwidth", "when", "excluding", "data", "intensive", "fields", "you", "don", "t", "need", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L548-L573
lsbardel/python-stdnet
stdnet/odm/query.py
Query.dont_load
def dont_load(self, *fields): '''Works like :meth:`load_only` to provide a :ref:`performance boost <increase-performance>` in cases when you need to load all fields except a subset specified by *fields*. ''' q = self._clone() fs = unique_tuple(q.exclude_fields, fields) q.exclude_fields = fs if fs else None return q
python
def dont_load(self, *fields): '''Works like :meth:`load_only` to provide a :ref:`performance boost <increase-performance>` in cases when you need to load all fields except a subset specified by *fields*. ''' q = self._clone() fs = unique_tuple(q.exclude_fields, fields) q.exclude_fields = fs if fs else None return q
[ "def", "dont_load", "(", "self", ",", "*", "fields", ")", ":", "q", "=", "self", ".", "_clone", "(", ")", "fs", "=", "unique_tuple", "(", "q", ".", "exclude_fields", ",", "fields", ")", "q", ".", "exclude_fields", "=", "fs", "if", "fs", "else", "None", "return", "q" ]
Works like :meth:`load_only` to provide a :ref:`performance boost <increase-performance>` in cases when you need to load all fields except a subset specified by *fields*.
[ "Works", "like", ":", "meth", ":", "load_only", "to", "provide", "a", ":", "ref", ":", "performance", "boost", "<increase", "-", "performance", ">", "in", "cases", "when", "you", "need", "to", "load", "all", "fields", "except", "a", "subset", "specified", "by", "*", "fields", "*", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L575-L583
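The two projections above are complementary; the field names in this sketch are assumptions::

    light = session.query(MyModel).load_only('id', 'name')
    no_blob = session.query(MyModel).dont_load('big_payload')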
lsbardel/python-stdnet
stdnet/odm/query.py
Query.get
def get(self, **kwargs): '''Return an instance of a model matching the query. A special case is the query on ``id`` which provides direct access to the :attr:`session` instances. If the given primary key is present in the session, the object is returned directly without performing any query.''' return self.filter(**kwargs).items( callback=self.model.get_unique_instance)
python
def get(self, **kwargs): '''Return an instance of a model matching the query. A special case is the query on ``id`` which provides direct access to the :attr:`session` instances. If the given primary key is present in the session, the object is returned directly without performing any query.''' return self.filter(**kwargs).items( callback=self.model.get_unique_instance)
[ "def", "get", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "filter", "(", "*", "*", "kwargs", ")", ".", "items", "(", "callback", "=", "self", ".", "model", ".", "get_unique_instance", ")" ]
Return an instance of a model matching the query. A special case is the query on ``id`` which provides direct access to the :attr:`session` instances. If the given primary key is present in the session, the object is returned directly without performing any query.
[ "Return", "an", "instance", "of", "a", "model", "matching", "the", "query", ".", "A", "special", "case", "is", "the", "query", "on", "id", "which", "provides", "direct", "access", "to", "the", ":", "attr", ":", "session", "instances", ".", "If", "the", "given", "primary", "key", "is", "present", "in", "the", "session", "the", "object", "is", "returned", "directly", "without", "performing", "any", "query", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L594-L600
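``get`` resolves through ``get_unique_instance``, so it expects exactly one match for the given lookup. A minimal sketch::

    instance = session.query(MyModel).get(id=87)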
lsbardel/python-stdnet
stdnet/odm/query.py
Query.construct
def construct(self): '''Build the :class:`QueryElement` representing this query.''' if self.__construct is None: self.__construct = self._construct() return self.__construct
python
def construct(self): '''Build the :class:`QueryElement` representing this query.''' if self.__construct is None: self.__construct = self._construct() return self.__construct
[ "def", "construct", "(", "self", ")", ":", "if", "self", ".", "__construct", "is", "None", ":", "self", ".", "__construct", "=", "self", ".", "_construct", "(", ")", "return", "self", ".", "__construct" ]
Build the :class:`QueryElement` representing this query.
[ "Build", "the", ":", "class", ":", "QueryElement", "representing", "this", "query", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L615-L619
lsbardel/python-stdnet
stdnet/odm/query.py
Query.backend_query
def backend_query(self, **kwargs): '''Build and return the :class:`stdnet.utils.async.BackendQuery`. This is a lazy method in the sense that it is evaluated once only and its result stored for future retrieval.''' q = self.construct() return q if isinstance(q, EmptyQuery) else q.backend_query(**kwargs)
python
def backend_query(self, **kwargs): '''Build and return the :class:`stdnet.utils.async.BackendQuery`. This is a lazy method in the sense that it is evaluated once only and its result stored for future retrieval.''' q = self.construct() return q if isinstance(q, EmptyQuery) else q.backend_query(**kwargs)
[ "def", "backend_query", "(", "self", ",", "*", "*", "kwargs", ")", ":", "q", "=", "self", ".", "construct", "(", ")", "return", "q", "if", "isinstance", "(", "q", ",", "EmptyQuery", ")", "else", "q", ".", "backend_query", "(", "*", "*", "kwargs", ")" ]
Build and return the :class:`stdnet.utils.async.BackendQuery`. This is a lazy method in the sense that it is evaluated once only and its result stored for future retrieval.
[ "Build", "and", "return", "the", ":", "class", ":", "stdnet", ".", "utils", ".", "async", ".", "BackendQuery", ".", "This", "is", "a", "lazy", "method", "in", "the", "sense", "that", "it", "is", "evaluated", "once", "only", "and", "its", "result", "stored", "for", "future", "retrieval", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L621-L626
lsbardel/python-stdnet
stdnet/odm/query.py
Query.aggregate
def aggregate(self, kwargs): '''Aggregate lookup parameters.''' meta = self._meta fields = meta.dfields field_lookups = {} for name, value in iteritems(kwargs): bits = name.split(JSPLITTER) field_name = bits.pop(0) if field_name not in fields: raise QuerySetError('Could not filter on model "{0}".\ Field "{1}" does not exist.'.format(meta, field_name)) field = fields[field_name] attname = field.attname lookup = None if bits: bits = [n.lower() for n in bits] if bits[-1] == 'in': bits.pop() elif bits[-1] in range_lookups: lookup = bits.pop() remaining = JSPLITTER.join(bits) if lookup: # this is a range lookup attname, nested = field.get_lookup(remaining, QuerySetError) lookups = get_lookups(attname, field_lookups) lookups.append(lookup_value(lookup, (value, nested))) continue elif remaining: # Not a range lookup, must be a nested filter value = field.filter(self.session, remaining, value) lookups = get_lookups(attname, field_lookups) # If we are here the field must be an index if not field.index: raise QuerySetError("%s %s is not an index. Cannot query." % (field.__class__.__name__, field_name)) if not iterable(value): value = (value,) for v in value: if isinstance(v, Q): v = lookup_value('set', v.construct()) else: v = lookup_value('value', field.serialise(v, lookup)) lookups.append(v) # return [queryset(self, name=name, underlying=field_lookups[name]) for name in sorted(field_lookups)]
python
def aggregate(self, kwargs): '''Aggregate lookup parameters.''' meta = self._meta fields = meta.dfields field_lookups = {} for name, value in iteritems(kwargs): bits = name.split(JSPLITTER) field_name = bits.pop(0) if field_name not in fields: raise QuerySetError('Could not filter on model "{0}".\ Field "{1}" does not exist.'.format(meta, field_name)) field = fields[field_name] attname = field.attname lookup = None if bits: bits = [n.lower() for n in bits] if bits[-1] == 'in': bits.pop() elif bits[-1] in range_lookups: lookup = bits.pop() remaining = JSPLITTER.join(bits) if lookup: # this is a range lookup attname, nested = field.get_lookup(remaining, QuerySetError) lookups = get_lookups(attname, field_lookups) lookups.append(lookup_value(lookup, (value, nested))) continue elif remaining: # Not a range lookup, must be a nested filter value = field.filter(self.session, remaining, value) lookups = get_lookups(attname, field_lookups) # If we are here the field must be an index if not field.index: raise QuerySetError("%s %s is not an index. Cannot query." % (field.__class__.__name__, field_name)) if not iterable(value): value = (value,) for v in value: if isinstance(v, Q): v = lookup_value('set', v.construct()) else: v = lookup_value('value', field.serialise(v, lookup)) lookups.append(v) # return [queryset(self, name=name, underlying=field_lookups[name]) for name in sorted(field_lookups)]
[ "def", "aggregate", "(", "self", ",", "kwargs", ")", ":", "meta", "=", "self", ".", "_meta", "fields", "=", "meta", ".", "dfields", "field_lookups", "=", "{", "}", "for", "name", ",", "value", "in", "iteritems", "(", "kwargs", ")", ":", "bits", "=", "name", ".", "split", "(", "JSPLITTER", ")", "field_name", "=", "bits", ".", "pop", "(", "0", ")", "if", "field_name", "not", "in", "fields", ":", "raise", "QuerySetError", "(", "'Could not filter on model \"{0}\".\\\r\n Field \"{1}\" does not exist.'", ".", "format", "(", "meta", ",", "field_name", ")", ")", "field", "=", "fields", "[", "field_name", "]", "attname", "=", "field", ".", "attname", "lookup", "=", "None", "if", "bits", ":", "bits", "=", "[", "n", ".", "lower", "(", ")", "for", "n", "in", "bits", "]", "if", "bits", "[", "-", "1", "]", "==", "'in'", ":", "bits", ".", "pop", "(", ")", "elif", "bits", "[", "-", "1", "]", "in", "range_lookups", ":", "lookup", "=", "bits", ".", "pop", "(", ")", "remaining", "=", "JSPLITTER", ".", "join", "(", "bits", ")", "if", "lookup", ":", "# this is a range lookup\r", "attname", ",", "nested", "=", "field", ".", "get_lookup", "(", "remaining", ",", "QuerySetError", ")", "lookups", "=", "get_lookups", "(", "attname", ",", "field_lookups", ")", "lookups", ".", "append", "(", "lookup_value", "(", "lookup", ",", "(", "value", ",", "nested", ")", ")", ")", "continue", "elif", "remaining", ":", "# Not a range lookup, must be a nested filter\r", "value", "=", "field", ".", "filter", "(", "self", ".", "session", ",", "remaining", ",", "value", ")", "lookups", "=", "get_lookups", "(", "attname", ",", "field_lookups", ")", "# If we are here the field must be an index\r", "if", "not", "field", ".", "index", ":", "raise", "QuerySetError", "(", "\"%s %s is not an index. Cannot query.\"", "%", "(", "field", ".", "__class__", ".", "__name__", ",", "field_name", ")", ")", "if", "not", "iterable", "(", "value", ")", ":", "value", "=", "(", "value", ",", ")", "for", "v", "in", "value", ":", "if", "isinstance", "(", "v", ",", "Q", ")", ":", "v", "=", "lookup_value", "(", "'set'", ",", "v", ".", "construct", "(", ")", ")", "else", ":", "v", "=", "lookup_value", "(", "'value'", ",", "field", ".", "serialise", "(", "v", ",", "lookup", ")", ")", "lookups", ".", "append", "(", "v", ")", "#\r", "return", "[", "queryset", "(", "self", ",", "name", "=", "name", ",", "underlying", "=", "field_lookups", "[", "name", "]", ")", "for", "name", "in", "sorted", "(", "field_lookups", ")", "]" ]
Aggregate lookup parameters.
[ "Aggregate", "lookup", "parameters", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L698-L742
lsbardel/python-stdnet
stdnet/odm/mapper.py
models_from_model
def models_from_model(model, include_related=False, exclude=None): '''Generator of all models in model.''' if exclude is None: exclude = set() if model and model not in exclude: exclude.add(model) if isinstance(model, ModelType) and not model._meta.abstract: yield model if include_related: exclude.add(model) for field in model._meta.fields: if hasattr(field, 'relmodel'): through = getattr(field, 'through', None) for rmodel in (field.relmodel, field.model, through): for m in models_from_model( rmodel, include_related=include_related, exclude=exclude): yield m for manytomany in model._meta.manytomany: related = getattr(model, manytomany) for m in models_from_model(related.model, include_related=include_related, exclude=exclude): yield m elif not isinstance(model, ModelType) and isclass(model): # This is a class which is not a ModelType yield model
python
def models_from_model(model, include_related=False, exclude=None): '''Generator of all models in model.''' if exclude is None: exclude = set() if model and model not in exclude: exclude.add(model) if isinstance(model, ModelType) and not model._meta.abstract: yield model if include_related: exclude.add(model) for field in model._meta.fields: if hasattr(field, 'relmodel'): through = getattr(field, 'through', None) for rmodel in (field.relmodel, field.model, through): for m in models_from_model( rmodel, include_related=include_related, exclude=exclude): yield m for manytomany in model._meta.manytomany: related = getattr(model, manytomany) for m in models_from_model(related.model, include_related=include_related, exclude=exclude): yield m elif not isinstance(model, ModelType) and isclass(model): # This is a class which is not a ModelType yield model
[ "def", "models_from_model", "(", "model", ",", "include_related", "=", "False", ",", "exclude", "=", "None", ")", ":", "if", "exclude", "is", "None", ":", "exclude", "=", "set", "(", ")", "if", "model", "and", "model", "not", "in", "exclude", ":", "exclude", ".", "add", "(", "model", ")", "if", "isinstance", "(", "model", ",", "ModelType", ")", "and", "not", "model", ".", "_meta", ".", "abstract", ":", "yield", "model", "if", "include_related", ":", "exclude", ".", "add", "(", "model", ")", "for", "field", "in", "model", ".", "_meta", ".", "fields", ":", "if", "hasattr", "(", "field", ",", "'relmodel'", ")", ":", "through", "=", "getattr", "(", "field", ",", "'through'", ",", "None", ")", "for", "rmodel", "in", "(", "field", ".", "relmodel", ",", "field", ".", "model", ",", "through", ")", ":", "for", "m", "in", "models_from_model", "(", "rmodel", ",", "include_related", "=", "include_related", ",", "exclude", "=", "exclude", ")", ":", "yield", "m", "for", "manytomany", "in", "model", ".", "_meta", ".", "manytomany", ":", "related", "=", "getattr", "(", "model", ",", "manytomany", ")", "for", "m", "in", "models_from_model", "(", "related", ".", "model", ",", "include_related", "=", "include_related", ",", "exclude", "=", "exclude", ")", ":", "yield", "m", "elif", "not", "isinstance", "(", "model", ",", "ModelType", ")", "and", "isclass", "(", "model", ")", ":", "# This is a class which is not a ModelType", "yield", "model" ]
Generator of all models in model.
[ "Generator", "of", "all", "models", "in", "model", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/mapper.py#L281-L307
lsbardel/python-stdnet
stdnet/odm/mapper.py
model_iterator
def model_iterator(application, include_related=True, exclude=None): '''A generator of :class:`StdModel` classes found in *application*. :parameter application: A python dotted path or an iterable over python dotted-paths where models are defined. Only models defined in these paths are considered. For example:: from stdnet.odm import model_iterator APPS = ('stdnet.contrib.searchengine', 'stdnet.contrib.timeseries') for model in model_iterator(APPS): ... ''' if exclude is None: exclude = set() application = native_str(application) if ismodule(application) or isinstance(application, str): if ismodule(application): mod, application = application, application.__name__ else: try: mod = import_module(application) except ImportError: # the module is not there mod = None if mod: label = application.split('.')[-1] try: mod_models = import_module('.models', application) except ImportError: mod_models = mod label = getattr(mod_models, 'app_label', label) models = set() for name in dir(mod_models): value = getattr(mod_models, name) meta = getattr(value, '_meta', None) if isinstance(value, ModelType) and meta: for model in models_from_model( value, include_related=include_related, exclude=exclude): if (model._meta.app_label == label and model not in models): models.add(model) yield model else: for app in application: for m in model_iterator(app): yield m
python
def model_iterator(application, include_related=True, exclude=None): '''A generator of :class:`StdModel` classes found in *application*. :parameter application: A python dotted path or an iterable over python dotted-paths where models are defined. Only models defined in these paths are considered. For example:: from stdnet.odm import model_iterator APPS = ('stdnet.contrib.searchengine', 'stdnet.contrib.timeseries') for model in model_iterator(APPS): ... ''' if exclude is None: exclude = set() application = native_str(application) if ismodule(application) or isinstance(application, str): if ismodule(application): mod, application = application, application.__name__ else: try: mod = import_module(application) except ImportError: # the module is not there mod = None if mod: label = application.split('.')[-1] try: mod_models = import_module('.models', application) except ImportError: mod_models = mod label = getattr(mod_models, 'app_label', label) models = set() for name in dir(mod_models): value = getattr(mod_models, name) meta = getattr(value, '_meta', None) if isinstance(value, ModelType) and meta: for model in models_from_model( value, include_related=include_related, exclude=exclude): if (model._meta.app_label == label and model not in models): models.add(model) yield model else: for app in application: for m in model_iterator(app): yield m
[ "def", "model_iterator", "(", "application", ",", "include_related", "=", "True", ",", "exclude", "=", "None", ")", ":", "if", "exclude", "is", "None", ":", "exclude", "=", "set", "(", ")", "application", "=", "native_str", "(", "application", ")", "if", "ismodule", "(", "application", ")", "or", "isinstance", "(", "application", ",", "str", ")", ":", "if", "ismodule", "(", "application", ")", ":", "mod", ",", "application", "=", "application", ",", "application", ".", "__name__", "else", ":", "try", ":", "mod", "=", "import_module", "(", "application", ")", "except", "ImportError", ":", "# the module is not there", "mod", "=", "None", "if", "mod", ":", "label", "=", "application", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "try", ":", "mod_models", "=", "import_module", "(", "'.models'", ",", "application", ")", "except", "ImportError", ":", "mod_models", "=", "mod", "label", "=", "getattr", "(", "mod_models", ",", "'app_label'", ",", "label", ")", "models", "=", "set", "(", ")", "for", "name", "in", "dir", "(", "mod_models", ")", ":", "value", "=", "getattr", "(", "mod_models", ",", "name", ")", "meta", "=", "getattr", "(", "value", ",", "'_meta'", ",", "None", ")", "if", "isinstance", "(", "value", ",", "ModelType", ")", "and", "meta", ":", "for", "model", "in", "models_from_model", "(", "value", ",", "include_related", "=", "include_related", ",", "exclude", "=", "exclude", ")", ":", "if", "(", "model", ".", "_meta", ".", "app_label", "==", "label", "and", "model", "not", "in", "models", ")", ":", "models", ".", "add", "(", "model", ")", "yield", "model", "else", ":", "for", "app", "in", "application", ":", "for", "m", "in", "model_iterator", "(", "app", ")", ":", "yield", "m" ]
A generator of :class:`StdModel` classes found in *application*. :parameter application: A python dotted path or an iterable over python dotted-paths where models are defined. Only models defined in these paths are considered. For example:: from stdnet.odm import model_iterator APPS = ('stdnet.contrib.searchengine', 'stdnet.contrib.timeseries') for model in model_iterator(APPS): ...
[ "A", "generator", "of", ":", "class", ":", "StdModel", "classes", "found", "in", "*", "application", "*", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/mapper.py#L310-L363
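Besides the dotted-path form shown in the docstring, a module object can be passed directly; ``myapp`` below is a hypothetical application module::

    import myapp
    from stdnet.odm import model_iterator

    models = list(model_iterator(myapp, include_related=False))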
lsbardel/python-stdnet
stdnet/odm/mapper.py
Router.set_search_engine
def set_search_engine(self, engine): '''Set the search ``engine`` for this :class:`Router`.''' self._search_engine = engine self._search_engine.set_router(self)
python
def set_search_engine(self, engine): '''Set the search ``engine`` for this :class:`Router`.''' self._search_engine = engine self._search_engine.set_router(self)
[ "def", "set_search_engine", "(", "self", ",", "engine", ")", ":", "self", ".", "_search_engine", "=", "engine", "self", ".", "_search_engine", ".", "set_router", "(", "self", ")" ]
Set the search ``engine`` for this :class:`Router`.
[ "Set", "the", "search", "engine", "for", "this", ":", "class", ":", "Router", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/mapper.py#L112-L115
lsbardel/python-stdnet
stdnet/odm/mapper.py
Router.register
def register(self, model, backend=None, read_backend=None, include_related=True, **params): '''Register a :class:`Model` with this :class:`Router`. If the model was already registered it does nothing. :param model: a :class:`Model` class. :param backend: a :class:`stdnet.BackendDataServer` or a :ref:`connection string <connection-string>`. :param read_backend: Optional :class:`stdnet.BackendDataServer` for read operations. This is useful when the server has a master/slave configuration, where the master accepts write and read operations and the ``slave`` read-only operations. :param include_related: ``True`` if models related to ``model`` need to be registered. Default ``True``. :param params: Additional parameters for the :func:`getdb` function. :return: the backend if at least one model was registered, otherwise ``None``. ''' backend = backend or self._default_backend backend = getdb(backend=backend, **params) if read_backend: read_backend = getdb(read_backend) registered = 0 if isinstance(model, Structure): self._structures[model] = StructureManager(model, backend, read_backend, self) return model for model in models_from_model(model, include_related=include_related): if model in self._registered_models: continue registered += 1 default_manager = backend.default_manager or Manager manager_class = getattr(model, 'manager_class', default_manager) manager = manager_class(model, backend, read_backend, self) self._registered_models[model] = manager if isinstance(model, ModelType): attr_name = model._meta.name else: attr_name = model.__name__.lower() if attr_name not in self._registered_names: self._registered_names[attr_name] = manager if self._install_global: model.objects = manager if registered: return backend
python
def register(self, model, backend=None, read_backend=None, include_related=True, **params): '''Register a :class:`Model` with this :class:`Router`. If the model was already registered it does nothing. :param model: a :class:`Model` class. :param backend: a :class:`stdnet.BackendDataServer` or a :ref:`connection string <connection-string>`. :param read_backend: Optional :class:`stdnet.BackendDataServer` for read operations. This is useful when the server has a master/slave configuration, where the master accepts write and read operations and the ``slave`` read-only operations. :param include_related: ``True`` if models related to ``model`` need to be registered. Default ``True``. :param params: Additional parameters for the :func:`getdb` function. :return: the backend if at least one model was registered, otherwise ``None``. ''' backend = backend or self._default_backend backend = getdb(backend=backend, **params) if read_backend: read_backend = getdb(read_backend) registered = 0 if isinstance(model, Structure): self._structures[model] = StructureManager(model, backend, read_backend, self) return model for model in models_from_model(model, include_related=include_related): if model in self._registered_models: continue registered += 1 default_manager = backend.default_manager or Manager manager_class = getattr(model, 'manager_class', default_manager) manager = manager_class(model, backend, read_backend, self) self._registered_models[model] = manager if isinstance(model, ModelType): attr_name = model._meta.name else: attr_name = model.__name__.lower() if attr_name not in self._registered_names: self._registered_names[attr_name] = manager if self._install_global: model.objects = manager if registered: return backend
[ "def", "register", "(", "self", ",", "model", ",", "backend", "=", "None", ",", "read_backend", "=", "None", ",", "include_related", "=", "True", ",", "*", "*", "params", ")", ":", "backend", "=", "backend", "or", "self", ".", "_default_backend", "backend", "=", "getdb", "(", "backend", "=", "backend", ",", "*", "*", "params", ")", "if", "read_backend", ":", "read_backend", "=", "getdb", "(", "read_backend", ")", "registered", "=", "0", "if", "isinstance", "(", "model", ",", "Structure", ")", ":", "self", ".", "_structures", "[", "model", "]", "=", "StructureManager", "(", "model", ",", "backend", ",", "read_backend", ",", "self", ")", "return", "model", "for", "model", "in", "models_from_model", "(", "model", ",", "include_related", "=", "include_related", ")", ":", "if", "model", "in", "self", ".", "_registered_models", ":", "continue", "registered", "+=", "1", "default_manager", "=", "backend", ".", "default_manager", "or", "Manager", "manager_class", "=", "getattr", "(", "model", ",", "'manager_class'", ",", "default_manager", ")", "manager", "=", "manager_class", "(", "model", ",", "backend", ",", "read_backend", ",", "self", ")", "self", ".", "_registered_models", "[", "model", "]", "=", "manager", "if", "isinstance", "(", "model", ",", "ModelType", ")", ":", "attr_name", "=", "model", ".", "_meta", ".", "name", "else", ":", "attr_name", "=", "model", ".", "__name__", ".", "lower", "(", ")", "if", "attr_name", "not", "in", "self", ".", "_registered_names", ":", "self", ".", "_registered_names", "[", "attr_name", "]", "=", "manager", "if", "self", ".", "_install_global", ":", "model", ".", "objects", "=", "manager", "if", "registered", ":", "return", "backend" ]
Register a :class:`Model` with this :class:`Router`. If the model was already registered it does nothing. :param model: a :class:`Model` class. :param backend: a :class:`stdnet.BackendDataServer` or a :ref:`connection string <connection-string>`. :param read_backend: Optional :class:`stdnet.BackendDataServer` for read operations. This is useful when the server has a master/slave configuration, where the master accepts write and read operations and the ``slave`` read-only operations. :param include_related: ``True`` if models related to ``model`` need to be registered. Default ``True``. :param params: Additional parameters for the :func:`getdb` function. :return: the backend if at least one model was registered, otherwise ``None``.
[ "Register", "a", ":", "class", ":", "Model", "with", "this", ":", "class", ":", "Router", ".", "If", "the", "model", "was", "already", "registered", "it", "does", "nothing", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/mapper.py#L117-L160
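A hedged registration sketch; ``MyModel`` and ``OtherModel`` are hypothetical and it assumes the :class:`Router` constructor accepts a default backend connection string::

    from stdnet import odm

    models = odm.Router('redis://127.0.0.1:6379?db=7')
    models.register(MyModel)  # uses the default backend
    models.register(OtherModel, 'redis://127.0.0.1:6379?db=8',
                    include_related=False)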
lsbardel/python-stdnet
stdnet/odm/mapper.py
Router.from_uuid
def from_uuid(self, uuid, session=None): '''Retrieve a :class:`Model` from its universally unique identifier ``uuid``. If the ``uuid`` does not match any instance an exception will be raised. ''' elems = uuid.split('.') if len(elems) == 2: model = get_model_from_hash(elems[0]) if not model: raise Model.DoesNotExist( 'model id "{0}" not available'.format(elems[0])) if not session or session.router is not self: session = self.session() return session.query(model).get(id=elems[1]) raise Model.DoesNotExist('uuid "{0}" not recognized'.format(uuid))
python
def from_uuid(self, uuid, session=None): '''Retrieve a :class:`Model` from its universally unique identifier ``uuid``. If the ``uuid`` does not match any instance an exception will be raised. ''' elems = uuid.split('.') if len(elems) == 2: model = get_model_from_hash(elems[0]) if not model: raise Model.DoesNotExist( 'model id "{0}" not available'.format(elems[0])) if not session or session.router is not self: session = self.session() return session.query(model).get(id=elems[1]) raise Model.DoesNotExist('uuid "{0}" not recognized'.format(uuid))
[ "def", "from_uuid", "(", "self", ",", "uuid", ",", "session", "=", "None", ")", ":", "elems", "=", "uuid", ".", "split", "(", "'.'", ")", "if", "len", "(", "elems", ")", "==", "2", ":", "model", "=", "get_model_from_hash", "(", "elems", "[", "0", "]", ")", "if", "not", "model", ":", "raise", "Model", ".", "DoesNotExist", "(", "'model id \"{0}\" not available'", ".", "format", "(", "elems", "[", "0", "]", ")", ")", "if", "not", "session", "or", "session", ".", "router", "is", "not", "self", ":", "session", "=", "self", ".", "session", "(", ")", "return", "session", ".", "query", "(", "model", ")", ".", "get", "(", "id", "=", "elems", "[", "1", "]", ")", "raise", "Model", ".", "DoesNotExist", "(", "'uuid \"{0}\" not recognized'", ".", "format", "(", "uuid", ")", ")" ]
Retrieve a :class:`Model` from its universally unique identifier ``uuid``. If the ``uuid`` does not match any instance an exception will be raised.
[ "Retrieve", "a", ":", "class", ":", "Model", "from", "its", "universally", "unique", "identifier", "uuid", ".", "If", "the", "uuid", "does", "not", "match", "any", "instance", "an", "exception", "will", "be", "raised", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/mapper.py#L162-L175
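A round-trip sketch, assuming instances expose a ``uuid`` attribute of the ``<model-hash>.<id>`` form consumed here::

    instance = models.mymodel.get(id=1)  # a hypothetical registered model
    same = models.from_uuid(instance.uuid)
    assert same == instance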
lsbardel/python-stdnet
stdnet/odm/mapper.py
Router.flush
def flush(self, exclude=None, include=None, dryrun=False): '''Flush :attr:`registered_models`. :param exclude: optional list of model names to exclude. :param include: optional list of model names to include. :param dryrun: Doesn't remove anything, simply collects managers to flush. :return: a list of flush results, or the managers themselves when ``dryrun`` is ``True``. ''' exclude = exclude or [] results = [] for manager in self._registered_models.values(): m = manager._meta if include is not None and not (m.modelkey in include or m.app_label in include): continue if not (m.modelkey in exclude or m.app_label in exclude): if dryrun: results.append(manager) else: results.append(manager.flush()) return results
python
def flush(self, exclude=None, include=None, dryrun=False): '''Flush :attr:`registered_models`. :param exclude: optional list of model names to exclude. :param include: optional list of model names to include. :param dryrun: Doesn't remove anything, simply collects managers to flush. :return: a list of flush results, or the managers themselves when ``dryrun`` is ``True``. ''' exclude = exclude or [] results = [] for manager in self._registered_models.values(): m = manager._meta if include is not None and not (m.modelkey in include or m.app_label in include): continue if not (m.modelkey in exclude or m.app_label in exclude): if dryrun: results.append(manager) else: results.append(manager.flush()) return results
[ "def", "flush", "(", "self", ",", "exclude", "=", "None", ",", "include", "=", "None", ",", "dryrun", "=", "False", ")", ":", "exclude", "=", "exclude", "or", "[", "]", "results", "=", "[", "]", "for", "manager", "in", "self", ".", "_registered_models", ".", "values", "(", ")", ":", "m", "=", "manager", ".", "_meta", "if", "include", "is", "not", "None", "and", "not", "(", "m", ".", "modelkey", "in", "include", "or", "m", ".", "app_label", "in", "include", ")", ":", "continue", "if", "not", "(", "m", ".", "modelkey", "in", "exclude", "or", "m", ".", "app_label", "in", "exclude", ")", ":", "if", "dryrun", ":", "results", ".", "append", "(", "manager", ")", "else", ":", "results", ".", "append", "(", "manager", ".", "flush", "(", ")", ")", "return", "results" ]
Flush :attr:`registered_models`. :param exclude: optional list of model names to exclude. :param include: optional list of model names to include. :param dryrun: Doesn't remove anything, simply collects managers to flush. :return: a list of flush results, or the managers themselves when ``dryrun`` is ``True``.
[ "Flush", ":", "attr", ":", "registered_models", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/mapper.py#L177-L198
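For example, with ``models`` a :class:`Router` and hypothetical app labels::

    # dry run: collect the managers that would be flushed
    managers = models.flush(exclude=('sessions',), dryrun=True)
    # actually flush every model registered under the 'myapp' label
    models.flush(include=('myapp',))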
lsbardel/python-stdnet
stdnet/odm/mapper.py
Router.unregister
def unregister(self, model=None): '''Unregister a ``model`` if provided, otherwise it unregisters all registered models. Return a list of unregistered model managers or ``None`` if no managers were removed.''' if model is not None: try: manager = self._registered_models.pop(model) except KeyError: return if self._registered_names.get(manager._meta.name) == manager: self._registered_names.pop(manager._meta.name) return [manager] else: managers = list(self._registered_models.values()) self._registered_models.clear() return managers
python
def unregister(self, model=None): '''Unregister a ``model`` if provided, otherwise it unregisters all registered models. Return a list of unregistered model managers or ``None`` if no managers were removed.''' if model is not None: try: manager = self._registered_models.pop(model) except KeyError: return if self._registered_names.get(manager._meta.name) == manager: self._registered_names.pop(manager._meta.name) return [manager] else: managers = list(self._registered_models.values()) self._registered_models.clear() return managers
[ "def", "unregister", "(", "self", ",", "model", "=", "None", ")", ":", "if", "model", "is", "not", "None", ":", "try", ":", "manager", "=", "self", ".", "_registered_models", ".", "pop", "(", "model", ")", "except", "KeyError", ":", "return", "if", "self", ".", "_registered_names", ".", "get", "(", "manager", ".", "_meta", ".", "name", ")", "==", "manager", ":", "self", ".", "_registered_names", ".", "pop", "(", "manager", ".", "_meta", ".", "name", ")", "return", "[", "manager", "]", "else", ":", "managers", "=", "list", "(", "self", ".", "_registered_models", ".", "values", "(", ")", ")", "self", ".", "_registered_models", ".", "clear", "(", ")", "return", "managers" ]
Unregister a ``model`` if provided, otherwise it unregisters all registered models. Return a list of unregistered model managers or ``None`` if no managers were removed.
[ "Unregister", "a", "model", "if", "provided", "otherwise", "it", "unregisters", "all", "registered", "models", ".", "Return", "a", "list", "of", "unregistered", "model", "managers", "or", "None", "if", "no", "managers", "were", "removed", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/mapper.py#L200-L215
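For example, with ``MyModel`` a hypothetical registered model::

    removed = models.unregister(MyModel)  # -> [manager], or None if unknown
    all_removed = models.unregister()     # clears every registration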
lsbardel/python-stdnet
stdnet/odm/mapper.py
Router.register_applications
def register_applications(self, applications, models=None, backends=None): '''A higher level registration function for groups of models located on application modules. It uses the :func:`model_iterator` function to iterate through all :class:`Model` models available in ``applications`` and register them using the :func:`register` low level method. :parameter applications: A string or a list of strings representing python dotted paths where models are implemented. :parameter models: Optional list of models to include. If not provided all models found in *applications* will be included. :parameter backends: optional dictionary which maps a model or an application to a backend :ref:`connection string <connection-string>`. :rtype: A list of registered :class:`Model`. For example:: mapper.register_applications('mylib.myapp') mapper.register_applications(['mylib.myapp', 'another.path']) mapper.register_applications(pythonmodule) mapper.register_applications(['mylib.myapp',pythonmodule]) ''' return list(self._register_applications(applications, models, backends))
python
def register_applications(self, applications, models=None, backends=None): '''A higher level registration function for groups of models located on application modules. It uses the :func:`model_iterator` function to iterate through all :class:`Model` models available in ``applications`` and register them using the :func:`register` low level method. :parameter applications: A string or a list of strings representing python dotted paths where models are implemented. :parameter models: Optional list of models to include. If not provided all models found in *applications* will be included. :parameter backends: optional dictionary which maps a model or an application to a backend :ref:`connection string <connection-string>`. :rtype: A list of registered :class:`Model`. For example:: mapper.register_applications('mylib.myapp') mapper.register_applications(['mylib.myapp', 'another.path']) mapper.register_applications(pythonmodule) mapper.register_applications(['mylib.myapp',pythonmodule]) ''' return list(self._register_applications(applications, models, backends))
[ "def", "register_applications", "(", "self", ",", "applications", ",", "models", "=", "None", ",", "backends", "=", "None", ")", ":", "return", "list", "(", "self", ".", "_register_applications", "(", "applications", ",", "models", ",", "backends", ")", ")" ]
A higher level registration function for groups of models located on application modules. It uses the :func:`model_iterator` function to iterate through all :class:`Model` models available in ``applications`` and register them using the :func:`register` low level method. :parameter applications: A string or a list of strings representing python dotted paths where models are implemented. :parameter models: Optional list of models to include. If not provided all models found in *applications* will be included. :parameter backends: optional dictionary which maps a model or an application to a backend :ref:`connection string <connection-string>`. :rtype: A list of registered :class:`Model`. For example:: mapper.register_applications('mylib.myapp') mapper.register_applications(['mylib.myapp', 'another.path']) mapper.register_applications(pythonmodule) mapper.register_applications(['mylib.myapp',pythonmodule])
[ "A", "higher", "level", "registration", "function", "for", "groups", "of", "models", "located", "on", "application", "modules", ".", "It", "uses", "the", ":", "func", ":", "model_iterator", "function", "to", "iterate", "through", "all", ":", "class", ":", "Model", "models", "available", "in", "applications", "and", "register", "them", "using", "the", ":", "func", ":", "register", "low", "level", "method", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/mapper.py#L217-L242
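A hedged sketch of the ``backends`` mapping; the exact key form (application path versus label) is an assumption from the parameter description::

    models.register_applications(
        ['mylib.myapp', 'another.path'],
        backends={'mylib.myapp': 'redis://127.0.0.1:6379?db=9'})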
lsbardel/python-stdnet
stdnet/backends/redisb/client/async.py
Redis.execute_script
def execute_script(self, name, keys, *args, **options): '''Execute a script, making sure all required scripts are loaded. ''' script = get_script(name) if not script: raise redis.RedisError('No such script "%s"' % name) address = self.address() if address not in all_loaded_scripts: all_loaded_scripts[address] = set() loaded = all_loaded_scripts[address] toload = script.required_scripts.difference(loaded) for name in toload: s = get_script(name) yield self.script_load(s.script) loaded.update(toload) yield script(self, keys, args, options)
python
def execute_script(self, name, keys, *args, **options): '''Execute a script, making sure all required scripts are loaded. ''' script = get_script(name) if not script: raise redis.RedisError('No such script "%s"' % name) address = self.address() if address not in all_loaded_scripts: all_loaded_scripts[address] = set() loaded = all_loaded_scripts[address] toload = script.required_scripts.difference(loaded) for name in toload: s = get_script(name) yield self.script_load(s.script) loaded.update(toload) yield script(self, keys, args, options)
[ "def", "execute_script", "(", "self", ",", "name", ",", "keys", ",", "*", "args", ",", "*", "*", "options", ")", ":", "script", "=", "get_script", "(", "name", ")", "if", "not", "script", ":", "raise", "redis", ".", "RedisError", "(", "'No such script \"%s\"'", "%", "name", ")", "address", "=", "self", ".", "address", "(", ")", "if", "address", "not", "in", "all_loaded_scripts", ":", "all_loaded_scripts", "[", "address", "]", "=", "set", "(", ")", "loaded", "=", "all_loaded_scripts", "[", "address", "]", "toload", "=", "script", ".", "required_scripts", ".", "difference", "(", "loaded", ")", "for", "name", "in", "toload", ":", "s", "=", "get_script", "(", "name", ")", "yield", "self", ".", "script_load", "(", "s", ".", "script", ")", "loaded", ".", "update", "(", "toload", ")", "yield", "script", "(", "self", ",", "keys", ",", "args", ",", "options", ")" ]
Execute a script, making sure all required scripts are loaded.
[ "Execute", "a", "script", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/redisb/client/async.py#L40-L57
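Because this client is asynchronous, the method is itself a generator; a hedged calling sketch from pulsar-style coroutine code, where ``'commit'`` is a hypothetical script name::

    def run_script(client, keys, *args):
        # each yield waits on a redis round trip
        result = yield client.execute_script('commit', keys, *args)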
lsbardel/python-stdnet
stdnet/odm/search.py
SearchEngine.register
def register(self, model, related=None): '''Register a :class:`StdModel` with this search :class:`SearchEngine`. When registering a model, every time an instance is created, it will be indexed by the search engine. :param model: a :class:`StdModel` class. :param related: a list of related fields to include in the index. ''' update_model = UpdateSE(self, related) self.REGISTERED_MODELS[model] = update_model self.router.post_commit.bind(update_model, model) self.router.post_delete.bind(update_model, model)
python
def register(self, model, related=None): '''Register a :class:`StdModel` with this search :class:`SearchEngine`. When registering a model, every time an instance is created, it will be indexed by the search engine. :param model: a :class:`StdModel` class. :param related: a list of related fields to include in the index. ''' update_model = UpdateSE(self, related) self.REGISTERED_MODELS[model] = update_model self.router.post_commit.bind(update_model, model) self.router.post_delete.bind(update_model, model)
[ "def", "register", "(", "self", ",", "model", ",", "related", "=", "None", ")", ":", "update_model", "=", "UpdateSE", "(", "self", ",", "related", ")", "self", ".", "REGISTERED_MODELS", "[", "model", "]", "=", "update_model", "self", ".", "router", ".", "post_commit", ".", "bind", "(", "update_model", ",", "model", ")", "self", ".", "router", ".", "post_delete", ".", "bind", "(", "update_model", ",", "model", ")" ]
Register a :class:`StdModel` with this search :class:`SearchEngine`. When registering a model, every time an instance is created, it will be indexed by the search engine. :param model: a :class:`StdModel` class. :param related: a list of related fields to include in the index.
[ "Register", "a", ":", "class", ":", "StdModel", "with", "this", "search", ":", "class", ":", "SearchEngine", ".", "When", "registering", "a", "model", "every", "time", "an", "instance", "is", "created", "it", "will", "be", "indexed", "by", "the", "search", "engine", ".", ":", "param", "model", ":", "a", ":", "class", ":", "StdModel", "class", ".", ":", "param", "related", ":", "a", "list", "of", "related", "fields", "to", "include", "in", "the", "index", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/search.py#L67-L78
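A wiring sketch, assuming ``MySearchEngine`` is a concrete :class:`SearchEngine` implementation and ``Post`` a hypothetical model::

    engine = MySearchEngine()
    models.set_search_engine(engine)  # also binds the engine to the router
    engine.register(Post, related=('author',))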
lsbardel/python-stdnet
stdnet/odm/search.py
SearchEngine.words_from_text
def words_from_text(self, text, for_search=False): '''Generator of indexable words in *text*. This function loops through the :attr:`word_middleware` attribute to process the text. :param text: string from which to extract words. :param for_search: flag indicating if the words will be used for search or to index the database. This flag is used in conjunction with the middleware flag *for_search*. If this flag is ``True`` (i.e. we need to search the database for the words in *text*), only the middleware functions in :attr:`word_middleware` enabled for searching are used. Default: ``False``. return a *list* of cleaned words. ''' if not text: return [] word_gen = self.split_text(text) for middleware, fors in self.word_middleware: if for_search and not fors: continue word_gen = middleware(word_gen) if isgenerator(word_gen): word_gen = list(word_gen) return word_gen
python
def words_from_text(self, text, for_search=False): '''Generator of indexable words in *text*. This function loops through the :attr:`word_middleware` attribute to process the text. :param text: string from which to extract words. :param for_search: flag indicating if the words will be used for search or to index the database. This flag is used in conjunction with the middleware flag *for_search*. If this flag is ``True`` (i.e. we need to search the database for the words in *text*), only the middleware functions in :attr:`word_middleware` enabled for searching are used. Default: ``False``. return a *list* of cleaned words. ''' if not text: return [] word_gen = self.split_text(text) for middleware, fors in self.word_middleware: if for_search and not fors: continue word_gen = middleware(word_gen) if isgenerator(word_gen): word_gen = list(word_gen) return word_gen
[ "def", "words_from_text", "(", "self", ",", "text", ",", "for_search", "=", "False", ")", ":", "if", "not", "text", ":", "return", "[", "]", "word_gen", "=", "self", ".", "split_text", "(", "text", ")", "for", "middleware", ",", "fors", "in", "self", ".", "word_middleware", ":", "if", "for_search", "and", "not", "fors", ":", "continue", "word_gen", "=", "middleware", "(", "word_gen", ")", "if", "isgenerator", "(", "word_gen", ")", ":", "word_gen", "=", "list", "(", "word_gen", ")", "return", "word_gen" ]
Generator of indexable words in *text*. This function loops through the :attr:`word_middleware` attribute to process the text. :param text: string from which to extract words. :param for_search: flag indicating if the words will be used for search or to index the database. This flag is used in conjunction with the middleware flag *for_search*. If this flag is ``True`` (i.e. we need to search the database for the words in *text*), only the middleware functions in :attr:`word_middleware` enabled for searching are used. Default: ``False``. return a *list* of cleaned words.
[ "Generator", "of", "indexable", "words", "in", "*", "text", "*", ".", "This", "function", "loops", "through", "the", ":", "attr", ":", "word_middleware", "attribute", "to", "process", "the", "text", ".", ":", "param", "text", ":", "string", "from", "which", "to", "extract", "words", ".", ":", "param", "for_search", ":", "flag", "indicating", "if", "the", "words", "will", "be", "used", "for", "search", "or", "to", "index", "the", "database", ".", "This", "flag", "is", "used", "in", "conjunction", "with", "the", "middleware", "flag", "*", "for_search", "*", ".", "If", "this", "flag", "is", "True", "(", "i", ".", "e", ".", "we", "need", "to", "search", "the", "database", "for", "the", "words", "in", "*", "text", "*", ")", "only", "the", "middleware", "functions", "in", ":", "attr", ":", "word_middleware", "enabled", "for", "searching", "are", "used", ".", "Default", ":", "False", ".", "return", "a", "*", "list", "*", "of", "cleaned", "words", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/search.py#L86-L112
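For example, with ``engine`` a configured :class:`SearchEngine`::

    # index-time processing runs every registered middleware
    words = engine.words_from_text('The quick brown fox')
    # search-time processing skips middleware added with for_search=False
    query_words = engine.words_from_text('quick fox', for_search=True)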
lsbardel/python-stdnet
stdnet/odm/search.py
SearchEngine.add_word_middleware
def add_word_middleware(self, middleware, for_search=True): '''Add a *middleware* function to the list of :attr:`word_middleware`, for preprocessing words to be indexed. :param middleware: a callable receiving an iterable over words. :param for_search: flag indicating if the *middleware* can be used for the text to search. Default: ``True``. ''' if hasattr(middleware, '__call__'): self.word_middleware.append((middleware, for_search))
python
def add_word_middleware(self, middleware, for_search=True): '''Add a *middleware* function to the list of :attr:`word_middleware`, for preprocessing words to be indexed. :param middleware: a callable receiving an iterable over words. :param for_search: flag indicating if the *middleware* can be used for the text to search. Default: ``True``. ''' if hasattr(middleware, '__call__'): self.word_middleware.append((middleware, for_search))
[ "def", "add_word_middleware", "(", "self", ",", "middleware", ",", "for_search", "=", "True", ")", ":", "if", "hasattr", "(", "middleware", ",", "'__call__'", ")", ":", "self", ".", "word_middleware", ".", "append", "(", "(", "middleware", ",", "for_search", ")", ")" ]
Add a *middleware* function to the list of :attr:`word_middleware`, for preprocessing words to be indexed. :param middleware: a callable receiving an iterable over words. :param for_search: flag indicating if the *middleware* can be used for the text to search. Default: ``True``.
[ "Add", "a", "*", "middleware", "*", "function", "to", "the", "list", "of", ":", "attr", ":", "word_middleware", "for", "preprocessing", "words", "to", "be", "indexed", ".", ":", "param", "middleware", ":", "a", "callable", "receiving", "an", "iterable", "over", "words", ".", ":", "param", "for_search", ":", "flag", "indicating", "if", "the", "*", "middleware", "*", "can", "be", "used", "for", "the", "text", "to", "search", ".", "Default", ":", "True", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/search.py#L123-L132
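A minimal middleware sketch; generators are fine because the pipeline listifies the final result, and the stopword set is illustrative::

    def stopwords(words):
        # drop a few very common words
        common = ('the', 'and', 'of')
        for word in words:
            if word not in common:
                yield word

    # applied both when indexing and when searching (for_search defaults to True)
    engine.add_word_middleware(stopwords)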
lsbardel/python-stdnet
stdnet/odm/search.py
SearchEngine.query
def query(self, model): '''Return a query for ``model`` when it needs to be indexed. ''' session = self.router.session() fields = tuple((f.name for f in model._meta.scalarfields if f.type == 'text')) qs = session.query(model).load_only(*fields) for related in self.get_related_fields(model): qs = qs.load_related(related) return qs
python
def query(self, model): '''Return a query for ``model`` when it needs to be indexed. ''' session = self.router.session() fields = tuple((f.name for f in model._meta.scalarfields if f.type == 'text')) qs = session.query(model).load_only(*fields) for related in self.get_related_fields(model): qs = qs.load_related(related) return qs
[ "def", "query", "(", "self", ",", "model", ")", ":", "session", "=", "self", ".", "router", ".", "session", "(", ")", "fields", "=", "tuple", "(", "(", "f", ".", "name", "for", "f", "in", "model", ".", "_meta", ".", "scalarfields", "if", "f", ".", "type", "==", "'text'", ")", ")", "qs", "=", "session", ".", "query", "(", "model", ")", ".", "load_only", "(", "*", "fields", ")", "for", "related", "in", "self", ".", "get_related_fields", "(", "model", ")", ":", "qs", "=", "qs", ".", "load_related", "(", "related", ")", "return", "qs" ]
Return a query for ``model`` when it needs to be indexed.
[ "Return", "a", "query", "for", "model", "when", "it", "needs", "to", "be", "indexed", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/search.py#L142-L151
lsbardel/python-stdnet
stdnet/utils/version.py
get_version
def get_version(version): "Returns a PEP 386-compliant version number from *version*." assert len(version) == 5 assert version[3] in ('alpha', 'beta', 'rc', 'final') parts = 2 if version[2] == 0 else 3 main = '.'.join(map(str, version[:parts])) sub = '' if version[3] == 'alpha' and version[4] == 0: git_changeset = get_git_changeset() if git_changeset: sub = '.dev%s' % git_changeset elif version[3] != 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'} sub = mapping[version[3]] + str(version[4]) return main + sub
python
def get_version(version): "Returns a PEP 386-compliant version number from *version*." assert len(version) == 5 assert version[3] in ('alpha', 'beta', 'rc', 'final') parts = 2 if version[2] == 0 else 3 main = '.'.join(map(str, version[:parts])) sub = '' if version[3] == 'alpha' and version[4] == 0: git_changeset = get_git_changeset() if git_changeset: sub = '.dev%s' % git_changeset elif version[3] != 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'} sub = mapping[version[3]] + str(version[4]) return main + sub
[ "def", "get_version", "(", "version", ")", ":", "assert", "len", "(", "version", ")", "==", "5", "assert", "version", "[", "3", "]", "in", "(", "'alpha'", ",", "'beta'", ",", "'rc'", ",", "'final'", ")", "parts", "=", "2", "if", "version", "[", "2", "]", "==", "0", "else", "3", "main", "=", "'.'", ".", "join", "(", "map", "(", "str", ",", "version", "[", ":", "parts", "]", ")", ")", "sub", "=", "''", "if", "version", "[", "3", "]", "==", "'alpha'", "and", "version", "[", "4", "]", "==", "0", ":", "git_changeset", "=", "get_git_changeset", "(", ")", "if", "git_changeset", ":", "sub", "=", "'.dev%s'", "%", "git_changeset", "elif", "version", "[", "3", "]", "!=", "'final'", ":", "mapping", "=", "{", "'alpha'", ":", "'a'", ",", "'beta'", ":", "'b'", ",", "'rc'", ":", "'c'", "}", "sub", "=", "mapping", "[", "version", "[", "3", "]", "]", "+", "str", "(", "version", "[", "4", "]", ")", "return", "main", "+", "sub" ]
Returns a PEP 386-compliant version number from *version*.
[ "Returns", "a", "PEP", "386", "-", "compliant", "version", "number", "from", "*", "version", "*", "." ]
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/version.py#L20-L34
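Worked examples of the tuple-to-string mapping implemented above::

    get_version((0, 8, 0, 'final', 0))  # '0.8'    (micro == 0 drops the third part)
    get_version((0, 8, 2, 'beta', 1))   # '0.8.2b1'
    get_version((0, 9, 0, 'alpha', 0))  # '0.9.devNNN' when git metadata is available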
rodluger/everest
everest/transit.py
Get_RpRs
def Get_RpRs(d, **kwargs): ''' Returns the value of the planet radius over the stellar radius for a given depth :py:obj:`d`, given the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`. ''' if ps is None: raise Exception("Unable to import `pysyzygy`.") def Depth(RpRs, **kwargs): return 1 - ps.Transit(RpRs=RpRs, **kwargs)([kwargs.get('t0', 0.)]) def DiffSq(r): return 1.e10 * (d - Depth(r, **kwargs)) ** 2 return fmin(DiffSq, [np.sqrt(d)], disp=False)
python
def Get_RpRs(d, **kwargs): ''' Returns the value of the planet radius over the stellar radius for a given depth :py:obj:`d`, given the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`. ''' if ps is None: raise Exception("Unable to import `pysyzygy`.") def Depth(RpRs, **kwargs): return 1 - ps.Transit(RpRs=RpRs, **kwargs)([kwargs.get('t0', 0.)]) def DiffSq(r): return 1.e10 * (d - Depth(r, **kwargs)) ** 2 return fmin(DiffSq, [np.sqrt(d)], disp=False)
[ "def", "Get_RpRs", "(", "d", ",", "*", "*", "kwargs", ")", ":", "if", "ps", "is", "None", ":", "raise", "Exception", "(", "\"Unable to import `pysyzygy`.\"", ")", "def", "Depth", "(", "RpRs", ",", "*", "*", "kwargs", ")", ":", "return", "1", "-", "ps", ".", "Transit", "(", "RpRs", "=", "RpRs", ",", "*", "*", "kwargs", ")", "(", "[", "kwargs", ".", "get", "(", "'t0'", ",", "0.", ")", "]", ")", "def", "DiffSq", "(", "r", ")", ":", "return", "1.e10", "*", "(", "d", "-", "Depth", "(", "r", ",", "*", "*", "kwargs", ")", ")", "**", "2", "return", "fmin", "(", "DiffSq", ",", "[", "np", ".", "sqrt", "(", "d", ")", "]", ",", "disp", "=", "False", ")" ]
Returns the value of the planet radius over the stellar radius for a given depth :py:obj:`d`, given the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
[ "Returns", "the", "value", "of", "the", "planet", "radius", "over", "the", "stellar", "radius", "for", "a", "given", "depth", ":", "py", ":", "obj", ":", "d", "given", "the", ":", "py", ":", "class", ":", "everest", ".", "pysyzygy", "transit", ":", "py", ":", "obj", ":", "kwargs", "." ]
train
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/transit.py#L118-L134
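A hedged call sketch; the depth is in fractional flux units and the transit keyword names (``per``, ``b``) are assumed to be valid :py:class:`everest.pysyzygy` transit parameters::

    # invert a 500 ppm transit depth into a planet-to-star radius ratio
    RpRs = Get_RpRs(500e-6, per=10.0, b=0.25)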